gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import unittest
import numpy as np
from mxnet import gluon
from mxnet.gluon import nn
from mxnet.test_utils import assert_almost_equal
from common import setup_module, with_seed, assertRaises
from copy import deepcopy
from nose.tools import raises, assert_raises
@with_seed()
@raises(RuntimeError)
def test_multi_trainer():
    """A row_sparse Parameter may be bound to at most one Trainer.

    The final Trainer construction must raise RuntimeError, which is
    verified by the @raises decorator.
    """
    param = gluon.Parameter('x', shape=(10,), stype='row_sparse')
    param.initialize()
    # Binding a trainer records it on the parameter.
    first_trainer = gluon.Trainer([param], 'sgd')
    assert param._trainer is first_trainer
    # The association can be cleared and re-established.
    param._set_trainer(None)
    assert param._trainer is None
    param._set_trainer(first_trainer)
    # A second Trainer over the same sparse parameter is not allowed.
    gluon.Trainer([param], 'sgd')
@with_seed()
def test_trainer():
    """End-to-end checks of gluon.Trainer on a dense Parameter replicated on
    two CPU contexts: step(), lr_mult scaling, state save/load, and the
    allreduce_grads()/update() pair with update_on_kvstore=False."""
    def dict_equ(a, b):
        # Helper: two updater-state dicts must hold identical keys and values.
        assert set(a) == set(b)
        for k in a:
            assert (a[k].asnumpy() == b[k].asnumpy()).all()
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # d(w+1)/dw == 1 on each of the 2 devices; the asserted value -2 reflects
    # the aggregated gradient applied with lr=1.0 to the zero-initialised weight.
    assert (x.data(mx.cpu(1)).asnumpy() == -2).all()
    x.lr_mult = 0.5
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # Second step combines the halved effective lr with the momentum buffer.
    assert (x.data(mx.cpu(1)).asnumpy() == -4).all()
    trainer.save_states('test_trainer.states')
    # Snapshot the updater state from whichever code path is active.
    states = deepcopy(trainer._kvstore._updater.states) if trainer._update_on_kvstore \
             else deepcopy(trainer._updaters[0].states)
    trainer.load_states('test_trainer.states')
    # Loading must restore identical updater state on either code path.
    if trainer._update_on_kvstore:
        dict_equ(trainer._kvstore._updater.states, states)
        assert trainer._optimizer == trainer._kvstore._updater.optimizer
    else:
        for updater in trainer._updaters:
            dict_equ(updater.states, states)
        assert trainer._optimizer == trainer._updaters[0].optimizer
    # update()/allreduce_grads() are only legal with update_on_kvstore=False.
    assert_raises(AssertionError, trainer.update, 1)
    assert_raises(AssertionError, trainer.allreduce_grads)
    x = gluon.Parameter('x', shape=(10,))
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer2 = gluon.Trainer([x], 'sgd', {'learning_rate': 1.0, 'momentum': 0.5},
                             update_on_kvstore=False)
    with mx.autograd.record():
        for i, w in enumerate(x.list_data()):
            y = i*w
            y.backward()
    # Per-device gradients (0*w vs 1*w) differ until explicitly allreduced.
    assert (x.grad(mx.cpu(0)).asnumpy() != x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.allreduce_grads()
    assert (x.grad(mx.cpu(0)).asnumpy() == x.grad(mx.cpu(1)).asnumpy()).all()
    trainer2.update(1)
    assert (x.data(mx.cpu(1)).asnumpy() == -1).all(), x.data(mx.cpu(1)).asnumpy()
@with_seed()
def test_trainer_save_load():
    """After save_states()/load_states(), the optimizer must still be wired to
    the live parameter dict so lr_mult changes keep taking effect."""
    param = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
    param.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([param], 'sgd', {'learning_rate': 0.1})
    with mx.autograd.record():
        for arr in param.list_data():
            out = arr + 1
            out.backward()
    trainer.step(1)
    # Base learning rate with lr_mult == 1.0.
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_save_load.states')
    trainer.load_states('test_trainer_save_load.states')
    # Doubling lr_mult after the reload must double the effective lr.
    param.lr_mult = 2.0
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
@with_seed()
def test_trainer_multi_layer_init():
    """Train a net mixing a row_sparse parameter and a dense hybrid block on
    one and on two contexts, and verify lazy initialization completes and the
    sparse weights receive the expected updates."""
    class Net(gluon.Block):
        def __init__(self, **kwargs):
            super(Net, self).__init__(**kwargs)
            with self.name_scope():
                # sparse param
                self.embed_weight = self.params.get('embed_weight', stype='row_sparse',
                                                    shape=(4,3), grad_stype='row_sparse')
                # dense param from a hybrid block
                self.dense0 = nn.Dense(2)

        def forward(self, x):
            # Pull only the rows referenced by `x` from the sparse weight.
            embed_weight = self.embed_weight.row_sparse_data(x)
            embed = mx.nd.Embedding(data=x, weight=embed_weight,
                                    input_dim=4, output_dim=3, sparse_grad=True)
            return self.dense0(embed)

    def check_init(ctxes):
        net = Net(prefix='net_')
        net.initialize(mx.init.One(), ctx=ctxes)
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 1})
        data = mx.nd.array([[0,2], [1,2]])
        xs = gluon.utils.split_and_load(data, ctxes)
        ys = []
        with mx.autograd.record():
            for x in xs:
                y = net(x)
                ys.append(y)
        for y in ys:
            y.backward()
        trainer.step(1)
        # all parameters should be initialized
        assert not trainer._params_to_init
        all_rows = mx.nd.arange(0, 4, ctx=mx.cpu(1))
        # check the updated weights: rows 0 and 1 are each referenced once,
        # row 2 twice, and row 3 never (so it keeps its initial value 1).
        weight = net.embed_weight.row_sparse_data(all_rows).asnumpy()
        assert (weight[0] == -1).all()
        assert (weight[1] == -1).all()
        assert (weight[2] == -3).all()
        assert (weight[3] == 1).all()

    check_init([mx.cpu(1), mx.cpu(2)])
    check_init([mx.cpu(1)])
@with_seed()
def test_trainer_save_load():
    # NOTE(review): this is a byte-for-byte duplicate of the
    # test_trainer_save_load defined earlier in this file. Because the names
    # collide, this definition shadows the first one and only a single copy is
    # ever collected/run. Consider removing one copy or renaming this one if
    # it was meant to cover a distinct case.
    x = gluon.Parameter('x', shape=(10,), lr_mult=1.0)
    x.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
    trainer = gluon.Trainer([x], 'sgd', {'learning_rate': 0.1})
    with mx.autograd.record():
        for w in x.list_data():
            y = w + 1
            y.backward()
    trainer.step(1)
    # Base learning rate with lr_mult == 1.0.
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.1
    trainer.save_states('test_trainer_save_load.states')
    trainer.load_states('test_trainer_save_load.states')
    x.lr_mult = 2.0
    # check if parameter dict is correctly associated with optimizer after load_state
    assert trainer._kvstore._updater.optimizer._get_lr(0) == 0.2
@with_seed()
def test_trainer_reset_kv():
    """Loading parameters from disk must invalidate the Trainer's kvstore so
    subsequent steps re-initialize it against the loaded values."""
    def check_trainer_reset_kv(kv):
        params = gluon.ParameterDict()
        x = params.get('x', shape=(10,), lr_mult=1.0)
        params.initialize(ctx=[mx.cpu(0), mx.cpu(1)], init='zeros')
        trainer = gluon.Trainer(params, 'sgd', {'learning_rate': 0.1}, kvstore=kv)
        # Checkpoint the freshly initialised (zero) parameters.
        params.save('test_trainer_reset_kv.params')
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        assert trainer._kvstore.type == kv
        # load would reset kvstore
        mx.nd.waitall()
        params.load('test_trainer_reset_kv.params')
        assert trainer._kvstore is None
        assert trainer._kv_initialized is False
        with mx.autograd.record():
            for w in x.list_data():
                y = w + 1
                y.backward()
        trainer.step(1)
        # the updated parameter should be based on the loaded checkpoint
        # (zeros), i.e. exactly one step of -0.1 * aggregated gradient.
        assert (x.data(mx.cpu()) == -0.2).asnumpy().all()
    kvs = ['local', 'device']
    for kv in kvs:
        check_trainer_reset_kv(kv)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
  """Compares elementwise binary ops between GPU and CPU execution."""

  def _run_op(self, x, y, tf_func, use_gpu):
    # Evaluate tf_func(x, y) on the requested device.
    with self.test_session(use_gpu=use_gpu) as sess:
      lhs = ops.convert_to_tensor(x)
      rhs = ops.convert_to_tensor(y)
      return sess.run(tf_func(lhs, rhs))

  def _compareGPU(self, x, y, np_func, tf_func):
    # np_func is accepted for signature compatibility; the reference value is
    # the CPU execution of the same TensorFlow op.
    gpu_out = self._run_op(x, y, tf_func, use_gpu=True)
    cpu_out = self._run_op(x, y, tf_func, use_gpu=False)
    self.assertAllClose(cpu_out, gpu_out)

  def testFloatBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
    for np_func, tf_func in ((np.add, math_ops.add),
                             (np.subtract, math_ops.subtract),
                             (np.multiply, math_ops.multiply)):
      self._compareGPU(x, y, np_func, tf_func)
    # Shift the divisor away from zero for the division ops.
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
    self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
    self._compareGPU(x, y, np.power, math_ops.pow)

  def testFloatWithBCast(self):
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
    for np_func, tf_func in ((np.add, math_ops.add),
                             (np.subtract, math_ops.subtract),
                             (np.multiply, math_ops.multiply)):
      self._compareGPU(x, y, np_func, tf_func)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleBasic(self):
    x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
    y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
    for np_func, tf_func in ((np.add, math_ops.add),
                             (np.subtract, math_ops.subtract),
                             (np.multiply, math_ops.multiply)):
      self._compareGPU(x, y, np_func, tf_func)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)

  def testDoubleWithBCast(self):
    x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
    y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
    for np_func, tf_func in ((np.add, math_ops.add),
                             (np.subtract, math_ops.subtract),
                             (np.multiply, math_ops.multiply)):
      self._compareGPU(x, y, np_func, tf_func)
    self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
  """Checks elementwise unary math ops against their numpy references."""

  def _compare(self, x, np_func, tf_func, use_gpu):
    # Evaluate tf_func(x) on the requested device and compare with numpy.
    expected = np_func(x)
    with self.test_session(use_gpu=use_gpu) as sess:
      actual = sess.run(tf_func(ops.convert_to_tensor(x)))
    self.assertAllClose(expected, actual)

  def _inv(self, x):
    return 1.0 / x

  def _rsqrt(self, x):
    return self._inv(np.sqrt(x))

  def _testDtype(self, dtype, use_gpu):
    data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
    data_gt_1 = data + 2  # for x > 1
    # (numpy reference, tensorflow op) pairs evaluated on `data`.
    unary_pairs = (
        (np.abs, math_ops.abs),
        (np.arccos, math_ops.acos),
        (np.arcsin, math_ops.asin),
        (np.arcsinh, math_ops.asinh),
        (np.arctan, math_ops.atan),
        (np.ceil, math_ops.ceil),
        (np.cos, math_ops.cos),
        (np.cosh, math_ops.cosh),
        (np.exp, math_ops.exp),
        (np.floor, math_ops.floor),
        (np.log, math_ops.log),
        (np.log1p, math_ops.log1p),
        (np.negative, math_ops.negative),
        (self._rsqrt, math_ops.rsqrt),
        (np.sin, math_ops.sin),
        (np.sinh, math_ops.sinh),
        (np.sqrt, math_ops.sqrt),
        (np.square, math_ops.square),
        (np.tan, math_ops.tan),
        (np.tanh, math_ops.tanh),
        (np.arctanh, math_ops.atanh),
    )
    for np_func, tf_func in unary_pairs:
      self._compare(data, np_func, tf_func, use_gpu)
    # acosh is only defined for inputs greater than 1.
    self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)

  def testTypes(self):
    for dtype in [np.float32]:
      self._testDtype(dtype, use_gpu=True)

  def testFloorDivide(self):
    shape = [1, 3, 2]
    x = (1 + np.linspace(0, 5, np.prod(shape))).astype(np.float32).reshape(shape)
    y = (1 + np.linspace(0, 5, np.prod(shape))).astype(np.float32).reshape(shape)
    np_out = np.floor_divide(x, y + 0.1)
    with self.test_session(use_gpu=True) as sess:
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y + 0.1)
      tf_out = sess.run(math_ops.floor(inx / iny))
    self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
  """Gradient and GPU checks for broadcasting binary ops."""

  def _GetGradientArgs(self, xs, ys):
    # Returns the reduction axes chosen by the broadcast gradient helper.
    with self.test_session(use_gpu=True) as sess:
      return sess.run(_broadcast_gradient_args(xs, ys))

  def testBroadcast(self):
    r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
    self.assertAllEqual(r0, [])
    self.assertAllEqual(r1, [0, 1, 2])

  # Per-dtype tolerance for the jacobian comparisons below.
  _GRAD_TOL = {dtypes.float32: 1e-3}

  def _compareGradientX(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    # Compare the symbolic jacobian w.r.t. x against the numeric one.
    z = np_func(x, y)
    zs = list(z.shape)
    with self.test_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        # Scale the output so the gradient is not trivially 1.
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      xs = list(x.shape)
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          inx, xs, out, zs, x_init_value=x)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGradientY(self,
                        x,
                        y,
                        np_func,
                        tf_func,
                        numeric_gradient_type=None):
    # Same as _compareGradientX but differentiates w.r.t. y.
    z = np_func(x, y)
    zs = list(z.shape)
    with self.test_session():
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      if x.dtype in (np.float32, np.float64):
        out = 1.1 * tf_func(inx, iny)
      else:
        out = tf_func(inx, iny)
      ys = list(np.shape(y))
      jacob_t, jacob_n = gradient_checker.compute_gradient(
          iny, ys, out, zs, x_init_value=y)
      tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
      self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)

  def _compareGpu(self, x, y, np_func, tf_func):
    # Forward-value comparison of the op on GPU against numpy.
    np_ans = np_func(x, y)
    with self.test_session(use_gpu=True):
      inx = ops.convert_to_tensor(x)
      iny = ops.convert_to_tensor(y)
      out = tf_func(inx, iny)
      tf_gpu = out.eval()
    self.assertAllClose(np_ans, tf_gpu)
    self.assertShapeEqual(np_ans, out)
    # TODO(zhifengc/ke): make gradient checker work on GPU.

  def testGradient(self):
    x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
        [1, 3, 2])
    self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
    self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y, np.true_divide, math_ops.truediv)
    self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
  """Tests concurrent sessions executing on the same GPU."""

  def _run_session(self, session, results):
    # Run a fixed seeded graph many times in `session`; every computed scalar
    # is added to `results` so the caller can check determinism. Bail out
    # early as soon as a second distinct value appears.
    n_iterations = 500
    with session as s:
      data = variables.Variable(1.0)
      with ops.device('/device:GPU:0'):
        random_seed.set_random_seed(1)
        matrix1 = variables.Variable(
            random_ops.truncated_normal([1024, 1]), name='matrix1')
        matrix2 = variables.Variable(
            random_ops.truncated_normal([1, 1024]), name='matrix2')
        x1 = math_ops.multiply(data, matrix1, name='x1')
        x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
        x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
      s.run(variables.global_variables_initializer())
      for _ in xrange(n_iterations):
        value = s.run(x4)
        results.add(value.flat[0])
        if len(results) != 1:
          break

  def testConcurrentSessions(self):
    # Launch several threads, each with its own session and graph, and check
    # that all of them compute the identical value.
    n_threads = 4
    threads = []
    results = []
    for _ in xrange(n_threads):
      session = self.test_session(graph=ops.Graph(), use_gpu=True)
      results.append(set())
      args = (session, results[-1])
      threads.append(threading.Thread(target=self._run_session, args=args))
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    flat_results = set([x for x in itertools.chain(*results)])
    self.assertEqual(1,
                     len(flat_results),
                     'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
  # Let the TensorFlow test runner discover and run the cases above.
  test.main()
| |
from __future__ import print_function
from info import perceptualProps, adjective_clusters, noun_clusters
from sys import argv, stderr
import os
from sets import Set
from itertools import combinations
from copy import copy
from ubigraph_thread import UGraph
from clique import Clique
from sepNode import SepNode
# Module-level state shared by the belief-propagation routines below.
_cooc_table = {}
_cliques = []
_sep_nodes = []
_edges = []
_concepts = []
_objects = []
_labels = []
_obj_labels = {}

# Smoothing probability assigned to clique configurations never observed.
PRIOR_PROBABILITY = 0.005
is_dual_concept_cliques = True

# Dichotomy table: each adjective maps to its opposite, in both directions.
# (A nicer representation may replace this later. ---hande)
not_list = {}
for _adj, _opp in [('hard', 'soft'),
                   ('thin', 'thick'),
                   ('noisy', 'silent'),
                   ('tall', 'short'),
                   ('round', 'edgy'),
                   ('s_colored', 'colorful'),
                   ('covered', 'uncovered')]:
    not_list[_adj] = _opp
    not_list[_opp] = _adj
del _adj, _opp
def read_speech_data():
    """Read per-object speech descriptions from ../../incomplete_speech.txt.

    Each line has the form ``<object>,<label>,<label>,...``.  Populates the
    module globals `_obj_labels` (object name -> list of labels) and
    `_labels` (unique labels across all objects).
    """
    global _labels, _obj_labels
    label_set = Set()
    # Bug fix: the original opened the file and never closed it; `with`
    # guarantees the handle is released. Iterating the file object also
    # avoids materialising every line via readlines().
    with open('../../incomplete_speech.txt', 'r') as fp:
        for line in fp:
            words = line.strip('\n').split(',')
            obj_name = words[0]
            _obj_labels[obj_name] = words[1:]
            for desc in words[1:]:
                label_set.add(desc)
    _labels = list(label_set)
def is_edge(p1, p2):
    """Return True when an undirected edge between p1 and p2 already exists."""
    global _edges
    # Edges are stored as 2-element lists; check both orientations.
    return [p1, p2] in _edges or [p2, p1] in _edges
def append_edges(list_concept):
    """Add an undirected edge for every unordered pair of concepts in
    list_concept that is not already present in the global edge list."""
    global _concepts, _edges
    # combinations() yields the same (earlier, later) pairs as the original
    # nested index loops, in the same order.
    for start, end in combinations(list_concept, 2):
        if not is_edge(start, end):
            _edges.append([start, end])
def clear():
    """Drop all cliques and reset every separator node's state."""
    global _cliques
    _cliques = []
    for node in _sep_nodes:
        node.clear()
def read_cooccurrences():
    """Build the co-occurrence graph: separator nodes for every concept and
    speech label, edges between concepts that co-occur on an object, and an
    inverted index from concept to the objects exhibiting it."""
    global _cooc_table, _concepts, _labels, _obj_labels, _edges, _concept_objects, _sep_nodes, _objects
    # temporarily add speech labels to the list of pure concepts
    _concepts = list(Set([prop for props in perceptualProps.values() for prop in props])) + _labels
    # create separator nodes from pure concepts
    _sep_nodes = [SepNode(adj_1, adj_2) for adj_1, adj_2 in adjective_clusters]
    _sep_nodes += [SepNode(noun) for noun in noun_clusters]
    # create separator nodes from speech labels
    for label in _labels:
        if label in not_list.keys():
            # Only the alphabetically-smaller member of a dichotomy creates
            # the (label, opposite) node, so each pair yields one node.
            if label < not_list[label]:
                _sep_nodes += [SepNode(label, not_list[label])]
        else:
            _sep_nodes += [SepNode(label)]
    # create edges & inverted list of objects for given concepts+labels
    _concept_objects = dict().fromkeys(_concepts)
    for obj_name, props in perceptualProps.items():
        append_edges(props + _obj_labels[obj_name])
        for concept in props + _obj_labels[obj_name]:
            if _concept_objects[concept] is None:
                _concept_objects[concept] = [obj_name]
            else:
                _concept_objects[concept].append(obj_name)
    # set globals
    # NOTE(review): num_samples is computed but never used.
    num_samples = len(perceptualProps)
    _objects = perceptualProps.keys()
    # restore pure concepts w/o speech labels
    _concepts = list(Set([prop for props in perceptualProps.values() for prop in props]))
def is_clique(vertex, list_vertices):
    """True when `vertex` is connected to every vertex in list_vertices
    (i.e. adding it keeps the set a clique)."""
    return all(is_edge(vertex, v) for v in list_vertices)
def remove_dup(new_clique, cliques):
    """Merge new_clique into cliques, eliminating subset duplicates.

    If the first overlapping existing clique is a subset of new_clique it is
    replaced by new_clique; if new_clique is a subset of an existing clique
    the list is returned unchanged; otherwise new_clique is appended.

    Cleanup: the original computed an `is_print` flag from a hard-coded
    debug clique and carried a block of commented-out prints — all dead
    code, removed here.  The deprecated ``sets.Set`` (removed in Python 3)
    is replaced by the equivalent builtin ``set``.
    """
    new_set = set(new_clique)
    for ind, clique in enumerate(cliques):
        clique_set = set(clique)
        if clique_set.issubset(new_set):
            # Existing clique is contained in the new one: replace it.
            cliques.pop(ind)
            return cliques + [new_clique]
        elif new_set.issubset(clique_set):
            # New clique adds nothing over this one: keep the list as-is.
            return cliques
    return cliques + [new_clique]
def is_cliques_equal(clique_1, clique_2):
    """Compare two cliques modulo dichotomies: each concept counts together
    with its opposite from not_list."""
    def expand(clique):
        # Concept set closed under the dichotomy mapping.
        expanded = Set()
        for concept in clique:
            expanded.add(concept)
            if concept in not_list.keys():
                expanded.add(not_list[concept])
        return expanded
    return expand(clique_1) == expand(clique_2)
def eliminate_dual_cliques(cliques):
    """Collapse cliques that are identical up to dichotomy pairs.

    Each clique is expanded with the opposite of every dichotomous concept
    and sorted, so dual-equivalent cliques map to the same tuple in a set;
    the surviving expanded cliques are then reduced back to one member per
    dichotomy pair.
    """
    no_duplicates = Set()
    for clique in cliques:
        duals_added = []
        for concept in clique:
            duals_added.append(concept)
            if concept in not_list.keys():
                duals_added.append(not_list[concept])
        duals_added.sort()
        no_duplicates.add(tuple(duals_added))
    cliques = list()
    for tpl in no_duplicates:
        cliques.append(list(tpl))
    for clique in cliques:
        for concept in clique:
            # Drop the opposite of each dichotomous concept.  Mutating the
            # list during iteration appears safe here because the list is
            # sorted, so the opposite being removed always sits at a later
            # index than the concept that triggers its removal — TODO confirm.
            if concept in not_list.keys() and not_list[concept] in clique:
                clique.pop(clique.index(not_list[concept]))
    return cliques
def eliminate_subset_cliques(cliques):
    """Return the cliques that are not contained in another clique.

    Robustness fix: the original eliminated a clique whenever it was a
    subset of ANY other, so two identical cliques removed each other and
    both were lost.  Here a clique is dropped only when it is a proper
    subset of another, or a duplicate of an earlier one (the first
    occurrence is kept).  The deprecated ``sets.Set`` is replaced by the
    builtin ``set``.
    """
    as_sets = [set(c) for c in cliques]
    survivors = []
    for k, current in enumerate(as_sets):
        is_encapsulated = False
        for i, other in enumerate(as_sets):
            if i == k:
                continue
            # Proper subset, or equal to an earlier (kept) duplicate.
            if current < other or (current == other and i < k):
                is_encapsulated = True
                break
        if not is_encapsulated:
            survivors.append(cliques[k])
    return survivors
def find_cliques(obj_name = None):
    """Grow cliques over the concept/label co-occurrence graph.

    When obj_name is given, only that object's speech labels extend the pure
    concepts; otherwise all known labels are eligible.  The resulting Clique
    objects are stored in the global _cliques (and their concept lists are
    printed).
    """
    global _edges, _concepts, _cliques, _obj_labels, _labels
    if obj_name:
        concepts_and_labels = _concepts + _obj_labels[obj_name]
    else:
        concepts_and_labels = _concepts + _labels
    # Seed the worklist with every edge whose endpoints are both eligible.
    cliques = []
    for edge in _edges:
        if edge[0] in concepts_and_labels and edge[1] in concepts_and_labels:
            cliques.append(edge)
    ret_cliques = list()
    while len(cliques) != 0:
        clique = cliques.pop()
        other_concepts = Set(concepts_and_labels).difference(Set(clique))
        new_clique = None
        for other_idx in other_concepts:
            if(is_clique(other_idx, clique)):
                # Extend the clique and fold it back into the worklist,
                # letting remove_dup absorb subset duplicates.
                new_clique = clique + [other_idx]
                cliques = remove_dup(new_clique, cliques)
        # NOTE(review): when a popped clique cannot be extended at all
        # (new_clique stays None) it is silently dropped — confirm bare
        # 2-cliques are not meant to survive into the results.
        if not new_clique is None:
            ret_cliques = remove_dup(copy(new_clique), ret_cliques)
    if is_dual_concept_cliques:
        ret_cliques = eliminate_dual_cliques(ret_cliques)
    ret_cliques = eliminate_subset_cliques(ret_cliques)
    _cliques = [Clique(lst) for lst in ret_cliques]
    for clique in _cliques:
        print(clique._concepts)
def fill_potential_tables():
    """Estimate each clique's potential table from object co-occurrences.

    For every binary configuration of a clique's concepts, the potential is
    the fraction of objects matching that configuration exactly (concepts
    flagged '1' present, the rest absent), falling back to the smoothing
    constant PRIOR_PROBABILITY when no object matches.
    """
    global _concept_objects, _objects, _cliques
    for k, clique in enumerate(_cliques):
        for configuration in clique._potential_table.keys():
            # Start from all objects and narrow per concept bit.
            intersect = Set(perceptualProps.keys())
            for i, letter in enumerate(configuration):
                corresponding_concept = clique._concepts[i]
                if letter == '1':
                    intersect = intersect.intersection(_concept_objects[corresponding_concept])
                else:
                    intersect = intersect.difference(_concept_objects[corresponding_concept])
            if len(intersect) == 0:
                probability = PRIOR_PROBABILITY
            else:
                probability = float(len(intersect)) / float(len(perceptualProps))
            _cliques[k]._set_potential(configuration, probability)
        clique.normalize_potentials()
def override_sep_potential(concept, value):
    """Force the separator-node potential of `concept` to `value`."""
    global _sep_nodes
    # Bug fix: the original called print('setting %s: %f', concept, value),
    # passing the format string and values as separate arguments so the
    # placeholders were never interpolated. Apply the % operator instead.
    print('setting %s: %f' % (concept, value))
    _sep_nodes[_sep_nodes.index(SepNode(concept))].override_potential(concept, value)
def propagate_belief(iteration=1):
    """Run `iteration` sweeps of loopy belief propagation over the cliques,
    exchanging messages with the separator nodes, then renormalize the
    mutually-exclusive noun / material / type groups after each sweep."""
    global _concepts, _cliques, _sep_nodes
    for it in range(iteration):
        for k, clique in enumerate(_cliques):
            # Keep the pre-update table so previous messages can be recomputed.
            clique._prev_potential_table = copy(clique._potential_table)
            # Multiply every configuration by the incoming separator messages,
            # one factor per concept/sign position in the configuration key.
            for kind, key in enumerate(clique._potential_table.keys()):
                msgs = 1.0
                for i, sgn in enumerate(key):
                    concept = clique._concepts[i]
                    msg = _sep_nodes[_sep_nodes.index(SepNode(concept))].get_potential(concept, int(sgn))
                    msgs *= msg
                clique._potential_table[key] *= msgs
            clique.normalize_potentials()
            # Send updated marginals back to each concept's separator node.
            for concept in clique._concepts:
                # Originally guarded (comment was Turkish: "don't let
                # perception be affected"); the guard is disabled:
                #if concept not in _descriptions:
                #    continue
                new_messages = [0, 0]
                prev_messages = [0, 0]
                for val in [0, 1]:
                    # calculate new messages (note: `sum` shadows the builtin)
                    sum = 0
                    for index in clique._get_multiple_indices({concept:val}):
                        sum += clique._potential_table[index]
                    new_messages[val] = sum
                    # calculate previous messages
                    sum = 0
                    for index in clique._get_multiple_indices({concept:val}):
                        sum += clique._prev_potential_table[index]
                    prev_messages[val] = sum
                # From the third sweep on, pass the previous messages so the
                # separator can divide them out.
                if it > 1:
                    _sep_nodes[_sep_nodes.index(SepNode(concept))].update_potentials(new_messages, concept, prev_messages)
                else:
                    _sep_nodes[_sep_nodes.index(SepNode(concept))].update_potentials(new_messages, concept)
            clique.visit()
        # normalize the four nouns (box-ball-cup-cylinder) in each iteration
        Z = 0
        for noun in ['ball', 'box', 'cup', 'cylinder', 'plate', 'tool']:
            Z += _sep_nodes[_sep_nodes.index(SepNode(noun))].get_potential(noun, 1)
        for noun in ['ball', 'box', 'cup', 'cylinder', 'plate', 'tool']:
            norm_potential = _sep_nodes[_sep_nodes.index(SepNode(noun))].get_potential(noun, 1) / Z
            _sep_nodes[_sep_nodes.index(SepNode(noun))].override_potential(noun, norm_potential)
            _sep_nodes[_sep_nodes.index(SepNode('not_' + noun))].override_potential('not_' + noun, 1 - norm_potential)
        # normalize materials (wooden-styrofoam-ceramic-plastic-sponge-carton-metal) in each iteration
        Z = 0
        for material in ['wooden', 'ceramic', 'plastic', 'sponge', 'carton', 'metal', 'insulation']:
            Z += _sep_nodes[_sep_nodes.index(SepNode(material))].get_potential(material, 1)
        for material in ['wooden', 'ceramic', 'plastic', 'sponge', 'carton', 'metal', 'insulation']:
            norm_potential = _sep_nodes[_sep_nodes.index(SepNode(material))].get_potential(material, 1) / Z
            _sep_nodes[_sep_nodes.index(SepNode(material))].override_potential(material, norm_potential)
            _sep_nodes[_sep_nodes.index(SepNode('not_' + material))].override_potential('not_' + material, 1 - norm_potential)
        # normalize types (workshop-toy-utencil) in each iteration
        Z = 0
        for tip in ['workshop', 'toy', 'utencil']:
            Z += _sep_nodes[_sep_nodes.index(SepNode(tip))].get_potential(tip, 1)
        for tip in ['workshop', 'toy', 'utencil']:
            norm_potential = _sep_nodes[_sep_nodes.index(SepNode(tip))].get_potential(tip, 1) / Z
            _sep_nodes[_sep_nodes.index(SepNode(tip))].override_potential(tip, norm_potential)
            _sep_nodes[_sep_nodes.index(SepNode('not_' + tip))].override_potential('not_' + tip, 1 - norm_potential)
def pretty_print():
    """Print each clique's potential table, separated by blank lines."""
    global _cliques
    for current in _cliques:
        current.print_potential_table()
        print()
def print_sep_values():
    """Print the 'concept present' potential of every separator node."""
    global _concepts, _sep_nodes
    print('\n---------------- Separator Values -------------------------------------------------\n')
    for concept in _concepts:
        node = _sep_nodes[_sep_nodes.index(SepNode(concept))]
        print('%s: %.5f' % (concept, node.get_potential(concept, 1)))
def get_marginalized_values(obj_name = None):
    """Marginalize each concept/label out of a clique containing it.

    Returns a dict mapping every concept to the probability of it being
    present, read from the first clique that contains either the concept or
    its dichotomy opposite (in which case the complementary value is used).
    """
    global _concepts, _cliques, _obj_labels, _labels
    all_marginalized = {}
    if obj_name:
        concepts_and_labels = _concepts + _obj_labels[obj_name]
    else:
        concepts_and_labels = _concepts + _labels
    for concept in concepts_and_labels:
        print(concept)
        containing_clique = None
        # Prefer a clique holding the concept itself...
        for clique in _cliques:
            if concept in clique._concepts:
                containing_clique = clique
                concept_in_clique = concept
                break
        # ...otherwise fall back to one holding its dichotomy opposite.
        if not containing_clique:
            for clique in _cliques:
                if concept in not_list.keys() and not_list[concept] in clique._concepts:
                    containing_clique = clique
                    concept_in_clique = not_list[concept]
                    break
        # NOTE(review): if neither search succeeds, containing_clique is None
        # and the loop below raises AttributeError — confirm every concept is
        # guaranteed to appear in some clique.
        marginalized = [0, 0]
        for val in [0,1]:
            for index in containing_clique._get_multiple_indices({concept_in_clique:val}):
                marginalized[val] += containing_clique._potential_table[index]
        if concept_in_clique == concept:
            all_marginalized[concept] = marginalized[1]
        else:
            # Matched via the opposite: the 'absent' mass is this concept's
            # 'present' probability.
            all_marginalized[concept] = marginalized[0]
    print(all_marginalized)
    return all_marginalized
if __name__=='__main__':
    #ubi = UGraph()
    #ubi.start()
    # This script takes no command-line arguments.
    if len(argv) != 1:
        print('Usage: python', argv[0])
        exit(1)
    # Cleanup: the original re-tested `len(argv) == 1` here, which is always
    # true after the guard above, so the pipeline now runs unconditionally.
    read_speech_data()
    read_cooccurrences()
    find_cliques()
    fill_potential_tables()
    #pretty_print()
| |
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module provides the Constraint class for handling
filters and pivots in a modular fashion. This enables easy
constraint application
What is a Constraint?
1. It is collection of data based on two rules:
a. A Pivot
b. A Set of Filters
For Example:
for a dataframe
Time CPU Latency
1 x <val>
2 y <val>
3 z <val>
4 a <val>
The resultant data will be for each unique pivot value with the filters applied
result["x"] = pd.Series.filtered()
result["y"] = pd.Series.filtered()
result["z"] = pd.Series.filtered()
result["a"] = pd.Series.filtered()
"""
# pylint: disable=R0913
from trappy.plotter.Utils import decolonize, listify, normalize_list
from trappy.plotter import AttrConf
class Constraint(object):
"""The constructor takes a filter and a pivot object,
The apply method takes a trappy Run object and a column
and applies the constraint on input object
"""
def __init__(
self, trappy_run, pivot, column, template, run_index, filters):
self._trappy_run = trappy_run
self._filters = filters
self._pivot = pivot
self._column = column
self._template = template
self._dup_resolved = False
self._data = self.populate_data_frame()
try:
self.result = self._apply()
except ValueError:
if not self._dup_resolved:
self._handle_duplicate_index()
try:
self.result = self._apply()
except:
raise ValueError("Unable to handle duplicates")
self.run_index = run_index
def _apply(self):
"""This method applies the filter on the resultant data
on the input column.
Do we need pivot_val?
"""
data = self._data
result = {}
try:
values = data[self._column]
except KeyError:
return result
if self._pivot == AttrConf.PIVOT:
criterion = values.map(lambda x: True)
for key in self._filters.keys():
if key in data.columns:
criterion = criterion & data[key].map(
lambda x: x in self._filters[key])
values = values[criterion]
result[AttrConf.PIVOT_VAL] = values
return result
pivot_vals = self.pivot_vals(data)
for pivot_val in pivot_vals:
criterion = values.map(lambda x: True)
for key in self._filters.keys():
if key != self._pivot and key in data.columns:
criterion = criterion & data[key].map(
lambda x: x in self._filters[key])
values = values[criterion]
val_series = values[data[self._pivot] == pivot_val]
if len(val_series) != 0:
result[pivot_val] = val_series
return result
def _handle_duplicate_index(self):
"""Handle duplicate values in index"""
data = self._data
self._dup_resolved = True
index = data.index
new_index = index.values
dups = index.get_duplicates()
for dup in dups:
# Leave one of the values intact
dup_index_left = index.searchsorted(dup, side="left")
dup_index_right = index.searchsorted(dup, side="right") - 1
num_dups = dup_index_right - dup_index_left + 1
delta = (index[dup_index_right + 1] - dup) / num_dups
if delta > AttrConf.DUPLICATE_VALUE_MAX_DELTA:
delta = AttrConf.DUPLICATE_VALUE_MAX_DELTA
# Add a delta to the others
dup_index_left += 1
while dup_index_left <= dup_index_right:
new_index[dup_index_left] += delta
delta += delta
dup_index_left += 1
self._data = self._data.reindex(new_index)
def _uses_trappy_run(self):
if not self._template:
return False
else:
return True
def populate_data_frame(self):
"""Return the data frame"""
if not self._uses_trappy_run():
return self._trappy_run
data_container = getattr(
self._trappy_run,
decolonize(self._template.name))
return data_container.data_frame
def pivot_vals(self, data):
"""This method returns the unique pivot values for the
Constraint's pivot and the column
"""
if self._pivot == AttrConf.PIVOT:
return AttrConf.PIVOT_VAL
if self._pivot not in data.columns:
return []
pivot_vals = set(data[self._pivot])
if self._pivot in self._filters:
pivot_vals = pivot_vals & set(self._filters[self._pivot])
return list(pivot_vals)
def __str__(self):
    """Return a human readable ``name:template:column`` label."""
    parts = [self.get_data_name()]
    if self._uses_trappy_run():
        parts.append(self._template.name)
    parts.append(self._column)
    return ":".join(parts)
def get_data_name(self):
    """Return a display name for the underlying data member.

    Trappy-run backed constraints use the run's own name when one is
    set, falling back to ``Run <index>``; plain data frames are
    labelled ``DataFrame <index>``.
    """
    if not self._uses_trappy_run():
        return "DataFrame {}".format(self.run_index)
    run_name = self._trappy_run.name
    if run_name != "":
        return run_name
    return "Run {}".format(self.run_index)
class ConstraintManager(object):
    """A class responsible for converting inputs
    to constraints and also ensuring sanity

    :param runs: one or more trappy runs (or bare data frames)
    :param columns: one or more column names
    :param templates: one or more event templates
    :param pivot: the column on which results are pivoted
    :param filters: dict mapping column names to allowed values
    :param zip_constraints: when True, pair runs/columns/templates
        element-wise (like ``zip``); otherwise build the full
        run x column cross product
    """

    def __init__(self, runs, columns, templates, pivot, filters,
                 zip_constraints=True):
        self._ip_vec = []
        self._ip_vec.append(listify(runs))
        self._ip_vec.append(listify(columns))
        self._ip_vec.append(listify(templates))
        # BUGFIX: materialize the lengths eagerly.  On Python 3 a lazy
        # ``map`` object would be exhausted by the first max()/min()
        # call and cannot be indexed (generate_pivots uses _lens[0]).
        self._lens = [len(vec) for vec in self._ip_vec]
        self._max_len = max(self._lens)
        self._pivot = pivot
        self._filters = filters
        self._constraints = []
        self._run_expanded = False
        self._expand()
        if zip_constraints:
            self._populate_zip_constraints()
        else:
            self._populate_constraints()

    def _expand(self):
        """This is really important. We need to
        meet the following criteria for constraint
        expansion:

            Len[runs] == Len[columns] == Len[templates]

        OR::

            Permute(
                Len[runs] = 1
                Len[columns] = 1
                Len[templates] != 1
            )

            Permute(
                Len[runs] = 1
                Len[columns] != 1
                Len[templates] != 1
            )

        :raises RuntimeError: when the input lengths cannot be
            normalized to a single common length
        """
        min_len = min(self._lens)
        # Positions (run/column/template) whose input is shorter than
        # the longest one and therefore needs repeating.
        max_pos_comp = [
            i for i, j in enumerate(self._lens) if j != self._max_len
        ]

        if self._max_len == 1 and min_len != 1:
            raise RuntimeError("Essential Arg Missing")

        if self._max_len > 1:
            # Are they all equal?
            if len(set(self._lens)) == 1:
                return
            if min_len > 1:
                raise RuntimeError("Cannot Expand a list of Constraints")
            for val in max_pos_comp:
                if val == 0:
                    # The run list itself was repeated; remember this so
                    # zip expansion reuses run index 0 for every entry.
                    self._run_expanded = True
                self._ip_vec[val] = normalize_list(self._max_len,
                                                   self._ip_vec[val])

    def _populate_constraints(self):
        """Populate the constraints creating one for each column in each run

        In a multirun, multicolumn scenario, create constraints for
        all the columns in each of the runs.  Unlike
        :meth:`_populate_zip_constraints`, which pairs the inputs
        element-wise, this creates a constraint for every combination
        of runs and columns possible.
        """
        for run_idx, run in enumerate(self._ip_vec[0]):
            for col in self._ip_vec[1]:
                # Templates were normalized to the runs' length above,
                # so they are indexed by run.
                template = self._ip_vec[2][run_idx]
                constraint = Constraint(run, self._pivot, col, template,
                                        run_idx, self._filters)
                self._constraints.append(constraint)

    def get_column_index(self, constraint):
        """Return the position of *constraint*'s column in the input list."""
        return self._ip_vec[1].index(constraint._column)

    def _populate_zip_constraints(self):
        """Populate the expanded constraints

        In a multirun, multicolumn scenario, create constraints for
        the first run and the first column, second run and second
        column,... that is, as if you run zip(runs, columns)
        """
        for idx in range(self._max_len):
            # When the run list was expanded, every constraint refers
            # to the single original run (index 0).
            run_idx = 0 if self._run_expanded else idx
            run = self._ip_vec[0][idx]
            col = self._ip_vec[1][idx]
            template = self._ip_vec[2][idx]
            self._constraints.append(
                Constraint(run, self._pivot, col, template, run_idx,
                           self._filters))

    def generate_pivots(self, permute=False):
        """Return a union of the pivot values

        :param permute: when True, return a generator of
            ``(run_index, pivot)`` pairs plus the total pair count
            instead of the plain sorted pivot list.
        """
        pivot_vals = []
        for constraint in self._constraints:
            # extend() accepts the keys view/list on both Python 2 & 3.
            pivot_vals.extend(constraint.result.keys())

        p_list = list(set(pivot_vals))
        runs = range(self._lens[0])

        # Prefer numeric ordering, then hexadecimal, then the natural
        # sort.  BUGFIX: the exception clauses must be tuples --
        # ``except ValueError, TypeError`` (legacy Python 2 syntax)
        # caught only ValueError and rebound it to the name TypeError,
        # so genuine TypeErrors escaped unhandled.
        try:
            sorted_plist = sorted(p_list, key=int)
        except (ValueError, TypeError):
            try:
                sorted_plist = sorted(p_list, key=lambda x: int(x, 16))
            except (ValueError, TypeError):
                sorted_plist = sorted(p_list)

        if permute:
            pivot_gen = ((run_idx, pivot)
                         for run_idx in runs
                         for pivot in sorted_plist)
            return pivot_gen, len(sorted_plist) * self._lens[0]

        return sorted_plist, len(sorted_plist)

    def constraint_labels(self):
        """Get the Str representation of the constraints"""
        # List comprehension keeps the return type a list on Python 3
        # (a bare ``map`` would return a lazy iterator).
        return [str(constraint) for constraint in self._constraints]

    def __len__(self):
        return len(self._constraints)

    def __iter__(self):
        return iter(self._constraints)
| |
"""
Support for Radio Thermostat wifi-enabled home thermostats.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.radiotherm/
"""
import datetime
import logging
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice, PLATFORM_SCHEMA
from homeassistant.components.climate.const import (
STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_IDLE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_OPERATION_MODE, SUPPORT_FAN_MODE, SUPPORT_AWAY_MODE)
from homeassistant.const import (
ATTR_TEMPERATURE, CONF_HOST, PRECISION_HALVES, TEMP_FAHRENHEIT, STATE_ON,
STATE_OFF)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['radiotherm==2.0.0']

_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed via device_state_attributes.
ATTR_FAN = 'fan'
ATTR_MODE = 'mode'

# Platform configuration keys.
CONF_HOLD_TEMP = 'hold_temp'
CONF_AWAY_TEMPERATURE_HEAT = 'away_temperature_heat'
CONF_AWAY_TEMPERATURE_COOL = 'away_temperature_cool'

# Default away-mode setpoints (degrees Fahrenheit).
DEFAULT_AWAY_TEMPERATURE_HEAT = 60
DEFAULT_AWAY_TEMPERATURE_COOL = 85

STATE_CIRCULATE = "circulate"

# Operation / fan mode lists exposed to Home Assistant.
OPERATION_LIST = [STATE_AUTO, STATE_COOL, STATE_HEAT, STATE_OFF]
CT30_FAN_OPERATION_LIST = [STATE_ON, STATE_AUTO]
CT80_FAN_OPERATION_LIST = [STATE_ON, STATE_CIRCULATE, STATE_AUTO]

# Mappings from radiotherm json data codes to and from HASS state
# flags. CODE is the thermostat integer code and these map to and
# from HASS state flags.

# Programmed temperature mode of the thermostat.
CODE_TO_TEMP_MODE = {0: STATE_OFF, 1: STATE_HEAT, 2: STATE_COOL, 3: STATE_AUTO}
TEMP_MODE_TO_CODE = {v: k for k, v in CODE_TO_TEMP_MODE.items()}

# Programmed fan mode (circulate is supported by CT80 models)
CODE_TO_FAN_MODE = {0: STATE_AUTO, 1: STATE_CIRCULATE, 2: STATE_ON}
FAN_MODE_TO_CODE = {v: k for k, v in CODE_TO_FAN_MODE.items()}

# Active thermostat state (is it heating or cooling?). In the future
# this should probably made into heat and cool binary sensors.
CODE_TO_TEMP_STATE = {0: STATE_IDLE, 1: STATE_HEAT, 2: STATE_COOL}

# Active fan state. This is if the fan is actually on or not. In the
# future this should probably made into a binary sensor for the fan.
CODE_TO_FAN_STATE = {0: STATE_OFF, 1: STATE_ON}
def round_temp(temperature):
    """Round a temperature to the resolution of the thermostat.

    RadioThermostats can handle 0.5 degree temps, so the input is
    snapped to the nearest half degree.
    """
    return 0.5 * round(2.0 * temperature)
# Voluptuous schema for the platform configuration; away temperatures
# are coerced to float and snapped to the device's 0.5 degree
# resolution via round_temp.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_HOST): vol.All(cv.ensure_list, [cv.string]),
    vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean,
    vol.Optional(CONF_AWAY_TEMPERATURE_HEAT,
                 default=DEFAULT_AWAY_TEMPERATURE_HEAT):
    vol.All(vol.Coerce(float), round_temp),
    vol.Optional(CONF_AWAY_TEMPERATURE_COOL,
                 default=DEFAULT_AWAY_TEMPERATURE_COOL):
    vol.All(vol.Coerce(float), round_temp),
})

# Climate entity features supported by this platform.
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE |
                 SUPPORT_FAN_MODE | SUPPORT_AWAY_MODE)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Radio Thermostat.

    Hosts come from configuration when given, otherwise from UDP
    discovery on the local network.  Returns False when no thermostat
    can be found.
    """
    import radiotherm

    hosts = []
    if CONF_HOST in config:
        hosts = config[CONF_HOST]
    else:
        # BUGFIX: the previous code appended the discovery result
        # unconditionally and then tested ``hosts is None``, which can
        # never be true since ``hosts`` is always a list; a failed
        # discovery slipped through as [None].  Validate the address.
        address = radiotherm.discover.discover_address()
        if address is not None:
            hosts.append(address)

    if not hosts:
        _LOGGER.error("No Radiotherm Thermostats detected")
        return False

    hold_temp = config.get(CONF_HOLD_TEMP)
    away_temps = [
        config.get(CONF_AWAY_TEMPERATURE_HEAT),
        config.get(CONF_AWAY_TEMPERATURE_COOL)
    ]
    tstats = []

    for host in hosts:
        try:
            tstat = radiotherm.get_thermostat(host)
            tstats.append(RadioThermostat(tstat, hold_temp, away_temps))
        except OSError:
            # Log and keep going so one unreachable device does not
            # prevent the others from being added.
            _LOGGER.exception("Unable to connect to Radio Thermostat: %s",
                              host)

    add_entities(tstats, True)
class RadioThermostat(ClimateDevice):
    """Representation of a Radio Thermostat."""

    def __init__(self, device, hold_temp, away_temps):
        """Initialize the thermostat.

        device: radiotherm thermostat object used for all device I/O.
        hold_temp: config flag -- hold the setpoint instead of
            following the programmed schedule.
        away_temps: [heat, cool] setpoints applied in away mode.
        """
        self.device = device
        self._target_temperature = None
        self._current_temperature = None
        self._current_operation = STATE_IDLE
        self._name = None
        self._fmode = None
        self._fstate = None
        self._tmode = None
        self._tstate = None
        self._hold_temp = hold_temp
        self._hold_set = False
        self._away = False
        self._away_temps = away_temps
        self._prev_temp = None
        # Fan circulate mode is only supported by the CT80 models.
        import radiotherm
        self._is_model_ct80 = isinstance(
            self.device, radiotherm.thermostat.CT80)

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Set the time on the device. This shouldn't be in the
        # constructor because it's a network call. We can't put it in
        # update() because calling it will clear any temporary mode or
        # temperature in the thermostat. So add it as a future job
        # for the event loop to run.
        self.hass.async_add_job(self.set_time)

    @property
    def name(self):
        """Return the name of the Radio Thermostat."""
        return self._name

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_HALVES

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        # Expose the raw fan / HVAC activity alongside the standard
        # climate attributes.
        return {
            ATTR_FAN: self._fstate,
            ATTR_MODE: self._tstate,
        }

    @property
    def fan_list(self):
        """List of available fan modes."""
        # Only CT80 models support the extra "circulate" mode.
        if self._is_model_ct80:
            return CT80_FAN_OPERATION_LIST
        return CT30_FAN_OPERATION_LIST

    @property
    def current_fan_mode(self):
        """Return whether the fan is on."""
        return self._fmode

    def set_fan_mode(self, fan_mode):
        """Turn fan on/off."""
        # Unknown modes are silently ignored.
        code = FAN_MODE_TO_CODE.get(fan_mode, None)
        if code is not None:
            self.device.fmode = code

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def current_operation(self):
        """Return the current operation: heat, cool or idle."""
        return self._current_operation

    @property
    def operation_list(self):
        """Return the operation modes list."""
        return OPERATION_LIST

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    @property
    def is_away_mode_on(self):
        """Return true if away mode is on."""
        return self._away

    @property
    def is_on(self):
        """Return true if on."""
        return self._tstate != STATE_IDLE

    def update(self):
        """Update and validate the data from the thermostat."""
        # Radio thermostats are very slow, and sometimes don't respond
        # very quickly.  So we need to keep the number of calls to them
        # to a bare minimum or we'll hit the HASS 10 sec warning.  We
        # have to make one call to /tstat to get temps but we'll try and
        # keep the other calls to a minimum.  Even with this, these
        # thermostats tend to time out sometimes when they're actively
        # heating or cooling.

        # First time - get the name from the thermostat.  This is
        # normally set in the radio thermostat web app.
        if self._name is None:
            self._name = self.device.name['raw']

        # Request the current state from the thermostat.
        import radiotherm
        try:
            data = self.device.tstat['raw']
        except radiotherm.validate.RadiothermTstatError:
            # Device was busy; keep the previous state rather than
            # publishing invalid values.
            _LOGGER.error('%s (%s) was busy (invalid value returned)',
                          self._name, self.device.host)
            return

        current_temp = data['temp']

        # Map thermostat values into various STATE_ flags.
        self._current_temperature = current_temp
        self._fmode = CODE_TO_FAN_MODE[data['fmode']]
        self._fstate = CODE_TO_FAN_STATE[data['fstate']]
        self._tmode = CODE_TO_TEMP_MODE[data['tmode']]
        self._tstate = CODE_TO_TEMP_STATE[data['tstate']]

        self._current_operation = self._tmode
        if self._tmode == STATE_COOL:
            self._target_temperature = data['t_cool']
        elif self._tmode == STATE_HEAT:
            self._target_temperature = data['t_heat']
        elif self._tmode == STATE_AUTO:
            # This doesn't really work - tstate is only set if the HVAC is
            # active. If it's idle, we don't know what to do with the target
            # temperature.
            if self._tstate == STATE_COOL:
                self._target_temperature = data['t_cool']
            elif self._tstate == STATE_HEAT:
                self._target_temperature = data['t_heat']
        else:
            self._current_operation = STATE_IDLE

    def set_temperature(self, **kwargs):
        """Set new target temperature.

        Accepts ATTR_TEMPERATURE plus an internal 'hold_changed'
        keyword used by the away-mode helpers below.
        """
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return

        # Snap to the device's 0.5 degree resolution.
        temperature = round_temp(temperature)

        if self._current_operation == STATE_COOL:
            self.device.t_cool = temperature
        elif self._current_operation == STATE_HEAT:
            self.device.t_heat = temperature
        elif self._current_operation == STATE_AUTO:
            # In auto mode, route the setpoint to whichever side is
            # currently active.
            if self._tstate == STATE_COOL:
                self.device.t_cool = temperature
            elif self._tstate == STATE_HEAT:
                self.device.t_heat = temperature

        # Only change the hold if requested or if hold mode was turned
        # on and we haven't set it yet.
        if kwargs.get('hold_changed', False) or not self._hold_set:
            if self._hold_temp or self._away:
                self.device.hold = 1
                self._hold_set = True
            else:
                self.device.hold = 0

    def set_time(self):
        """Set device time."""
        # Calling this clears any local temperature override and
        # reverts to the scheduled temperature.
        now = datetime.datetime.now()
        self.device.time = {
            'day': now.weekday(),
            'hour': now.hour,
            'minute': now.minute
        }

    def set_operation_mode(self, operation_mode):
        """Set operation mode (auto, cool, heat, off)."""
        if operation_mode in (STATE_OFF, STATE_AUTO):
            self.device.tmode = TEMP_MODE_TO_CODE[operation_mode]
        # Setting t_cool or t_heat automatically changes tmode.
        elif operation_mode == STATE_COOL:
            self.device.t_cool = self._target_temperature
        elif operation_mode == STATE_HEAT:
            self.device.t_heat = self._target_temperature

    def turn_away_mode_on(self):
        """Turn away on.

        The RTCOA app simulates away mode by using a hold.
        """
        away_temp = None
        if not self._away:
            # Remember the current setpoint so away mode can be undone.
            self._prev_temp = self._target_temperature
            if self._current_operation == STATE_HEAT:
                away_temp = self._away_temps[0]
            elif self._current_operation == STATE_COOL:
                away_temp = self._away_temps[1]
        # NOTE(review): in auto/idle mode away_temp stays None, so
        # set_temperature() returns early and the hold is never
        # engaged -- confirm this is the intended behavior.
        self._away = True
        self.set_temperature(temperature=away_temp, hold_changed=True)

    def turn_away_mode_off(self):
        """Turn away off."""
        self._away = False
        # Restore the setpoint captured when away mode was enabled.
        self.set_temperature(temperature=self._prev_temp, hold_changed=True)
| |
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2014, OVH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Except as contained in this notice, the name of OVH and or its trademarks
# (and among others RunAbove) shall not be used in advertising or otherwise to
# promote the sale, use or other dealings in this Software without prior
# written authorization from OVH.
import unittest
import json
import mock
import runabove
from sys import version_info
# Python 3 removed the ``unicode`` builtin; alias it to ``str`` so the
# tests below can use a single name on both major versions.
if version_info[0] >= 3:
    unicode = str  # Python 3 str replaces unicode
class TestContainerManager(unittest.TestCase):
    """Test storage using RunAbove API.

    To play with objects we use swiftclient and we trust it's
    tested properly :)
    """

    # Fixture values shared by the tests below.
    name = 'test2'
    region = 'SBG-1'

    # Canned API answer for a container listing.
    answer_list = '''[
        {
            "totalObjects": 5,
            "name": "test",
            "stored": 1024,
            "region": "SBG-1"
        },
        {
            "totalObjects": 0,
            "name": "test2",
            "stored": 0,
            "region": "SBG-1"
        }
    ]'''

    # Canned answer for a single container.  NOTE(review): not
    # referenced by any test in this class -- confirm it is still
    # needed.
    answer_one = '''{
        "totalObjects": 0,
        "name": "test2",
        "stored": 0,
        "region": "SBG-1"
    }'''

    # Canned keystone-style token answer with a service catalog.
    answer_token = '''{
        "token": {
            "catalog": [
                {
                    "endpoints": [
                        {
                            "id": "af64asqa26fda457c0e974f3f",
                            "interface": "public",
                            "legacy_endpoint_id": "fa56f4as64c9a8f4asdf496",
                            "region": "SBG-1",
                            "url": "https://network.compute.sbg-1.runabove.io/"
                        },
                        {
                            "id": "5af5d46as48q911zs654fd69fc84",
                            "interface": "public",
                            "legacy_endpoint_id": "q984fSDFsa4654164asd98f42c",
                            "region": "BHS-1",
                            "url": "https://network.compute.bhs-1.runabove.io/"
                        }
                    ],
                    "id": "022012d24e3c446948qwef6as135c68j7uy97",
                    "type": "network"
                },
                {
                    "endpoints": [
                        {
                            "id": "asf489a4f541q4f985s1f631a89a7ffd",
                            "interface": "public",
                            "legacy_endpoint_id": "f7a1afas65qfsASDc1456qf6",
                            "region": "BHS-1",
                            "url": "https://storage.bhs-1.runabove.io/v1/AUTH_fRs614a"
                        },
                        {
                            "id": "aq98465ASDG46543dfag46eg86eg1s32",
                            "interface": "public",
                            "legacy_endpoint_id": "fAFASd73251aplnxzq9899eb68c7",
                            "region": "SBG-1",
                            "url": "https://storage.sbg-1.runabove.io/v1/AUTH_4f6sa5df"
                        }
                    ],
                    "id": "3c7237csdfasd45f4615a654dc9awd4f",
                    "type": "object-store"
                }
            ],
            "expires_at": "2014-07-05T10:40:02.799784Z",
            "issued_at": "2014-07-04T10:40:02.799807Z"
        },
        "X-Auth-Token": "mbRArjDDI6fpZQRaxg98USPsz1fuK3Jl17ZHxb"
    }'''

    # Token answer with an empty service catalog.
    answer_token_empty = '''{
        "token": {
            "catalog": [],
            "expires_at": "2014-07-05T10:40:02.799784Z",
            "issued_at": "2014-07-04T10:40:02.799807Z"
        },
        "X-Auth-Token": "mbRArjDDI6fpZQRaxg98USPsz1fuK3Jl17ZHxb"
    }'''

    @mock.patch('runabove.wrapper_api')
    @mock.patch('runabove.client')
    def setUp(self, mock_wrapper, mock_client):
        """Build a ContainerManager wired to mocked API objects.

        NOTE(review): stacked @mock.patch decorators inject mocks
        bottom-up, so the first parameter actually receives the
        'runabove.client' patch and the second 'runabove.wrapper_api';
        the parameter names appear swapped -- confirm intent.
        """
        self.mock_wrapper = mock_wrapper
        self.mock_client = mock_client
        self.mock_client.regions = runabove.region.RegionManager(mock_wrapper,
                                                                 mock_client)
        self.containers = runabove.storage.ContainerManager(mock_wrapper,
                                                            mock_client)

    def test_base_path(self):
        """The manager talks to the /storage API base path."""
        self.assertEqual(self.containers.basepath, '/storage')

    def test_list(self):
        """list() wraps every API result in a Container object."""
        self.mock_wrapper.get.return_value = json.loads(self.answer_list)
        container_list = self.containers.list()
        self.mock_wrapper.get.assert_called_once_with(self.containers.basepath)
        self.assertIsInstance(container_list, list)
        self.assertEqual(len(container_list), 2)
        for container in container_list:
            self.assertIsInstance(container, runabove.storage.Container)

    def test_list_by_region(self):
        """list_by_region() filters by region and builds Region objects."""
        self.mock_wrapper.get.return_value = json.loads(self.answer_list)
        container_list = self.containers.list_by_region(self.region)
        self.mock_wrapper.get.assert_called_once_with(
            self.containers.basepath,
            {'region': self.region}
        )
        self.assertIsInstance(container_list, list)
        self.assertEqual(len(container_list), 2)
        for container in container_list:
            self.assertIsInstance(container, runabove.storage.Container)
            self.assertIsInstance(container.region, runabove.region.Region)
            self.assertEqual(container.region.name, self.region)

    @mock.patch('swiftclient.client.Connection')
    def test_get_swift_client(self, mock_swiftclient):
        """_get_swift_client() resolves a token/endpoint pair per region."""
        mock_get_token = self.containers._handler.tokens.get
        mock_get_token.return_value.auth_token = 'token'
        mock_get_token.return_value.endpoint = 'http://url'
        swift = self.containers._get_swift_client('REGION-1')
        mock_get_token.assert_called_once_with()
        mock_get_token.return_value.get_endpoint.assert_called_once_with('object-store', 'REGION-1')
        self.assertIsInstance(swift, dict)
        self.assertEqual(['client', 'endpoint'], sorted(swift.keys()))

    @mock.patch('swiftclient.client.Connection')
    def test_swift_call(self, mock_swiftclient):
        """_swift_call() dispatches to the cached per-region client."""
        swifts = {
            'BHS-1': {
                'client' : mock_swiftclient,
                'endpoint' : 'http://endpoint'
            }
        }
        self.containers._swifts = swifts
        self.containers._swift_call('BHS-1', 'put_container')

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_get_by_name(self, mock_swift_call):
        """get_by_name() issues a HEAD and returns a Container."""
        container = self.containers.get_by_name(self.region, self.name)
        mock_swift_call.assert_called_once_with(
            self.region,
            'head_container',
            self.name
        )
        self.assertIsInstance(container, runabove.storage.Container)
        self.assertEqual(container.name, self.name)

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_delete(self, mock_swift_call):
        """delete() forwards to swift's delete_container."""
        self.containers.delete(self.region, self.name)
        mock_swift_call.assert_called_once_with(
            self.region,
            'delete_container',
            self.name
        )

    @mock.patch('runabove.storage.ContainerManager.get_by_name')
    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_create_public(self, mock_swift_call, mock_get_by_name):
        """create(public=True) sets the world-readable ACL header."""
        self.containers.create(self.region, self.name, public=True)
        mock_swift_call.assert_called_once_with(
            self.region,
            'put_container',
            self.name,
            headers={'X-Container-Read': '.r:*,.rlistings'}
        )
        mock_get_by_name.assert_called_once_with(self.region, self.name)

    @mock.patch('runabove.storage.ContainerManager.get_by_name')
    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_create_private(self, mock_swift_call, mock_get_by_name):
        """create() without flags sends no ACL headers."""
        self.containers.create(self.region, self.name)
        mock_swift_call.assert_called_once_with(
            self.region,
            'put_container',
            self.name,
            headers={}
        )
        mock_get_by_name.assert_called_once_with(self.region, self.name)

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_set_public(self, mock_swift_call):
        """set_public() posts the world-readable ACL."""
        self.containers.set_public(self.region, self.name)
        mock_swift_call.assert_called_once_with(
            self.region,
            'post_container',
            self.name,
            headers = {'X-Container-Read': '.r:*,.rlistings'}
        )

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_set_public_with_private(self, mock_swift_call):
        """set_public(public=False) clears the read ACL."""
        self.containers.set_public(self.region, self.name, public=False)
        mock_swift_call.assert_called_once_with(
            self.region,
            'post_container',
            self.name,
            headers = {'X-Container-Read': ''}
        )

    @mock.patch('runabove.storage.ContainerManager.set_public')
    def test_set_private(self, mock_set_public):
        """set_private() delegates to set_public(public=False)."""
        self.containers.set_private(self.region, self.name)
        mock_set_public.assert_called_once_with(
            self.region,
            self.name,
            public=False
        )

    def test_get_region_url(self):
        """get_region_url() returns the cached endpoint for a region.

        NOTE(review): this assigns ``containers.swifts`` while the
        not-found test below assigns ``containers._swifts`` -- confirm
        which attribute the manager actually reads.
        """
        swifts = {
            'BHS-1': {
                'endpoint' : 'http://endpoint'
            }
        }
        self.containers.swifts = swifts
        url = self.containers.get_region_url('BHS-1')
        self.assertEqual(url, 'http://endpoint')

    def test_get_region_url_not_found(self):
        """An unknown region raises ResourceNotFoundError."""
        self.containers._swifts = {}
        with self.assertRaises(runabove.exception.ResourceNotFoundError):
            self.containers.get_region_url('BHS-1')

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_copy_object(self, mock_swift_call):
        """copy_object() defaults to copying within the same container."""
        self.containers.copy_object(self.region, self.name, 'Test')
        headers = {
            'X-Copy-From': '/' + self.name + '/Test',
            'content-length': 0
        }
        mock_swift_call.assert_called_once_with(
            self.region,
            'put_object',
            self.name,
            'Test',
            None,
            headers=headers
        )

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_copy_object_other_container(self, mock_swift_call):
        """copy_object(to_container=...) targets the other container."""
        self.containers.copy_object(self.region, self.name, 'Test',
                                    to_container='test1')
        headers = {
            'X-Copy-From': '/' + self.name + '/Test',
            'content-length': 0
        }
        mock_swift_call.assert_called_once_with(
            self.region,
            'put_object',
            'test1',
            'Test',
            None,
            headers=headers
        )

    @mock.patch('runabove.storage.ContainerManager._get_swift_client')
    def test_swifts(self, mock_get_swift_client):
        """The swifts property lazily builds per-region clients."""
        mock_get_swift_client.return_value = {}
        swifts = self.containers.swifts
        mock_get_swift_client.assert_called_once()
        self.assertIsInstance(swifts, dict)
class TestContainer(unittest.TestCase):
    """Tests for the Container object returned by ContainerManager."""

    container_name = 'MyTestContainer'

    # Canned swift get_container answer: (headers, object list).
    answer_list = '''[
        [""],
        [
            {
                "name": "obj1",
                "bytes": 20,
                "last_modified": "Thu, 31 Jul 2014 07:57:30 GMT",
                "content_type": "image/png"
            },
            {
                "name": "obj2",
                "bytes": 26,
                "last_modified": "Thu, 31 Jul 2014 07:58:30 GMT",
                "content_type": "image/png"
            }
        ]
    ]'''

    # Canned swift head_object answer (headers only).
    answer_head_object = {
        'content-length': '0',
        'accept-ranges': 'bytes',
        'last-modified': 'Thu, 31 Jul 2014 07:57:30 GMT',
        'connection': 'close',
        'etag': 'd41d8cd99f00b204e9800998ecf8427f',
        'x-timestamp': '1406793450.95376',
        'x-trans-id': 'txbcbed42b0efd46a7aace3-0054da0217',
        'date': 'Thu, 31 Jul 2014 08:45:11 GMT',
        'content-type': 'application/octet-stream'
    }

    # Canned swift get_object answer: (headers, body).
    answer_get_object = (
        {
            'content-length': '0',
            'accept-ranges': 'bytes',
            'last-modified': 'Thu, 31 Jul 2014 07:57:30 GMT',
            'connection': 'close',
            'etag': 'd41d8cd99f00b204e9800998ecf8427f',
            'x-timestamp': '1406793450.95376',
            'x-trans-id': 'txbcbed42b0efd46a7aace3-0054da0217',
            'date': 'Thu, 31 Jul 2014 08:45:11 GMT',
            'content-type': 'application/octet-stream'
        },
        'data'
    )

    @mock.patch('runabove.region.Region')
    @mock.patch('runabove.storage.ContainerManager')
    def setUp(self, mock_containers, mock_region):
        """Build a Container around a mocked manager and region."""
        self.mock_containers = mock_containers
        self.mock_region = mock_region
        self.container = runabove.storage.Container(
            self.mock_containers,
            self.container_name,
            self.mock_region,
            meta=None
        )

    def test_list_objects(self):
        """list_objects() wraps each swift entry in an ObjectStored."""
        answer = json.loads(self.answer_list)
        self.mock_containers._swift_call.return_value = answer
        object_list = self.container.list_objects()
        self.mock_containers._swift_call.assert_called_once_with(
            self.mock_region.name,
            'get_container',
            self.container_name,
            full_listing=True
        )
        self.assertIsInstance(object_list, list)
        self.assertEqual(len(object_list), 2)
        for obj in object_list:
            self.assertIsInstance(obj, runabove.storage.ObjectStored)

    def _get_object_by_name(self, download=False):
        """Shared driver: fetch an object with or without its body."""
        swift_answer = self.answer_head_object
        call = 'head_object'
        if download:
            swift_answer = self.answer_get_object
            call = 'get_object'
        self.mock_containers._swift_call.return_value = swift_answer
        obj = self.container.get_object_by_name('TestObj', download)
        self.mock_containers._swift_call.assert_called_once_with(
            self.mock_region.name,
            call,
            self.container_name,
            'TestObj'
        )
        self.assertIsInstance(obj, runabove.storage.ObjectStored)
        if download:
            self.assertEqual(obj._data, 'data')
        else:
            self.assertEqual(obj._data, None)

    def test_get_object_by_name_without_download(self):
        """HEAD only: metadata retrieved, no body cached."""
        self._get_object_by_name()

    def test_get_object_by_name_with_download(self):
        """GET: the object's body is cached on the instance."""
        self._get_object_by_name(download=True)

    def test_delete(self):
        """delete() delegates to the manager."""
        self.container.delete()
        self.mock_containers.delete.assert_called_once_with(
            self.mock_region,
            self.container
        )

    def test_delete_object(self):
        """delete_object() issues a swift delete_object call."""
        self.container.delete_object('Test')
        self.mock_containers._swift_call.assert_called_once_with(
            self.mock_region,
            'delete_object',
            self.container.name,
            'Test'
        )

    @mock.patch('runabove.storage.Container.get_object_by_name')
    def test_create_object(self, mock_get_object_by_name):
        """create_object() uploads content via put_object."""
        obj = self.container.create_object('Test', 'content')
        self.mock_containers._swift_call.assert_called_once_with(
            self.mock_region.name,
            'put_object',
            self.container.name,
            'Test',
            'content',
            headers=None
        )

    @mock.patch('runabove.storage.ObjectStored')
    def test_copy(self, mock_obj):
        """copy_object() delegates to the manager with all targets."""
        to_container = 'CopyTo'
        new_object_name = 'NewName'
        self.container.copy_object(mock_obj, to_container, new_object_name)
        self.mock_containers.copy_object.assert_called_once_with(
            self.mock_region.name,
            self.container,
            mock_obj,
            to_container,
            new_object_name
        )

    def test_set_public(self):
        """set_public() delegates to the manager."""
        self.container.set_public()
        self.mock_containers.set_public.assert_called_once_with(
            self.mock_region.name,
            self.container
        )

    def test_set_private(self):
        """set_private() delegates to the manager."""
        self.container.set_private()
        self.mock_containers.set_private.assert_called_once_with(
            self.mock_region.name,
            self.container
        )

    def test_url(self):
        """url appends the container name to the region endpoint."""
        base_url = 'https://url-of-endpoint'
        self.mock_containers.get_region_url.return_value = base_url
        url = self.container.url
        self.mock_containers.get_region_url.assert_called_once_with(
            self.mock_region.name
        )
        self.assertEqual(url, base_url + '/' + self.container_name)

    @mock.patch('runabove.storage.Container')
    def test_get_meta(self, mock_cnt):
        """The meta property lazily re-fetches the container."""
        fake_meta = {'X-meta': 'meta'}
        mock_cnt._meta = fake_meta
        self.mock_containers.get_by_name.return_value = mock_cnt
        meta = self.container.meta
        self.mock_containers.get_by_name.assert_called_once_with(
            self.mock_region.name,
            self.container.name,
            list_objects=False
        )
        self.assertEqual(meta, fake_meta)

    def test_set_meta(self):
        """Assigning meta must not raise."""
        fake_meta = {'X-meta': 'meta'}
        self.container.meta = fake_meta
class TestObjectStored(unittest.TestCase):
    """Tests for the ObjectStored wrapper around a swift object."""

    obj_name = 'MyTestObject'

    @mock.patch('runabove.storage.Container')
    def setUp(self, mock_container):
        """Build an ObjectStored attached to a mocked container."""
        self.mock_container = mock_container
        self.obj = runabove.storage.ObjectStored(
            self.mock_container,
            self.obj_name
        )

    @mock.patch('runabove.storage.ObjectStored')
    def test_data(self, mock_obj):
        """The data property lazily downloads the object's body."""
        fake_data = 'SomeData'
        mock_obj._data = fake_data
        self.mock_container.get_object_by_name.return_value = mock_obj
        data = self.obj.data
        self.mock_container.get_object_by_name.assert_called_once_with(
            self.obj.name,
            download=True
        )
        self.assertEqual(data, fake_data)

    @mock.patch('runabove.storage.ObjectStored')
    def test_get_meta(self, mock_obj):
        """The meta property re-fetches headers without the body."""
        fake_meta = {'X-meta': 'meta'}
        mock_obj._meta = fake_meta
        self.mock_container.get_object_by_name.return_value = mock_obj
        meta = self.obj.meta
        self.mock_container.get_object_by_name.assert_called_once_with(
            self.obj.name,
            download=False
        )
        self.assertEqual(meta, fake_meta)

    @mock.patch('runabove.storage.ContainerManager._swift_call')
    def test_set_meta(self, mock_swift_call):
        """Assigning meta must not raise."""
        fake_meta = {'X-meta': 'meta'}
        self.obj.meta = fake_meta

    @mock.patch('runabove.storage.ObjectStored')
    def test_data_already_downloaded(self, mock_obj):
        """A cached body is returned without a second download."""
        fake_data = 'SomeData'
        self.obj._data = fake_data
        data = self.obj.data
        self.mock_container.get_object_by_name.assert_not_called()
        self.assertEqual(data, fake_data)

    def test_url(self):
        """url appends the object name to the container URL."""
        base_url = 'https://url-of-endpoint/containerName'
        self.mock_container.url = base_url
        url = self.obj.url
        self.assertEqual(url, base_url + '/' + self.obj_name)

    def test_delete(self):
        """delete() delegates to the owning container."""
        self.obj.delete()
        self.mock_container.delete_object.assert_called_once_with(
            self.obj
        )

    def test_copy(self):
        """copy() delegates to the owning container."""
        to_container = 'CopyTo'
        new_object_name = 'NewName'
        self.obj.copy(to_container, new_object_name)
        self.mock_container.copy_object.assert_called_once_with(
            self.obj,
            to_container,
            new_object_name
        )
# Allow running this test module directly with ``python``.
if __name__ == '__main__':
    unittest.main()
| |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# NOTE(review): database credentials are committed in plain text here;
# consider loading them from environment variables or a non-versioned
# local settings file instead.
DATABASES = {
    'default':{
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'merry',
        'USER': 'root',
        'PASSWORD': '1qaz,2wsx',
        'HOST': 'localhost',
        'PORT': '',
    }
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'Asia/Shanghai'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
# NOTE(review): this key is committed to version control; production should
# load a different key from the environment or a non-versioned file.
SECRET_KEY = r"9u=i3586dkyn3!2d@x(a*asxs7c30q4-p6k8u8zji&-74!!j^b"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE(review): an empty list rejects every host when DEBUG is False;
# presumably the deployment settings module fills this in -- verify.
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## AUTHENTICATION_BACKENDS
# Userena and guardian back-ends are listed ahead of Django's ModelBackend.
AUTHENTICATION_BACKENDS = (
    'userena.backends.UserenaAuthenticationBackend',
    'guardian.backends.ObjectPermissionBackend',
    'django.contrib.auth.backends.ModelBackend',
)
########## END AUTHENTICATION_BACKENDS
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.core.context_processors.tz',
    'django.contrib.messages.context_processors.messages',
    'django.core.context_processors.request',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
    normpath(join(SITE_ROOT, 'templates')),
)
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
    # Default Django middleware.
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'debug_toolbar.middleware.DebugToolbarMiddleware',
    # johnny-cache middleware for queryset caching.
    'johnny.middleware.LocalStoreClearMiddleware',
    'johnny.middleware.QueryCacheMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## SOME JOHNNYCACHE SETTING
# Memcached backend wrapped by johnny-cache (see middleware above).
CACHES = {
    'default' : dict(
        BACKEND = 'johnny.backends.memcached.MemcachedCache',
        LOCATION = ['127.0.0.1:11211'],
        JOHNNY_CACHE = True,
    )
}
JOHNNY_MIDDLEWARE_KEY_PREFIX='jc_myproj'
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'debug_toolbar',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin panel and documentation:
    'django.contrib.admin',
    # 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'userena',
    'guardian',
    'easy_thumbnails',
    'accounts',
    'booking',
    'captcha',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## SOUTH CONFIGURATION
# See: http://south.readthedocs.org/en/latest/installation.html#configuring-your-django-installation
INSTALLED_APPS += (
    # Database migration helpers:
    'south',
)
# INTERNAL_IPS = ('127.0.0.1',)
# Don't need to use South when setting up a test database.
SOUTH_TESTS_MIGRATE = False
########## END SOUTH CONFIGURATION
# django-guardian setting: id of the anonymous user row in the database.
ANONYMOUS_USER_ID = -1
# Userena/django profile model for per-user data.
AUTH_PROFILE_MODULE = 'accounts.MyProfile'
#### tencent email backend ####
# NOTE(review): SMTP, SMS and payment credentials below are committed in
# plain text; consider moving them to environment variables or a
# non-versioned settings file.
TENCENT_EMAIL_HOST = "smtp.exmail.qq.com"
TENCENT_EMAIL_PORT = 25
TENCENT_EMAIL_USER = "info@merryservices.com"
TENCENT_FROM_EMAIL = "Merry Services <info@merryservices.com>"
TENCENT_EMAIL_PASSWORD = "1qaz,2wsx"
SITE_HOST = 'www.merryservices.com'
# Standard Django SMTP settings (duplicate the TENCENT_* values above).
EMAIL_HOST = 'smtp.exmail.qq.com'
EMAIL_PORT = 25
EMAIL_HOST_USER = "info@merryservices.com"
EMAIL_HOST_PASSWORD = "1qaz,2wsx"
DEFAULT_FROM_EMAIL = "Merry Services <info@merryservices.com>"
INTERNAL_BOOKINGS_EMAIL = "bookings@merryservices.com"
# Options for the 'captcha' app listed in LOCAL_APPS.
CAPTCHA_NOISE_FUNCTIONS = ()
CAPTCHA_OUTPUT_FORMAT = u'<font>%(image)s</font> %(hidden_field)s %(text_field)s'
CAPTCHA_LENGTH = 5
#CAPTCHA_BACKGROUND_COLOR = '#55DCCD'
# SMS gateway credentials (see plain-text note above).
SMS_USER = "56293"
SMS_PWD= "57bb88e2680b89a2046383a815ede6b5"
#### alipay ####
# Alipay merchant credentials.
ALIPAY_PID = "2088511010209762"
ALIPAY_KEY = "293inmjpu54yik731sbgo2gmjwlmvgu4"
ALIPAY_SELLER_EMAIL = "services@merryservices.com"
| |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import os
import sys
# Make the repository root importable so the local "pattern" package is found.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import unittest
from pattern import graph
from pattern.graph import commonsense
from builtins import str, bytes, int, dict
from builtins import map, zip, filter
from builtins import object, range
#---------------------------------------------------------------------------------------------------
class TestUtilityFunctions(unittest.TestCase):
    """Tests for the small helper functions in pattern.graph."""

    def setUp(self):
        pass

    def test_deepcopy(self):
        # Objects with a copy() method are responsible for deep-copying
        # themselves; everything else is handled by graph.deepcopy().
        class Dummy(object):
            def __init__(self, i):
                self.i = i
            def copy(self):
                return Dummy(graph.deepcopy(self.i))
        samples = (
            None, True, False,
            "a",
            1, 1.0, int(1), complex(1),
            list([1]), tuple([1]), set([1]), frozenset([1]),
            dict(a=1), {frozenset(["a"]): 1}, {Dummy(1): 1},
            Dummy(1))
        # Container and custom values must come back as distinct objects.
        for original in samples:
            duplicate = graph.deepcopy(original)
            if isinstance(duplicate, (list, tuple, set, dict, Dummy)):
                self.assertTrue(id(original) != id(duplicate))
        print("pattern.graph.deepcopy()")

    def test_unique(self):
        # unique() returns a list copy with duplicate items removed.
        deduplicated = graph.unique([1, 1, 1])
        self.assertEqual(len(deduplicated), 1)
        self.assertEqual(deduplicated[0], 1)
        print("pattern.graph.unique()")

    def test_coordinates(self):
        # coordinates(10, 10, 100, 30) = point at distance 100, angle 30
        # from (10, 10): (10 + 100*cos(30), 10 + 100*sin(30)).
        px, py = graph.coordinates(10, 10, 100, 30)
        self.assertAlmostEqual(px, 96.60, places=2)
        self.assertAlmostEqual(py, 60.00, places=2)
        print("pattern.graph.coordinates()")
#---------------------------------------------------------------------------------------------------
class TestNode(unittest.TestCase):
    """Tests for graph.Node properties, edges and spreading activation.

    assertTrue(a == b)-style checks were replaced with the dedicated
    unittest assertions (assertEqual, assertIsNone, ...) so failures report
    the actual values instead of just "False is not true".
    """

    def setUp(self):
        # Create test graph: a -- b -- c.
        self.g = graph.Graph()
        self.g.add_node("a", radius=5, stroke=(0, 0, 0, 1), strokewidth=1, fill=None, text=(0, 0, 0, 1))
        self.g.add_node("b", radius=5)
        self.g.add_node("c", radius=5)
        self.g.add_edge("a", "b")
        self.g.add_edge("b", "c")

    def test_node(self):
        # Assert node properties.
        n = self.g["a"]
        self.assertIsInstance(n, graph.Node)
        self.assertEqual(n, self.g["a"])
        self.assertNotEqual(n, self.g["b"])
        self.assertEqual(n.graph, self.g)
        self.assertEqual(n._distance, self.g.distance)
        self.assertEqual(n.id, "a")
        self.assertEqual(n.x, 0.0)
        self.assertEqual(n.y, 0.0)
        self.assertEqual(n.force.x, graph.Vector(0.0, 0.0).x)
        self.assertEqual(n.force.y, graph.Vector(0.0, 0.0).y)
        self.assertEqual(n.radius, 5)
        self.assertIsNone(n.fill)
        self.assertEqual(n.stroke, (0, 0, 0, 1))
        self.assertEqual(n.strokewidth, 1)
        self.assertEqual(n.text.string, "a")
        self.assertEqual(n.text.width, 85)
        self.assertEqual(n.text.fill, (0, 0, 0, 1))
        self.assertEqual(n.text.fontsize, 11)
        self.assertFalse(n.fixed)
        self.assertEqual(n.weight, 0)
        self.assertEqual(n.centrality, 0)
        print("pattern.graph.Node")

    def test_edge(self):
        # Assert node edges.
        n1 = self.g["a"]
        n2 = self.g["b"]
        self.assertEqual(n1.edges[0].node1.id, "a")
        self.assertEqual(n1.edges[0].node2.id, "b")
        self.assertEqual(n1.links[0].id, "b")
        self.assertEqual(n1.links[0], self.g.edges[0].node2)
        self.assertEqual(n1.links.edge("b"), self.g.edges[0])
        self.assertEqual(n1.links.edge(n2), self.g.edges[0])
        print("pattern.graph.Node.links")
        print("pattern.graph.Node.edges")

    def test_flatten(self):
        # Assert node spreading activation.
        n = self.g["a"]
        self.assertEqual(set(n.flatten(depth=0)), set([n]))
        self.assertEqual(set(n.flatten(depth=1)), set([n, n.links[0]]))
        self.assertEqual(set(n.flatten(depth=2)), set(self.g.nodes))
        print("pattern.graph.Node.flatten()")

    def test_text(self):
        # text=None suppresses the node label entirely.
        n = self.g.add_node("d", text=None)
        self.assertIsNone(n.text)
        print("pattern.graph.Node.text")
#---------------------------------------------------------------------------------------------------
class TestEdge(unittest.TestCase):
    """Tests for graph.Edge properties.

    Uses assertEqual/assertIsInstance instead of assertTrue(a == b) so
    failures report the mismatching values.
    """

    def setUp(self):
        # Create test graph with a single fully-specified edge a -> b.
        self.g = graph.Graph()
        self.g.add_node("a")
        self.g.add_node("b")
        self.g.add_edge("a", "b", weight=0.0, length=1.0, type="is-a", stroke=(0, 0, 0, 1), strokewidth=1)

    def test_edge(self):
        # Assert edge properties.
        e = self.g.edges[0]
        self.assertIsInstance(e, graph.Edge)
        self.assertEqual(e.node1, self.g["a"])
        self.assertEqual(e.node2, self.g["b"])
        self.assertEqual(e.weight, 0.0)
        self.assertEqual(e.length, 1.0)
        self.assertEqual(e.type, "is-a")
        self.assertEqual(e.stroke, (0, 0, 0, 1))
        self.assertEqual(e.strokewidth, 1)
        print("pattern.graph.Edge")
#---------------------------------------------------------------------------------------------------
class TestGraph(unittest.TestCase):
    """Tests for graph.Graph construction, queries and algorithms.

    Fixes two inert assertions in test_copy: assertTrue(actual, expected)
    treats the second argument as a failure *message* and always passes;
    assertEqual actually compares the values. Other assertTrue(a == b)
    checks were converted to the dedicated unittest assertions.
    """

    def setUp(self):
        # Create test graph: a -- b -- c with a spring layout.
        self.g = graph.Graph(layout=graph.SPRING, distance=10.0)
        self.g.add_node("a")
        self.g.add_node("b")
        self.g.add_node("c")
        self.g.add_edge("a", "b")
        self.g.add_edge("b", "c")

    def test_graph(self):
        # Assert graph properties.
        g = self.g.copy()
        self.assertEqual(len(g.nodes), 3)
        self.assertEqual(len(g.edges), 2)
        self.assertEqual(g.distance, 10.0)
        self.assertEqual(g.density, 2 / 3.0)
        self.assertFalse(g.is_complete)
        self.assertFalse(g.is_sparse)
        self.assertTrue(g.is_dense)
        self.assertIsNone(g._adjacency)
        self.assertIsInstance(g.layout, graph.GraphLayout)
        self.assertIsInstance(g.layout, graph.GraphSpringLayout)
        print("pattern.graph.Graph")

    def test_graph_nodes(self):
        # Assert graph node addition and removal.
        g = self.g.copy()
        g.append(graph.Node, "d")
        g.add_node("e", base=graph.Node, root=True)
        self.assertIn("d", g)
        self.assertIn("e", g)
        self.assertEqual(g.root, g["e"])
        self.assertTrue(g["e"] == g.node("e") == g.nodes[-1])
        g.remove(g["d"])
        g.remove(g["e"])
        self.assertNotIn("d", g)
        self.assertNotIn("e", g)
        print("pattern.graph.Graph.add_node()")

    def test_graph_edges(self):
        # Assert graph edges.
        g = self.g.copy()
        v1 = g.add_edge("d", "e") # Automatically create Node(d) and Node(e).
        v2 = g.add_edge("d", "e") # Yields existing edge.
        v3 = g.add_edge("e", "d") # Opposite direction.
        self.assertEqual(v1, v2)
        self.assertEqual(v2, g.edge("d", "e"))
        self.assertEqual(v3, g.edge("e", "d"))
        self.assertEqual(g["d"].links.edge(g["e"]), v2)
        self.assertEqual(g["e"].links.edge(g["d"]), v3)
        g.remove(g["d"])
        g.remove(g["e"])
        # Edges d->e and e->d should now be removed automatically.
        self.assertEqual(len(g.edges), 2)
        print("pattern.graph.Graph.add_edge()")

    def test_cache(self):
        # Assert adjacency cache is flushed when nodes, edges or direction changes.
        g = self.g.copy()
        g.eigenvector_centrality()
        self.assertEqual(g._adjacency[0]["a"], {})
        self.assertEqual(g._adjacency[0]["b"]["a"], 1.0)
        g.add_node("d")
        g.add_node("e")
        self.assertIsNone(g._adjacency)
        g.betweenness_centrality()
        self.assertEqual(g._adjacency[0]["a"]["b"], 1.0)
        self.assertEqual(g._adjacency[0]["b"]["a"], 1.0)
        g.add_edge("d", "e", weight=0.0)
        g.remove(g.node("d"))
        g.remove(g.node("e"))
        print("pattern.graph.Graph._adjacency")

    def test_paths(self):
        # Assert node paths.
        g = self.g.copy()
        self.assertEqual(g.paths("a", "c"), g.paths(g["a"], g["c"]))
        self.assertEqual(g.paths("a", "c"), [[g["a"], g["b"], g["c"]]])
        self.assertEqual(g.paths("a", "c", length=2), [])
        # Assert node shortest paths.
        g.add_edge("a", "c")
        self.assertEqual(g.paths("a", "c", length=2), [[g["a"], g["c"]]])
        self.assertEqual(g.shortest_path("a", "c"), [g["a"], g["c"]])
        self.assertEqual(g.shortest_path("c", "a"), [g["c"], g["a"]])
        self.assertIsNone(g.shortest_path("c", "a", directed=True))
        g.remove(g.edge("a", "c"))
        g.add_node("d")
        self.assertIsNone(g.shortest_path("a", "d"))
        self.assertEqual(g.shortest_paths("a")["b"], [g["a"], g["b"]])
        self.assertEqual(g.shortest_paths("a")["c"], [g["a"], g["b"], g["c"]])
        self.assertIsNone(g.shortest_paths("a")["d"])
        self.assertIsNone(g.shortest_paths("c", directed=True)["a"])
        g.remove(g["d"])
        print("pattern.graph.Graph.paths()")
        print("pattern.graph.Graph.shortest_path()")
        print("pattern.graph.Graph.shortest_paths()")

    def test_eigenvector_centrality(self):
        # Assert eigenvector centrality.
        self.assertIsNone(self.g["a"]._weight)
        v = self.g.eigenvector_centrality()
        self.assertIsInstance(v["a"], float)
        self.assertEqual(v["a"], v[self.g.node("a")])
        self.assertLess(v["a"], v["c"])
        self.assertLess(v["b"], v["c"])
        print("pattern.graph.Graph.eigenvector_centrality()")

    def test_betweenness_centrality(self):
        # Assert betweenness centrality.
        self.assertIsNone(self.g["a"]._centrality)
        v = self.g.betweenness_centrality()
        self.assertIsInstance(v["a"], float)
        self.assertEqual(v["a"], v[self.g.node("a")])
        self.assertLess(v["a"], v["b"])
        self.assertLess(v["c"], v["b"])
        print("pattern.graph.Graph.betweenness_centrality()")

    def test_sorted(self):
        # Assert graph node sorting
        o1 = self.g.sorted(order=graph.WEIGHT, threshold=0.0)
        o2 = self.g.sorted(order=graph.CENTRALITY, threshold=0.0)
        self.assertEqual(o1[0], self.g["c"])
        self.assertEqual(o2[0], self.g["b"])
        print("pattern.graph.Graph.sorted()")

    def test_prune(self):
        # Assert leaf pruning.
        g = self.g.copy()
        g.prune(1)
        self.assertEqual(len(g), 1)
        self.assertEqual(g.nodes, [g["b"]])
        print("pattern.graph.Graph.prune()")

    def test_fringe(self):
        # Assert leaf fetching.
        g = self.g.copy()
        self.assertEqual(g.fringe(0), [g["a"], g["c"]])
        self.assertEqual(g.fringe(1), [g["a"], g["b"], g["c"]])
        print("pattern.graph.Graph.fringe()")

    def test_split(self):
        # Assert subgraph splitting.
        self.assertIsInstance(self.g.split(), list)
        self.assertIsInstance(self.g.split()[0], graph.Graph)
        print("pattern.graph.Graph.split()")

    def test_update(self):
        # Assert node position after updating layout algorithm.
        self.g.update()
        for n in self.g.nodes:
            self.assertNotEqual(n.x, 0)
            self.assertNotEqual(n.y, 0)
        self.g.layout.reset()
        for n in self.g.nodes:
            self.assertEqual(n.x, 0)
            self.assertEqual(n.y, 0)
        print("pattern.graph.Graph.update()")

    def test_copy(self):
        # Assert deep copy of Graph.
        g1 = self.g
        g2 = self.g.copy()
        self.assertEqual(set(g1), set(g2)) # Same node id's.
        # Same id, but distinct node objects. (The original compared "a"
        # with "b", which is trivially true and proved nothing.)
        self.assertNotEqual(id(g1["a"]), id(g2["a"]))
        g3 = self.g.copy(nodes=[self.g["a"], self.g["b"]])
        g3 = self.g.copy(nodes=["a", "b"]) # Node objects or id's both work.
        # Bug fix: these used assertTrue(actual, expected), which always
        # passed; assertEqual actually verifies the counts.
        self.assertEqual(len(g3.nodes), 2)
        self.assertEqual(len(g3.edges), 1)
        # Assert copy with subclasses of Node and Edge.
        class MyNode(graph.Node):
            pass
        class MyEdge(graph.Edge):
            pass
        g4 = graph.Graph()
        g4.append(MyNode, "a")
        g4.append(MyNode, "b")
        g4.append(MyEdge, "a", "b")
        g4 = g4.copy()
        self.assertIsInstance(g4.nodes[0], MyNode)
        self.assertIsInstance(g4.edges[0], MyEdge)
        print("pattern.graph.Graph.copy()")
#---------------------------------------------------------------------------------------------------
class TestGraphLayout(unittest.TestCase):
    """Tests for the base GraphLayout bookkeeping (bounds, iterations)."""

    def setUp(self):
        # Create test graph: a -- b -- c.
        self.g = graph.Graph(layout=graph.SPRING, distance=10.0)
        self.g.add_node("a")
        self.g.add_node("b")
        self.g.add_node("c")
        self.g.add_edge("a", "b")
        self.g.add_edge("b", "c")

    def test_layout(self):
        # Assert GraphLayout properties; assertEqual gives informative
        # failures (the original used assertTrue(a == b)).
        gl = graph.GraphLayout(graph=self.g)
        self.assertEqual(gl.graph, self.g)
        self.assertEqual(gl.bounds, (0, 0, 0, 0))
        self.assertEqual(gl.iterations, 0)
        gl.update()
        self.assertEqual(gl.iterations, 1)
        print("pattern.graph.GraphLayout")
class TestGraphSpringLayout(TestGraphLayout):
    """Tests for the force-based spring layout (setUp inherited).

    Converted assertTrue(a == b) / assertTrue(a < b) to
    assertEqual / assertLess / assertGreater for clearer failures.
    """

    def test_layout(self):
        # Assert GraphSpringLayout properties; one update() must expand the
        # bounding box around the origin in all four directions.
        gl = self.g.layout
        self.assertEqual(gl.graph, self.g)
        self.assertEqual(gl.k, 4.0)
        self.assertEqual(gl.force, 0.01)
        self.assertEqual(gl.repulsion, 50)
        self.assertEqual(gl.bounds, (0, 0, 0, 0))
        self.assertEqual(gl.iterations, 0)
        gl.update()
        self.assertEqual(gl.iterations, 1)
        self.assertLess(gl.bounds[0], 0)
        self.assertLess(gl.bounds[1], 0)
        self.assertGreater(gl.bounds[2], 0)
        self.assertGreater(gl.bounds[3], 0)
        print("pattern.graph.GraphSpringLayout")

    def test_distance(self):
        # Assert 2D distance; for (-100, 0) and (+100, 0) the result is
        # (dx, dy, d, d**2) = (200.0, 0.0, 200.0, 40000.0).
        n1 = graph.Node()
        n2 = graph.Node()
        n1.x = -100
        n2.x = +100
        d = self.g.layout._distance(n1, n2)
        self.assertEqual(d, (200.0, 0.0, 200.0, 40000.0))
        print("pattern.graph.GraphSpringLayout._distance")

    def test_repulsion(self):
        # Unconnected nodes a and c must drift apart after one update.
        gl = self.g.layout
        d1 = gl._distance(self.g["a"], self.g["c"])[2]
        gl.update()
        d2 = gl._distance(self.g["a"], self.g["c"])[2]
        self.assertGreater(d2, d1)
        self.g.layout.reset()
        print("pattern.graph.GraphSpringLayout._repulse()")

    def test_attraction(self):
        # Connected nodes a and b must be pulled together after one update.
        gl = self.g.layout
        self.g["a"].x = -100
        self.g["b"].y = +100
        d1 = gl._distance(self.g["a"], self.g["b"])[2]
        gl.update()
        d2 = gl._distance(self.g["a"], self.g["b"])[2]
        self.assertLess(d2, d1)
        print("pattern.graph.GraphSpringLayout._attract()")
#---------------------------------------------------------------------------------------------------
class TestGraphTraversal(unittest.TestCase):
    """Tests for module-level traversal helpers: search, paths, adjacency."""
    def setUp(self):
        # Create test graph: chain a-b-d-e, branch a-c, isolated node x.
        self.g = graph.Graph()
        self.g.add_edge("a", "b", weight=0.5)
        self.g.add_edge("a", "c")
        self.g.add_edge("b", "d")
        self.g.add_edge("d", "e")
        self.g.add_node("x")
    def test_search(self):
        # Assert depth-first vs. breadth-first search.
        def visit(node):
            a.append(node)
        def traversable(node, edge):
            # NOTE(review): implicitly returns None for every other edge;
            # the search apparently only blocks on an explicit False --
            # confirm before changing this to an explicit "return True".
            if edge.node2.id == "e":
                return False
        g = self.g
        a = []
        graph.depth_first_search(g["a"], visit, traversable)
        self.assertEqual(a, [g["a"], g["b"], g["d"], g["c"]])
        a = []
        graph.breadth_first_search(g["a"], visit, traversable)
        self.assertEqual(a, [g["a"], g["b"], g["c"], g["d"]])
        print("pattern.graph.depth_first_search()")
        print("pattern.graph.breadth_first_search()")
    def test_paths(self):
        # Assert depth-first all paths.
        g = self.g.copy()
        g.add_edge("a", "d")
        # Each case: (start id, stop id, max path length, expected paths).
        for id1, id2, length, path in (
            ("a", "a", 1, [["a"]]),
            ("a", "d", 3, [["a", "d"], ["a", "b", "d"]]),
            ("a", "d", 2, [["a", "d"]]),
            ("a", "d", 1, []),
            ("a", "x", 1, [])):
            p = graph.paths(g, id1, id2, length)
            self.assertEqual(p, path)
        print("pattern.graph.paths()")
    def test_edges(self):
        # Assert path of nodes to edges.
        g = self.g
        p = [g["a"], g["b"], g["d"], g["x"]]
        e = list(graph.edges(p))
        # There is no d-x edge, hence the trailing None.
        self.assertEqual(e, [g.edge("a", "b"), g.edge("b", "d"), None])
        print("pattern.graph.edges()")
    def test_adjacency(self):
        # Assert adjacency map with different settings.
        a = [
            graph.adjacency(self.g),
            graph.adjacency(self.g, directed=True),
            graph.adjacency(self.g, directed=True, reversed=True),
            graph.adjacency(self.g, stochastic=True),
            graph.adjacency(self.g, heuristic=lambda id1, id2: 0.1),
        ]
        # Normalize each map into a sorted, rounded structure so the literal
        # expectations below are order-independent.
        for i in range(len(a)):
            a[i] = sorted((id1, sorted((id2, round(w, 2)) for id2, w in p.items())) for id1, p in a[i].items())
        self.assertEqual(a[0], [
            ("a", [("b", 0.75), ("c", 1.0)]),
            ("b", [("a", 0.75), ("d", 1.0)]),
            ("c", [("a", 1.0)]),
            ("d", [("b", 1.0), ("e", 1.0)]),
            ("e", [("d", 1.0)]),
            ("x", [])])
        self.assertEqual(a[1], [
            ("a", [("b", 0.75), ("c", 1.0)]),
            ("b", [("d", 1.0)]),
            ("c", []),
            ("d", [("e", 1.0)]),
            ("e", []),
            ("x", [])])
        self.assertEqual(a[2], [
            ("a", []),
            ("b", [("a", 0.75)]),
            ("c", [("a", 1.0)]),
            ("d", [("b", 1.0)]),
            ("e", [("d", 1.0)]),
            ("x", [])])
        self.assertEqual(a[3], [
            ("a", [("b", 0.43), ("c", 0.57)]),
            ("b", [("a", 0.43), ("d", 0.57)]),
            ("c", [("a", 1.0)]),
            ("d", [("b", 0.5), ("e", 0.5)]),
            ("e", [("d", 1.0)]),
            ("x", [])])
        self.assertEqual(a[4], [
            ("a", [("b", 0.85), ("c", 1.1)]),
            ("b", [("a", 0.85), ("d", 1.1)]),
            ("c", [("a", 1.1)]),
            ("d", [("b", 1.1), ("e", 1.1)]),
            ("e", [("d", 1.1)]),
            ("x", [])])
        print("pattern.graph.adjacency()")
    def test_dijkstra_shortest_path(self):
        # Assert Dijkstra's algorithm (node1 -> node2).
        g = self.g.copy()
        g.add_edge("d", "a")
        # Each case: (start, stop, heuristic, directed, expected path).
        for id1, id2, heuristic, directed, path in (
            ("a", "d", None, False, ["a", "d"]),
            ("a", "d", None, True, ["a", "b", "d"]),
            ("a", "d", lambda id1, id2: id1 == "d" and id2 == "a" and 1 or 0, False, ["a", "b", "d"])):
            p = graph.dijkstra_shortest_path(g, id1, id2, heuristic, directed)
            self.assertEqual(p, path)
        print("pattern.graph.dijkstra_shortest_path()")
    def test_dijkstra_shortest_paths(self):
        # Assert Dijkstra's algorithm (node1 -> all).
        g = self.g.copy()
        g.add_edge("d", "a")
        a = [
            graph.dijkstra_shortest_paths(g, "a"),
            graph.dijkstra_shortest_paths(g, "a", directed=True),
            graph.dijkstra_shortest_paths(g, "a", heuristic=lambda id1, id2: id1 == "d" and id2 == "a" and 1 or 0)
        ]
        # Normalize each {id: path} map to a sorted item list.
        for i in range(len(a)):
            a[i] = sorted(a[i].items())
        self.assertEqual(a[0], [
            ("a", ["a"]),
            ("b", ["a", "b"]),
            ("c", ["a", "c"]),
            ("d", ["a", "d"]),
            ("e", ["a", "d", "e"]),
            ("x", None)])
        self.assertEqual(a[1], [
            ("a", ["a"]),
            ("b", ["a", "b"]),
            ("c", ["a", "c"]),
            ("d", ["a", "b", "d"]),
            ("e", ["a", "b", "d", "e"]),
            ("x", None)])
        self.assertEqual(a[2], [
            ("a", ["a"]),
            ("b", ["a", "b"]),
            ("c", ["a", "c"]),
            ("d", ["a", "b", "d"]),
            ("e", ["a", "b", "d", "e"]),
            ("x", None)])
        print("pattern.graph.dijkstra_shortest_paths()")
    def test_floyd_warshall_all_pairs_distance(self):
        # Assert all pairs path distance.
        p1 = graph.floyd_warshall_all_pairs_distance(self.g)
        p2 = sorted((id1, sorted((id2, round(w, 2)) for id2, w in p.items())) for id1, p in p1.items())
        self.assertEqual(p2, [
            ("a", [("a", 0.00), ("b", 0.75), ("c", 1.00), ("d", 1.75), ("e", 2.75)]),
            ("b", [("a", 0.75), ("b", 0.00), ("c", 1.75), ("d", 1.00), ("e", 2.00)]),
            ("c", [("a", 1.00), ("b", 1.75), ("c", 2.00), ("d", 2.75), ("e", 3.75)]),
            ("d", [("a", 1.75), ("b", 1.00), ("c", 2.75), ("d", 0.00), ("e", 1.00)]),
            ("e", [("a", 2.75), ("b", 2.00), ("c", 3.75), ("d", 1.00), ("e", 2.00)]),
            ("x", [])])
        # Assert predecessor tree.
        self.assertEqual(graph.predecessor_path(p1.predecessors, "a", "d"), ["a", "b", "d"])
        print("pattern.graph.floyd_warshall_all_pairs_distance()")
#---------------------------------------------------------------------------------------------------
class TestGraphPartitioning(unittest.TestCase):
    """Tests for set helpers, subgraph partitioning and cliques."""

    def setUp(self):
        # Create test graph: component {a,b,c,d,e}, component {x,y}, loner z.
        self.g = graph.Graph()
        self.g.add_edge("a", "b", weight=0.5)
        self.g.add_edge("a", "c")
        self.g.add_edge("b", "d")
        self.g.add_edge("d", "e")
        self.g.add_edge("x", "y")
        self.g.add_node("z")

    def test_union(self):
        self.assertEqual(graph.union([1, 2], [2, 3]), [1, 2, 3])

    def test_intersection(self):
        self.assertEqual(graph.intersection([1, 2], [2, 3]), [2])

    def test_difference(self):
        self.assertEqual(graph.difference([1, 2], [2, 3]), [1])

    def test_partition(self):
        # Assert unconnected subgraph partitioning (largest subgraph first).
        g = graph.partition(self.g)
        self.assertEqual(len(g), 3)
        self.assertIsInstance(g[0], graph.Graph)
        # Bug fix: these three checks used assertTrue(actual, expected),
        # which treats the second argument as a failure *message* and
        # always passes; assertEqual actually compares the key sets.
        self.assertEqual(sorted(g[0].keys()), ["a", "b", "c", "d", "e"])
        self.assertEqual(sorted(g[1].keys()), ["x", "y"])
        self.assertEqual(sorted(g[2].keys()), ["z"])
        print("pattern.graph.partition()")

    def test_clique(self):
        # Assert node cliques.
        v = graph.clique(self.g, "a")
        self.assertEqual(v, ["a", "b"])
        self.g.add_edge("b", "c")
        v = graph.clique(self.g, "a")
        self.assertEqual(v, ["a", "b", "c"])
        v = graph.cliques(self.g, 2)
        self.assertEqual(v, [["a", "b", "c"], ["b", "d"], ["d", "e"], ["x", "y"]])
        print("pattern.graph.clique()")
        print("pattern.graph.cliques()")
#---------------------------------------------------------------------------------------------------
class TestGraphMaintenance(unittest.TestCase):
    """Tests for graph surgery helpers: unlink, redirect, cut, insert."""

    def setUp(self):
        pass

    def test_unlink(self):
        # Assert remove all edges to/from Node(a).
        g = graph.Graph()
        g.add_edge("a", "b")
        g.add_edge("a", "c")
        graph.unlink(g, g["a"])
        self.assertEqual(len(g.edges), 0)
        # Assert remove edges between Node(a) and Node(b)
        g = graph.Graph()
        g.add_edge("a", "b")
        g.add_edge("a", "c")
        graph.unlink(g, g["a"], "b")
        self.assertEqual(len(g.edges), 1)
        print("pattern.graph.unlink()")

    def test_redirect(self):
        # Assert transfer connections of Node(a) to Node(d).
        g = graph.Graph()
        g.add_edge("a", "b")
        g.add_edge("c", "a")
        g.add_node("d")
        graph.redirect(g, g["a"], "d")
        self.assertEqual(len(g["a"].edges), 0)
        self.assertEqual(len(g["d"].edges), 2)
        # Direction is preserved: c -> a becomes c -> d.
        self.assertEqual(g.edge("d", "c").node1, g["c"])
        print("pattern.graph.redirect()")

    def test_cut(self):
        # Assert unlink Node(b) and redirect a->c and a->d.
        g = graph.Graph()
        g.add_edge("a", "b")
        g.add_edge("b", "c")
        g.add_edge("b", "d")
        graph.cut(g, g["b"])
        self.assertEqual(len(g["b"].edges), 0)
        self.assertIsNotNone(g.edge("a", "c"))
        self.assertIsNotNone(g.edge("a", "d"))
        print("pattern.graph.cut()")

    def test_insert(self):
        # Assert splicing Node(c) into the middle of edge a -> b.
        g = graph.Graph()
        g.add_edge("a", "b")
        g.add_node("c")
        graph.insert(g, g["c"], g["a"], g["b"])
        self.assertIsNone(g.edge("a", "b"))
        self.assertIsNotNone(g.edge("a", "c"))
        self.assertIsNotNone(g.edge("c", "b"))
        print("pattern.graph.insert()")
#---------------------------------------------------------------------------------------------------
class TestGraphCommonsense(unittest.TestCase):
    """Tests for the commonsense semantic network built on pattern.graph.

    Uses assertIn/assertNotIn/assertGreater instead of assertTrue(...) so
    failures show the offending values.
    """

    def setUp(self):
        pass

    def test_halo(self):
        # Assert concept halo (e.g., latent related concepts).
        g = commonsense.Commonsense()
        v = [concept.id for concept in g["rose"].halo]
        self.assertIn("red", v)
        self.assertIn("romance", v)
        # Concept.properties is the list of properties (adjectives) in the halo.
        v = g["rose"].properties
        self.assertIn("red", v)
        self.assertNotIn("romance", v)
        print("pattern.graph.commonsense.Concept.halo")
        print("pattern.graph.commonsense.Concept.properties")

    def test_field(self):
        # Assert semantic field (e.g., concept taxonomy).
        g = commonsense.Commonsense()
        v = [concept.id for concept in g.field("color")]
        self.assertIn("red", v)
        self.assertIn("green", v)
        self.assertIn("blue", v)
        print("pattern.graph.commonsense.Commonsense.field()")

    def test_similarity(self):
        # Assert that tiger is more similar to lion than to spoon
        # (which is common sense).
        g = commonsense.Commonsense()
        w1 = g.similarity("tiger", "lion")
        w2 = g.similarity("tiger", "spoon")
        self.assertGreater(w1, w2)
        print("pattern.graph.commonsense.Commonsense.similarity()")
#---------------------------------------------------------------------------------------------------
def suite():
    """Build a TestSuite with all test cases, in definition order.

    The original repeated `unittest.TestLoader().loadTestsFromTestCase(...)`
    ten times, constructing a fresh loader per line; a single loader and a
    loop removes the duplication without changing the resulting suite.
    """
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for case in (
            TestUtilityFunctions,
            TestNode,
            TestEdge,
            TestGraph,
            TestGraphLayout,
            TestGraphSpringLayout,
            TestGraphTraversal,
            TestGraphPartitioning,
            TestGraphMaintenance,
            TestGraphCommonsense):
        suite.addTest(loader.loadTestsFromTestCase(case))
    return suite
if __name__ == "__main__":
result = unittest.TextTestRunner(verbosity=1).run(suite())
sys.exit(not result.wasSuccessful())
| |
from __future__ import absolute_import, division, print_function
import pytest
import json
import stripe
import urllib3
from stripe import six, util
VALID_API_METHODS = ("get", "post", "delete")
class StripeClientTestCase(object):
    """Base class providing mocks for every supported transport library."""

    REQUEST_LIBRARIES = ["urlfetch", "requests", "pycurl", "urllib.request"]

    @pytest.fixture
    def request_mocks(self, mocker):
        # Patch each transport module on stripe.http_client and hand the
        # mocks to the test, keyed by library name.
        return {
            library: mocker.patch("stripe.http_client.%s" % (library,))
            for library in self.REQUEST_LIBRARIES
        }
class TestNewDefaultHttpClient(StripeClientTestCase):
    """new_default_http_client() must pick the best available transport."""

    @pytest.fixture(autouse=True)
    def setup_warnings(self, request_mocks):
        # Silence warnings for the duration of the test, then restore the
        # exact filter list that was in effect before.
        original_filters = stripe.http_client.warnings.filters[:]
        stripe.http_client.warnings.simplefilter("ignore")
        yield
        stripe.http_client.warnings.filters = original_filters

    def check_default(self, none_libs, expected):
        # Simulate missing libraries by nulling them out on the module,
        # then verify which client class is selected.
        for missing in none_libs:
            setattr(stripe.http_client, missing, None)
        client = stripe.http_client.new_default_http_client()
        assert isinstance(client, expected)

    def test_new_default_http_client_urlfetch(self):
        self.check_default((), stripe.http_client.UrlFetchClient)

    def test_new_default_http_client_requests(self):
        self.check_default(("urlfetch",), stripe.http_client.RequestsClient)

    def test_new_default_http_client_pycurl(self):
        self.check_default(("urlfetch", "requests"),
                           stripe.http_client.PycurlClient)

    def test_new_default_http_client_urllib2(self):
        self.check_default(("urlfetch", "requests", "pycurl"),
                           stripe.http_client.Urllib2Client)
class TestRetrySleepTimeDefaultHttpClient(StripeClientTestCase):
    # NOTE(review): importing inside the class body binds `contextmanager`
    # as a class attribute; module level would be the conventional spot.
    from contextlib import contextmanager

    def assert_sleep_times(self, client, expected):
        # Compare the client's computed back-off delays for attempts
        # 1..len(expected) against the expected schedule.
        until = len(expected)
        actual = list(
            map(lambda i: client._sleep_time_seconds(i + 1), range(until))
        )
        assert expected == actual

    @contextmanager
    def mock_max_delay(self, new_value):
        # Temporarily override the class-wide MAX_DELAY cap, restoring it
        # even if the test body raises.
        original_value = stripe.http_client.HTTPClient.MAX_DELAY
        stripe.http_client.HTTPClient.MAX_DELAY = new_value
        try:
            yield self
        finally:
            stripe.http_client.HTTPClient.MAX_DELAY = original_value

    def test_sleep_time_exponential_back_off(self):
        # With jitter disabled (identity), delays double on each attempt.
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t
        with self.mock_max_delay(10):
            self.assert_sleep_times(client, [0.5, 1.0, 2.0, 4.0, 8.0])

    def test_initial_delay_as_minimum(self):
        # Even tiny jittered values are floored at INITIAL_DELAY.
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t * 0.001
        initial_delay = stripe.http_client.HTTPClient.INITIAL_DELAY
        self.assert_sleep_times(client, [initial_delay] * 5)

    def test_maximum_delay(self):
        # Exponential growth is capped at MAX_DELAY.
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t
        max_delay = stripe.http_client.HTTPClient.MAX_DELAY
        expected = [0.5, 1.0, max_delay, max_delay, max_delay]
        self.assert_sleep_times(client, expected)

    def test_retry_after_header(self):
        client = stripe.http_client.new_default_http_client()
        client._add_jitter_time = lambda t: t

        # Prefer retry-after if it's bigger
        assert 30 == client._sleep_time_seconds(
            2, (None, 409, {"retry-after": "30"})
        )
        # Prefer default if it's bigger
        assert 2 == client._sleep_time_seconds(
            3, (None, 409, {"retry-after": "1"})
        )
        # Ignore crazy-big values
        assert 1 == client._sleep_time_seconds(
            2, (None, 409, {"retry-after": "300"})
        )

    def test_randomness_added(self):
        # Jitter scales each delay, except the first which is floored at
        # INITIAL_DELAY (see test_initial_delay_as_minimum).
        client = stripe.http_client.new_default_http_client()
        random_value = 0.8
        client._add_jitter_time = lambda t: t * random_value
        base_value = stripe.http_client.HTTPClient.INITIAL_DELAY * random_value

        with self.mock_max_delay(10):
            expected = [
                stripe.http_client.HTTPClient.INITIAL_DELAY,
                base_value * 2,
                base_value * 4,
                base_value * 8,
                base_value * 16,
            ]
            self.assert_sleep_times(client, expected)

    def test_jitter_has_randomness_but_within_range(self):
        # Jitter of 1 must vary across calls yet stay within [0.5, 1].
        client = stripe.http_client.new_default_http_client()
        jittered_ones = set(
            map(lambda _: client._add_jitter_time(1), list(range(100)))
        )
        assert len(jittered_ones) > 1
        assert all(0.5 <= val <= 1 for val in jittered_ones)
class TestRetryConditionsDefaultHttpClient(StripeClientTestCase):
    """Exercises _should_retry's status-code, header and counter logic."""

    def test_should_retry_on_codes(self):
        # Every 1xx/2xx/3xx/4xx code except 409 must NOT be retried.
        non_retryable = [
            code
            for code in (
                list(range(100, 104))
                + list(range(200, 209))
                + list(range(300, 308))
                + list(range(400, 431))
            )
            if code != 409
        ]
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1

        # These status codes should not be retried by default.
        for code in non_retryable:
            assert client._should_retry((None, code, None), None, 0) is False

        # 409 plus server errors are retried by default.
        for code in (409, 500, 503):
            assert client._should_retry((None, code, None), None, 0) is True

    def test_should_retry_on_error(self, mocker):
        # The connection error's own should_retry flag is honored.
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        conn_error = mocker.Mock()

        conn_error.should_retry = True
        assert client._should_retry(None, conn_error, 0) is True

        conn_error.should_retry = False
        assert client._should_retry(None, conn_error, 0) is False

    def test_should_retry_on_stripe_should_retry_true(self, mocker):
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        # A 400 is normally not retried, but the header forces a retry.
        assert client._should_retry((None, 400, {}), None, 0) is False
        headers = {"stripe-should-retry": "true"}
        assert client._should_retry((None, 400, headers), None, 0) is True

    def test_should_retry_on_stripe_should_retry_false(self, mocker):
        client = stripe.http_client.new_default_http_client()
        client._max_network_retries = lambda: 1
        # A 500 is normally retried, but the header suppresses the retry.
        assert client._should_retry((None, 500, {}), None, 0) is True
        headers = {"stripe-should-retry": "false"}
        assert client._should_retry((None, 500, headers), None, 0) is False

    def test_should_retry_on_num_retries(self, mocker):
        # Once num_retries exceeds the configured maximum, nothing retries,
        # not even otherwise-retryable errors or status codes.
        client = stripe.http_client.new_default_http_client()
        max_test_retries = 10
        client._max_network_retries = lambda: max_test_retries
        conn_error = mocker.Mock()
        conn_error.should_retry = True

        assert (
            client._should_retry(
                None, conn_error, max_test_retries + 1
            )
            is False
        )
        assert (
            client._should_retry((None, 409, None), None, max_test_retries + 1)
            is False
        )
class TestHTTPClient(object):
    # Disable telemetry by default and restore the original setting after
    # each test, so the telemetry test below opts in explicitly.
    @pytest.fixture(autouse=True)
    def setup_stripe(self):
        orig_attrs = {"enable_telemetry": stripe.enable_telemetry}
        stripe.enable_telemetry = False
        yield
        stripe.enable_telemetry = orig_attrs["enable_telemetry"]

    def test_sends_telemetry_on_second_request(self, mocker):
        # Minimal concrete subclass: request_with_retries is exercised while
        # the raw request call itself is mocked out.
        class TestClient(stripe.http_client.HTTPClient):
            pass

        stripe.enable_telemetry = True
        url = "http://fake.url"

        client = TestClient()

        client.request = mocker.MagicMock(
            return_value=["", 200, {"Request-Id": "req_123"}]
        )
        _, code, _ = client.request_with_retries("get", url, {}, None)
        assert code == 200
        client.request.assert_called_with("get", url, {}, None)

        client.request = mocker.MagicMock(
            return_value=["", 200, {"Request-Id": "req_234"}]
        )
        _, code, _ = client.request_with_retries("get", url, {}, None)
        assert code == 200

        # The second request must carry telemetry describing the first one
        # (headers are positional argument index 2 of client.request).
        args, _ = client.request.call_args
        assert "X-Stripe-Client-Telemetry" in args[2]

        telemetry = json.loads(args[2]["X-Stripe-Client-Telemetry"])
        assert telemetry["last_request_metrics"]["request_id"] == "req_123"
class ClientTestBase(object):
    """Shared request/stream/error tests parameterized by REQUEST_CLIENT.

    Subclasses set REQUEST_CLIENT and override the fixtures below with
    library-specific mocking.
    """

    @pytest.fixture
    def request_mock(self, request_mocks):
        # The mock for this client's underlying request library.
        return request_mocks[self.REQUEST_CLIENT.name]

    @property
    def valid_url(self):
        # Fixed test endpoint.  (A property getter cannot receive extra
        # arguments, so the old unused `path="/foo"` parameter was removed;
        # the returned value is unchanged.)
        return "https://api.stripe.com/foo"

    def make_request(self, method, url, headers, post_data):
        client = self.REQUEST_CLIENT(verify_ssl_certs=True)
        return client.request_with_retries(method, url, headers, post_data)

    def make_request_stream(self, method, url, headers, post_data):
        client = self.REQUEST_CLIENT(verify_ssl_certs=True)
        return client.request_stream_with_retries(
            method, url, headers, post_data
        )

    @pytest.fixture
    def mock_response(self):
        def mock_response(mock, body, code):
            raise NotImplementedError(
                "You must implement this in your test subclass"
            )

        return mock_response

    @pytest.fixture
    def mock_error(self):
        def mock_error(mock, error):
            raise NotImplementedError(
                "You must implement this in your test subclass"
            )

        return mock_error

    @pytest.fixture
    def check_call(self):
        def check_call(
            mock, method, abs_url, headers, params, is_streaming=False
        ):
            raise NotImplementedError(
                "You must implement this in your test subclass"
            )

        return check_call

    def test_request(self, request_mock, mock_response, check_call):
        mock_response(request_mock, '{"foo": "baz"}', 200)

        for method in VALID_API_METHODS:
            abs_url = self.valid_url
            data = ""

            # Non-POST requests carry data in the query string instead.
            if method != "post":
                abs_url = "%s?%s" % (abs_url, data)
                data = None

            headers = {"my-header": "header val"}

            body, code, _ = self.make_request(method, abs_url, headers, data)

            assert code == 200
            assert body == '{"foo": "baz"}'

            check_call(request_mock, method, abs_url, data, headers)

    def test_request_stream(
        self, mocker, request_mock, mock_response, check_call
    ):
        # (Removed leftover debug print() calls that dumped dir(self).)
        for method in VALID_API_METHODS:
            mock_response(request_mock, "some streamed content", 200)

            abs_url = self.valid_url
            data = ""

            if method != "post":
                abs_url = "%s?%s" % (abs_url, data)
                data = None

            headers = {"my-header": "header val"}

            stream, code, _ = self.make_request_stream(
                method, abs_url, headers, data
            )

            assert code == 200

            # Here we need to convert and align all content on one type (string)
            # as some clients return a string stream others a byte stream.
            body_content = stream.read()
            if hasattr(body_content, "decode"):
                body_content = body_content.decode("utf-8")

            assert body_content == "some streamed content"

            mocker.resetall()

    def test_exception(self, request_mock, mock_error):
        mock_error(request_mock)
        with pytest.raises(stripe.error.APIConnectionError):
            self.make_request("get", self.valid_url, {}, None)
class RequestsVerify(object):
    """Equality matcher: matches any path ending in Stripe's CA bundle.

    Used as an expected value in mock assertions so tests do not depend on
    the absolute install path of ca-certificates.crt.
    """

    def __eq__(self, other):
        # Return a real bool: the original returned `other` itself when it
        # was falsy (e.g. "" or None), which is a truthiness wart for __eq__.
        return bool(
            other and other.endswith("stripe/data/ca-certificates.crt")
        )
class TestRequestsClient(StripeClientTestCase, ClientTestBase):
    REQUEST_CLIENT = stripe.http_client.RequestsClient

    @pytest.fixture
    def session(self, mocker, request_mocks):
        # Mocked requests.Session shared by the fixtures below.
        return mocker.MagicMock()

    @pytest.fixture
    def mock_response(self, mocker, session):
        def mock_response(mock, body, code):
            result = mocker.Mock()
            result.content = body
            result.status_code = code
            result.headers = {}
            # Real urllib3 response so streaming reads work against the mock.
            result.raw = urllib3.response.HTTPResponse(
                body=util.io.BytesIO(str.encode(body)),
                preload_content=False,
                status=code,
            )

            session.request = mocker.MagicMock(return_value=result)
            mock.Session = mocker.MagicMock(return_value=session)

        return mock_response

    @pytest.fixture
    def mock_error(self, mocker, session):
        def mock_error(mock):
            # The first kind of request exceptions we catch
            mock.exceptions.SSLError = Exception
            session.request.side_effect = mock.exceptions.SSLError()
            mock.Session = mocker.MagicMock(return_value=session)

        return mock_error

    # Note that unlike other modules, we don't use the "mock" argument here
    # because we need to run the request call against the internal mock
    # session.
    @pytest.fixture
    def check_call(self, session):
        def check_call(
            mock,
            method,
            url,
            post_data,
            headers,
            is_streaming=False,
            timeout=80,
            times=None,
        ):
            times = times or 1
            args = (method, url)
            kwargs = {
                "headers": headers,
                "data": post_data,
                "verify": RequestsVerify(),
                "proxies": {"http": "http://slap/", "https": "http://slap/"},
                "timeout": timeout,
            }
            if is_streaming:
                kwargs["stream"] = True

            # Expect the same call repeated `times` times (used by the
            # retry-behavior subclass).
            calls = [(args, kwargs) for _ in range(times)]
            session.request.assert_has_calls(calls)

        return check_call

    def make_request(self, method, url, headers, post_data, timeout=80):
        # Overrides the base helper to route through the configured proxy.
        client = self.REQUEST_CLIENT(
            verify_ssl_certs=True, timeout=timeout, proxy="http://slap/"
        )
        return client.request_with_retries(method, url, headers, post_data)

    def make_request_stream(self, method, url, headers, post_data, timeout=80):
        client = self.REQUEST_CLIENT(
            verify_ssl_certs=True, timeout=timeout, proxy="http://slap/"
        )
        return client.request_stream_with_retries(
            method, url, headers, post_data
        )

    def test_timeout(self, request_mock, mock_response, check_call):
        headers = {"my-header": "header val"}
        data = ""
        mock_response(request_mock, '{"foo": "baz"}', 200)
        self.make_request("POST", self.valid_url, headers, data, timeout=5)
        check_call(None, "POST", self.valid_url, data, headers, timeout=5)

    def test_request_stream_forwards_stream_param(
        self, mocker, request_mock, mock_response, check_call
    ):
        # Streaming requests must pass stream=True down to the session.
        mock_response(request_mock, "some streamed content", 200)
        self.make_request_stream("GET", self.valid_url, {}, None)
        check_call(
            None,
            "GET",
            self.valid_url,
            None,
            {},
            is_streaming=True,
        )
class TestRequestClientRetryBehavior(TestRequestsClient):
    # End-to-end retry tests driven through the mocked requests session.

    @pytest.fixture
    def response(self, mocker):
        def response(code=200, headers={}):
            result = mocker.Mock()
            result.content = "{}"
            result.status_code = code
            result.headers = headers
            result.raw = urllib3.response.HTTPResponse(
                body=util.io.BytesIO(str.encode(result.content)),
                preload_content=False,
                status=code,
            )
            return result

        return response

    @pytest.fixture
    def mock_retry(self, mocker, session, request_mock):
        def mock_retry(retry_error_num=0, no_retry_error_num=0, responses=[]):
            # Mocking classes of exception we catch. Any group of exceptions
            # with the same inheritance pattern will work
            request_root_error_class = Exception
            request_mock.exceptions.RequestException = request_root_error_class

            no_retry_parent_class = LookupError
            no_retry_child_class = KeyError
            request_mock.exceptions.SSLError = no_retry_parent_class
            no_retry_errors = [no_retry_child_class()] * no_retry_error_num

            retry_parent_class = EnvironmentError
            retry_child_class = IOError
            request_mock.exceptions.Timeout = retry_parent_class
            request_mock.exceptions.ConnectionError = retry_parent_class
            retry_errors = [retry_child_class()] * retry_error_num
            # Include mock responses as possible side-effects
            # to simulate returning proper results after some exceptions
            session.request.side_effect = (
                retry_errors + no_retry_errors + responses
            )
            request_mock.Session = mocker.MagicMock(return_value=session)
            return request_mock

        return mock_retry

    @pytest.fixture
    def check_call_numbers(self, check_call):
        # Convenience wrapper: assert how many times the request was made.
        valid_url = self.valid_url

        def check_call_numbers(times, is_streaming=False):
            check_call(
                None,
                "GET",
                valid_url,
                None,
                {},
                times=times,
                is_streaming=is_streaming,
            )

        return check_call_numbers

    def max_retries(self):
        return 3

    def make_client(self):
        client = self.REQUEST_CLIENT(
            verify_ssl_certs=True, timeout=80, proxy="http://slap/"
        )
        # Override sleep time to speed up tests
        client._sleep_time = lambda _: 0.0001
        # Override configured max retries
        client._max_network_retries = lambda: self.max_retries()
        return client

    def make_request(self, *args, **kwargs):
        client = self.make_client()
        return client.request_with_retries("GET", self.valid_url, {}, None)

    def make_request_stream(self, *args, **kwargs):
        client = self.make_client()
        return client.request_stream_with_retries(
            "GET", self.valid_url, {}, None
        )

    def test_retry_error_until_response(
        self, mock_retry, response, check_call_numbers
    ):
        # One retryable error, then success: two calls total.
        mock_retry(retry_error_num=1, responses=[response(code=202)])
        _, code, _ = self.make_request()
        assert code == 202
        check_call_numbers(2)

    def test_retry_error_until_exceeded(
        self, mock_retry, response, check_call_numbers
    ):
        # Only retryable errors: give up after max_retries attempts.
        mock_retry(retry_error_num=self.max_retries())
        with pytest.raises(stripe.error.APIConnectionError):
            self.make_request()

        check_call_numbers(self.max_retries())

    def test_no_retry_error(self, mock_retry, response, check_call_numbers):
        # A non-retryable error aborts after the first attempt.
        mock_retry(no_retry_error_num=self.max_retries())
        with pytest.raises(stripe.error.APIConnectionError):
            self.make_request()

        check_call_numbers(1)

    def test_retry_codes(self, mock_retry, response, check_call_numbers):
        # A 409 response is retried until a non-retryable code arrives.
        mock_retry(responses=[response(code=409), response(code=202)])
        _, code, _ = self.make_request()
        assert code == 202
        check_call_numbers(2)

    def test_retry_codes_until_exceeded(
        self, mock_retry, response, check_call_numbers
    ):
        # All 409s: the last response is returned once retries run out.
        mock_retry(responses=[response(code=409)] * (self.max_retries() + 1))
        _, code, _ = self.make_request()
        assert code == 409
        check_call_numbers(self.max_retries() + 1)

    def test_retry_request_stream_error_until_response(
        self, mock_retry, response, check_call_numbers
    ):
        mock_retry(retry_error_num=1, responses=[response(code=202)])
        _, code, _ = self.make_request_stream()
        assert code == 202
        check_call_numbers(2, is_streaming=True)

    def test_retry_request_stream_error_until_exceeded(
        self, mock_retry, response, check_call_numbers
    ):
        mock_retry(retry_error_num=self.max_retries())
        with pytest.raises(stripe.error.APIConnectionError):
            self.make_request_stream()

        check_call_numbers(self.max_retries(), is_streaming=True)

    def test_no_retry_request_stream_error(
        self, mock_retry, response, check_call_numbers
    ):
        mock_retry(no_retry_error_num=self.max_retries())
        with pytest.raises(stripe.error.APIConnectionError):
            self.make_request_stream()

        check_call_numbers(1, is_streaming=True)

    def test_retry_request_stream_codes(
        self, mock_retry, response, check_call_numbers
    ):
        mock_retry(responses=[response(code=409), response(code=202)])
        _, code, _ = self.make_request_stream()
        assert code == 202
        check_call_numbers(2, is_streaming=True)

    def test_retry_request_stream_codes_until_exceeded(
        self, mock_retry, response, check_call_numbers
    ):
        mock_retry(responses=[response(code=409)] * (self.max_retries() + 1))
        _, code, _ = self.make_request_stream()
        assert code == 409
        check_call_numbers(self.max_retries() + 1, is_streaming=True)

    @pytest.fixture
    def connection_error(self, session):
        # Helper that feeds an exception into _handle_request_error and
        # returns the resulting APIConnectionError.
        client = self.REQUEST_CLIENT()

        def connection_error(given_exception):
            with pytest.raises(stripe.error.APIConnectionError) as error:
                client._handle_request_error(given_exception)
            return error.value

        return connection_error

    def test_handle_request_error_should_retry(
        self, connection_error, mock_retry
    ):
        request_mock = mock_retry()

        error = connection_error(request_mock.exceptions.Timeout())
        assert error.should_retry

        error = connection_error(request_mock.exceptions.ConnectionError())
        assert error.should_retry

    def test_handle_request_error_should_not_retry(
        self, connection_error, mock_retry
    ):
        request_mock = mock_retry()

        error = connection_error(request_mock.exceptions.SSLError())
        assert error.should_retry is False
        assert "not verify Stripe's SSL certificate" in error.user_message

        error = connection_error(request_mock.exceptions.RequestException())
        assert error.should_retry is False

        # Mimic non-requests exception as not being children of Exception,
        # See mock_retry for the exceptions setup
        error = connection_error(BaseException(""))
        assert error.should_retry is False
        assert "configuration issue locally" in error.user_message

    # Skip inherited basic requests client tests
    def test_request(self, request_mock, mock_response, check_call):
        pass

    def test_exception(self, request_mock, mock_error):
        pass

    def test_timeout(self, request_mock, mock_response, check_call):
        pass
class TestUrlFetchClient(StripeClientTestCase, ClientTestBase):
    """Runs the shared client tests against the App Engine urlfetch client."""

    REQUEST_CLIENT = stripe.http_client.UrlFetchClient

    @pytest.fixture
    def mock_response(self, mocker):
        def mock_response(mock, body, code):
            fetch_result = mocker.Mock()
            fetch_result.content = body
            fetch_result.status_code = code
            fetch_result.headers = {}
            mock.fetch = mocker.Mock(return_value=fetch_result)

        return mock_response

    @pytest.fixture
    def mock_error(self):
        def mock_error(mock):
            # Both error classes collapse onto plain Exception for the test.
            mock.Error = mock.InvalidURLError = Exception
            mock.fetch.side_effect = mock.InvalidURLError()

        return mock_error

    @pytest.fixture
    def check_call(self):
        def check_call(
            mock, method, url, post_data, headers, is_streaming=False
        ):
            # urlfetch is invoked with keyword arguments only.
            mock.fetch.assert_called_with(
                url=url,
                method=method,
                headers=headers,
                validate_certificate=True,
                deadline=55,
                payload=post_data,
            )

        return check_call
class TestUrllib2Client(StripeClientTestCase, ClientTestBase):
    REQUEST_CLIENT = stripe.http_client.Urllib2Client

    def make_client(self, proxy):
        # Keep the client on self so check_call can branch on _proxy later.
        self.client = self.REQUEST_CLIENT(verify_ssl_certs=True, proxy=proxy)
        self.proxy = proxy

    def make_request(self, method, url, headers, post_data, proxy=None):
        self.make_client(proxy)
        return self.client.request_with_retries(
            method, url, headers, post_data
        )

    def make_request_stream(self, method, url, headers, post_data, proxy=None):
        self.make_client(proxy)
        return self.client.request_stream_with_retries(
            method, url, headers, post_data
        )

    @pytest.fixture
    def mock_response(self, mocker):
        def mock_response(mock, body, code):
            response = mocker.Mock()
            response.read = mocker.MagicMock(return_value=body)
            response.code = code
            response.info = mocker.Mock(return_value={})

            self.request_object = mocker.Mock()
            mock.Request = mocker.Mock(return_value=self.request_object)

            # Wire both the direct urlopen path and the proxy opener path.
            mock.urlopen = mocker.Mock(return_value=response)

            opener = mocker.Mock()
            opener.open = mocker.Mock(return_value=response)
            mock.build_opener = mocker.Mock(return_value=opener)
            mock.build_opener.open = opener.open
            mock.ProxyHandler = mocker.Mock(return_value=opener)

            # NOTE(review): urlopen is re-mocked here, duplicating the line
            # above; presumably a leftover -- behavior is unchanged.
            mock.urlopen = mocker.Mock(return_value=response)

        return mock_response

    @pytest.fixture
    def mock_error(self):
        def mock_error(mock):
            mock.urlopen.side_effect = ValueError
            mock.build_opener().open.side_effect = ValueError
            mock.build_opener.reset_mock()

        return mock_error

    @pytest.fixture
    def check_call(self):
        def check_call(
            mock, method, url, post_data, headers, is_streaming=False
        ):
            # urllib expects a bytes body on Python 3.
            if six.PY3 and isinstance(post_data, six.string_types):
                post_data = post_data.encode("utf-8")

            mock.Request.assert_called_with(url, post_data, headers)

            # With a proxy the opener path must be used and urlopen untouched;
            # without one, the direct urlopen path is used instead.
            if self.client._proxy:
                assert isinstance(self.client._proxy, dict)
                mock.ProxyHandler.assert_called_with(self.client._proxy)
                mock.build_opener.open.assert_called_with(self.request_object)
                assert not mock.urlopen.called

            if not self.client._proxy:
                mock.urlopen.assert_called_with(self.request_object)
                assert not mock.build_opener.called
                assert not mock.build_opener.open.called

        return check_call
class TestUrllib2ClientHttpsProxy(TestUrllib2Client):
    """Urllib2 client tests, exercised through an http+https proxy mapping."""

    @staticmethod
    def _proxies():
        # Fresh dict per call so client code cannot mutate shared state.
        return {"http": "http://slap/", "https": "http://slap/"}

    def make_request(self, method, url, headers, post_data, proxy=None):
        return super(TestUrllib2ClientHttpsProxy, self).make_request(
            method, url, headers, post_data, self._proxies()
        )

    def make_request_stream(self, method, url, headers, post_data, proxy=None):
        return super(TestUrllib2ClientHttpsProxy, self).make_request_stream(
            method, url, headers, post_data, self._proxies()
        )
class TestUrllib2ClientHttpProxy(TestUrllib2Client):
    """Urllib2 client tests, exercised through a single proxy URL string."""

    PROXY_URL = "http://slap/"

    def make_request(self, method, url, headers, post_data, proxy=None):
        return super(TestUrllib2ClientHttpProxy, self).make_request(
            method, url, headers, post_data, self.PROXY_URL
        )

    def make_request_stream(self, method, url, headers, post_data, proxy=None):
        return super(TestUrllib2ClientHttpProxy, self).make_request_stream(
            method, url, headers, post_data, self.PROXY_URL
        )
class TestPycurlClient(StripeClientTestCase, ClientTestBase):
    REQUEST_CLIENT = stripe.http_client.PycurlClient

    def make_client(self, proxy):
        # Keep the client on self so check_call can read _proxy/_get_proxy.
        self.client = self.REQUEST_CLIENT(verify_ssl_certs=True, proxy=proxy)
        self.proxy = proxy

    def make_request(self, method, url, headers, post_data, proxy=None):
        self.make_client(proxy)
        return self.client.request_with_retries(
            method, url, headers, post_data
        )

    def make_request_stream(self, method, url, headers, post_data, proxy=None):
        self.make_client(proxy)
        return self.client.request_stream_with_retries(
            method, url, headers, post_data
        )

    @pytest.fixture
    def curl_mock(self, mocker):
        return mocker.Mock()

    @pytest.fixture
    def request_mock(self, mocker, request_mocks, curl_mock):
        # pycurl is driven through a Curl handle; return that handle's mock.
        lib_mock = request_mocks[self.REQUEST_CLIENT.name]
        lib_mock.Curl = mocker.Mock(return_value=curl_mock)
        return curl_mock

    @pytest.fixture
    def bio_mock(self, mocker):
        # The client writes the response body into a BytesIO; patch it so
        # mock_response can control what gets read back.
        bio_patcher = mocker.patch("stripe.util.io.BytesIO")
        bio_mock = mocker.Mock()
        bio_patcher.return_value = bio_mock
        return bio_mock

    @pytest.fixture
    def mock_response(self, mocker, bio_mock):
        def mock_response(mock, body, code):
            bio_mock.getvalue = mocker.MagicMock(
                return_value=body.encode("utf-8")
            )
            bio_mock.read = mocker.MagicMock(return_value=body.encode("utf-8"))
            mock.getinfo.return_value = code

        return mock_response

    @pytest.fixture
    def mock_error(self):
        def mock_error(mock):
            # Deliberately derives from BaseException (not Exception); see
            # the non-requests-exception note in the retry tests.
            class FakeException(BaseException):
                @property
                def args(self):
                    return ("foo", "bar")

            stripe.http_client.pycurl.error = FakeException
            mock.perform.side_effect = stripe.http_client.pycurl.error

        return mock_error

    @pytest.fixture
    def check_call(self, request_mocks):
        def check_call(
            mock, method, url, post_data, headers, is_streaming=False
        ):
            lib_mock = request_mocks[self.REQUEST_CLIENT.name]

            # Proxy settings are forwarded through setopt when configured.
            if self.client._proxy:
                proxy = self.client._get_proxy(url)
                assert proxy is not None
                if proxy.hostname:
                    mock.setopt.assert_any_call(lib_mock.PROXY, proxy.hostname)
                if proxy.port:
                    mock.setopt.assert_any_call(lib_mock.PROXYPORT, proxy.port)
                if proxy.username or proxy.password:
                    mock.setopt.assert_any_call(
                        lib_mock.PROXYUSERPWD,
                        "%s:%s" % (proxy.username, proxy.password),
                    )

            # A note on methodology here: we don't necessarily need to verify
            # _every_ call to setopt, but check a few of them to make sure the
            # right thing is happening. Keep an eye specifically on conditional
            # statements where things are more likely to go wrong.
            mock.setopt.assert_any_call(lib_mock.NOSIGNAL, 1)
            mock.setopt.assert_any_call(lib_mock.URL, stripe.util.utf8(url))

            if method == "get":
                mock.setopt.assert_any_call(lib_mock.HTTPGET, 1)
            elif method == "post":
                mock.setopt.assert_any_call(lib_mock.POST, 1)
            else:
                mock.setopt.assert_any_call(
                    lib_mock.CUSTOMREQUEST, method.upper()
                )

            mock.perform.assert_any_call()

        return check_call
class TestPycurlClientHttpProxy(TestPycurlClient):
    """Pycurl client tests routed through one authenticated HTTP proxy."""

    PROXY_URL = "http://user:withPwd@slap:8888/"

    def make_request(self, method, url, headers, post_data, proxy=None):
        return super(TestPycurlClientHttpProxy, self).make_request(
            method, url, headers, post_data, self.PROXY_URL
        )

    def make_request_stream(self, method, url, headers, post_data, proxy=None):
        return super(TestPycurlClientHttpProxy, self).make_request_stream(
            method, url, headers, post_data, self.PROXY_URL
        )
class TestPycurlClientHttpsProxy(TestPycurlClient):
    """Pycurl client tests routed through per-scheme proxy URLs."""

    @staticmethod
    def _proxies():
        # Fresh mapping per call so the client cannot mutate shared state.
        return {"http": "http://slap:8888/", "https": "http://slap2:444/"}

    def make_request(self, method, url, headers, post_data, proxy=None):
        return super(TestPycurlClientHttpsProxy, self).make_request(
            method, url, headers, post_data, self._proxies()
        )

    def make_request_stream(self, method, url, headers, post_data, proxy=None):
        return super(TestPycurlClientHttpsProxy, self).make_request_stream(
            method, url, headers, post_data, self._proxies()
        )
class TestAPIEncode(StripeClientTestCase):
    """Tests for stripe.api_requestor._api_encode key flattening."""

    def test_encode_dict(self):
        # Nested dicts flatten to bracketed keys: foo[dob][month].
        body = {"foo": {"dob": {"month": 1}, "name": "bat"}}

        # list(...) instead of the redundant [t for t in ...] comprehension.
        values = list(stripe.api_requestor._api_encode(body))

        assert ("foo[dob][month]", 1) in values
        assert ("foo[name]", "bat") in values

    def test_encode_array(self):
        # Lists flatten with an explicit index: foo[0][...].
        body = {"foo": [{"dob": {"month": 1}, "name": "bat"}]}

        values = list(stripe.api_requestor._api_encode(body))

        assert ("foo[0][dob][month]", 1) in values
        assert ("foo[0][name]", "bat") in values
| |
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sessionvalidation.session as session
import sessionvalidation.transaction as transaction
import sessionvalidation.request as request
import sessionvalidation.response as response
# valid_HTTP_request_methods = ['GET', 'POST', 'HEAD']
# custom_HTTP_request_methods = ['PULL'] # transaction monitor plugin for ATS may have custom methods
# HTTP methods accepted in replayed sessions; PULL is a custom ATS
# transaction-monitor method (see the commented lists above).
allowed_HTTP_request_methods = ['GET', 'POST', 'HEAD', 'PULL']
# Presumably toggles acceptance of custom methods -- usage not visible here;
# TODO confirm against the rest of the module.
G_CUSTOM_METHODS = False
# Global verbosity switch read by _verbose_print.
G_VERBOSE_LOG = True
def _verbose_print(msg, verbose_on=False):
''' Print msg if verbose_on is set to True or G_VERBOSE_LOG is set to True'''
if verbose_on or G_VERBOSE_LOG:
print(msg)
class SessionValidator(object):
'''
SessionValidator parses, validates, and exports an API for a given set of JSON sessions generated from Apache Traffic Server
SessionValidator is initialized with a path to a directory of JSON sessions. It then automatically parses and validates all the
session in the directory. After initialization, the user may use the provided API
TODO :
Provide a list of guaranteed fields for each type of object (ie a Transaction has a request and a response, a request has ...)
'''
def parse(self):
'''
Constructs Session objects from JSON files on disk and stores objects into _sessions
All sessions missing required fields (ie. a session timestamp, a response for every request, etc) are
dropped and the filename is stored inside _bad_sessions
'''
log_filenames = [os.path.join(self._json_log_dir, f) for f in os.listdir(
self._json_log_dir) if os.path.isfile(os.path.join(self._json_log_dir, f))]
for fname in log_filenames:
with open(fname) as f:
# first attempt to load the JSON
try:
sesh = json.load(f)
except:
self._bad_sessions.append(fname)
_verbose_print("Warning: JSON parse error on file={0}".format(fname))
print("Warning: JSON parse error on file={0}".format(fname))
continue
# then attempt to extract all the required fields from the JSON
try:
session_timestamp = sesh['timestamp']
session_version = sesh['version']
session_txns = list()
for txn in sesh['txns']:
# create transaction Request object
txn_request = txn['request']
txn_request_body = ''
if 'body' in txn_request:
txn_request_body = txn_request['body']
txn_request_obj = request.Request(txn_request['timestamp'], txn_request['headers'], txn_request_body)
# Create transaction Response object
txn_response = txn['response']
txn_response_body = ''
if 'body' in txn_response:
txn_response_body = txn_response['body']
txn_response_obj = response.Response(txn_response['timestamp'], txn_response['headers'], txn_response_body,
txn_response.get('options'))
# create Transaction object
txn_obj = transaction.Transaction(txn_request_obj, txn_response_obj, txn['uuid'])
session_txns.append(txn_obj)
session_obj = session.Session(fname, session_version, session_timestamp, session_txns)
except KeyError as e:
self._bad_sessions.append(fname)
print("Warning: parse error on key={0} for file={1}".format(e, fname))
_verbose_print("Warning: parse error on key={0} for file={1}".format(e, fname))
continue
self._sessions.append(session_obj)
def validate(self):
''' Prunes out all the invalid Sessions in _sessions '''
good_sessions = list()
for sesh in self._sessions:
if SessionValidator.validateSingleSession(sesh):
good_sessions.append(sesh)
else:
self._bad_sessions.append(sesh._filename)
self._sessions = good_sessions
@staticmethod
def validateSingleSession(sesh):
''' Takes in a single Session object as input, returns whether or not the Session is valid '''
retval = True
try:
# first validate fields
if not sesh._filename:
_verbose_print("bad session filename")
retval = False
elif not sesh._version:
_verbose_print("bad session version")
retval = False
elif float(sesh._timestamp) <= 0:
_verbose_print("bad session timestamp")
retval = False
elif not bool(sesh.getTransactionList()):
_verbose_print("session has no transaction list")
retval = False
# validate Transactions now
for txn in sesh.getTransactionIter():
if not SessionValidator.validateSingleTransaction(txn):
retval = False
except ValueError as e:
_verbose_print("most likely an invalid session timestamp")
retval = False
return retval
@staticmethod
def validateSingleTransaction(txn):
''' Takes in a single Transaction object as input, and returns whether or not the Transaction is valid '''
txn_req = txn.getRequest()
txn_resp = txn.getResponse()
retval = True
#valid_HTTP_request_methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'OPTIONS', 'CONNECT', 'PATCH']
# we can later uncomment the previous line to support more HTTP methods
valid_HTTP_versions = ['HTTP/1.0', 'HTTP/1.1', 'HTTP/2.0']
try:
# validate request first
if not txn_req:
_verbose_print("no transaction request")
retval = False
elif txn_req.getBody() == None:
_verbose_print("transaction body is set to None")
retval = False
elif float(txn_req.getTimestamp()) <= 0:
_verbose_print("invalid transaction request timestamp")
retval = False
elif txn_req.getHeaders().split()[0] not in allowed_HTTP_request_methods:
_verbose_print("invalid HTTP method for transaction {0}".format(txn_req.getHeaders().split()[0]))
retval = False
elif not txn_req.getHeaders().endswith("\r\n\r\n"):
_verbose_print("transaction request headers didn't end with \\r\\n\\r\\n")
retval = False
elif txn_req.getHeaders().split()[2] not in valid_HTTP_versions:
_verbose_print("invalid HTTP version in request")
retval = False
# if the Host header is not present and vaild we reject this transaction
found_host = False
for header in txn_req.getHeaders().split('\r\n'):
split_header = header.split(' ')
if split_header[0] == 'Host:':
found_host = True
host_header_no_space = len(split_header) == 1
host_header_with_space = len(split_header) == 2 and split_header[1] == ''
if host_header_no_space or host_header_with_space:
found_host = False
if not found_host:
print("missing host", txn_req)
_verbose_print("transaction request Host header doesn't have specified host")
retval = False
# now validate response
if not txn_resp:
_verbose_print("no transaction response")
retval = False
elif txn_resp.getBody() == None:
_verbose_print("transaction response body set to None")
retval = False
elif float(txn_resp.getTimestamp()) <= 0:
_verbose_print("invalid transaction response timestamp")
retval = False
elif txn_resp.getHeaders().split()[0] not in valid_HTTP_versions:
_verbose_print("invalid HTTP response header")
retval = False
elif not txn_resp.getHeaders().endswith("\r\n\r\n"):
_verbose_print("transaction response headers didn't end with \\r\\n\\r\\n")
retval = False
# if any of the 3xx responses have bodies, then the must reject this transaction, since 3xx
# errors by definition can't have bodies
response_line = txn_resp.getHeaders().split('\r\n')[0]
response_code = response_line.split(' ')[1]
if response_code.startswith('3') and txn_resp.getBody():
_verbose_print("transaction response was 3xx and had a body")
retval = False
except ValueError as e:
_verbose_print("most likely an invalid transaction timestamp")
retval = False
except IndexError as e:
_verbose_print("most likely a bad transaction header")
retval = False
return retval
    def getSessionList(self):
        ''' Returns the list of Session objects '''
        # Note: returns the internal list itself, not a copy.
        return self._sessions
    def getSessionIter(self):
        ''' Returns an iterator of the Session objects '''
        return iter(self._sessions)
    def getBadSessionList(self):
        ''' Returns a list of bad session filenames (list of strings) '''
        # Note: returns the internal list itself, not a copy.
        return self._bad_sessions
    def getBadSessionListIter(self):
        ''' Returns an iterator of bad session filenames (iterator of strings) '''
        return iter(self._bad_sessions)
    def __init__(self, json_log_dir, allow_custom=False):
        ''' Parses and validates every JSON session log found under json_log_dir.

        json_log_dir -- directory containing the JSON replay log files
        allow_custom -- when True, custom (non-standard) HTTP request methods
                        are permitted during validation
        '''
        # NOTE(review): valid_HTTP_request_methods is declared global here but
        # never assigned in this method -- presumably read/written elsewhere at
        # module level; confirm the declaration is still needed.
        global valid_HTTP_request_methods
        global G_CUSTOM_METHODS
        G_CUSTOM_METHODS = allow_custom
        self._json_log_dir = json_log_dir
        self._bad_sessions = list() # list of filenames
        self._sessions = list() # list of _good_ session objects
        # Parse all logs, then prune out the invalid sessions.
        self.parse()
        self.validate()
| |
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conditions / splits for non-leaf nodes.
A condition (e.g. a>0.5) is evaluated to a binary value (e.g. True if a=5).
Condition evaluations control the branching of an example in a tree.
"""
import abc
from typing import List, Union, Optional
import six
from tensorflow_decision_forests.component.py_tree import dataspec as dataspec_lib
from yggdrasil_decision_forests.dataset import data_spec_pb2
from yggdrasil_decision_forests.model.decision_tree import decision_tree_pb2
ColumnType = data_spec_pb2.ColumnType
SimpleColumnSpec = dataspec_lib.SimpleColumnSpec
@six.add_metaclass(abc.ABCMeta)
class AbstractCondition(object):
  """Base class for all node conditions.

  Attrs:
    missing_evaluation: Value the condition evaluates to when the feature is
      missing. When None, either the feature can never be missing, or a
      dedicated inference-time mechanism handles missing values.
  """

  def __init__(self, missing_evaluation: Optional[bool]):
    self._missing_evaluation = missing_evaluation

  @property
  def missing_evaluation(self):
    """Evaluation result used when the feature is missing."""
    return self._missing_evaluation

  @abc.abstractmethod
  def features(self) -> List[SimpleColumnSpec]:
    """Returns the features this condition reads."""

  def __repr__(self):
    return (f"AbstractCondition({self.features()}, "
            f"missing_evaluation={self._missing_evaluation})")
class IsMissingInCondition(AbstractCondition):
  """Condition evaluating to true iff the attribute value is missing."""

  def __init__(self, feature: SimpleColumnSpec):
    # An "is missing" condition cannot itself need a missing-value evaluation.
    super(IsMissingInCondition, self).__init__(None)
    self._feature = feature

  def features(self):
    return [self._feature]

  def __repr__(self):
    return f"({self._feature.name} is missing)"

  def __eq__(self, other):
    return (isinstance(other, IsMissingInCondition) and
            self._feature == other._feature)

  @property
  def feature(self):
    return self._feature
class IsTrueCondition(AbstractCondition):
  """Condition evaluating to true iff the (boolean) attribute is true."""

  def __init__(self, feature: SimpleColumnSpec,
               missing_evaluation: Optional[bool]):
    super(IsTrueCondition, self).__init__(missing_evaluation)
    self._feature = feature

  def features(self):
    return [self._feature]

  def __repr__(self):
    return f"({self._feature.name} is true; miss={self.missing_evaluation})"

  def __eq__(self, other):
    return (isinstance(other, IsTrueCondition) and
            self._feature == other._feature)

  @property
  def feature(self):
    return self._feature
class NumericalHigherThanCondition(AbstractCondition):
  """Condition of the form "attribute >= threshold"."""

  def __init__(self, feature: SimpleColumnSpec, threshold: float,
               missing_evaluation: Optional[bool]):
    super(NumericalHigherThanCondition, self).__init__(missing_evaluation)
    self._feature = feature
    self._threshold = threshold

  def features(self):
    return [self._feature]

  def __repr__(self):
    return (f"({self._feature.name} >= {self._threshold}; "
            f"miss={self.missing_evaluation})")

  def __eq__(self, other):
    return (isinstance(other, NumericalHigherThanCondition) and
            self._feature == other._feature and
            self._threshold == other._threshold)

  @property
  def feature(self):
    return self._feature

  @property
  def threshold(self):
    return self._threshold
class CategoricalIsInCondition(AbstractCondition):
  """Condition of the form "attribute in [...set of items...]"."""

  def __init__(self, feature: SimpleColumnSpec,
               mask: Union[List[str], List[int]],
               missing_evaluation: Optional[bool]):
    super(CategoricalIsInCondition, self).__init__(missing_evaluation)
    self._feature = feature
    self._mask = mask

  def features(self):
    return [self._feature]

  def __repr__(self):
    return (f"({self._feature.name} in {self._mask}; "
            f"miss={self.missing_evaluation})")

  def __eq__(self, other):
    return (isinstance(other, CategoricalIsInCondition) and
            self._feature == other._feature and
            self._mask == other._mask)

  @property
  def feature(self):
    return self._feature

  @property
  def mask(self):
    return self._mask
class CategoricalSetContainsCondition(AbstractCondition):
  """Condition of the form "attribute intersect [...set of items...]!=empty"."""

  def __init__(self, feature: SimpleColumnSpec,
               mask: Union[List[str], List[int]],
               missing_evaluation: Optional[bool]):
    super(CategoricalSetContainsCondition, self).__init__(missing_evaluation)
    self._feature = feature
    self._mask = mask

  def features(self):
    return [self._feature]

  def __repr__(self):
    return (f"({self._feature.name} intersect {self._mask} != empty; "
            f"miss={self.missing_evaluation})")

  def __eq__(self, other):
    return (isinstance(other, CategoricalSetContainsCondition) and
            self._feature == other._feature and
            self._mask == other._mask)

  @property
  def feature(self):
    return self._feature

  @property
  def mask(self):
    return self._mask
class NumericalSparseObliqueCondition(AbstractCondition):
  """Condition of the form "attributes * weights >= threshold"."""

  def __init__(self, features: List[SimpleColumnSpec], weights: List[float],
               threshold: float, missing_evaluation: Optional[bool]):
    super(NumericalSparseObliqueCondition, self).__init__(missing_evaluation)
    self._features = features
    self._weights = weights
    self._threshold = threshold

  def features(self):
    return self._features

  def __repr__(self):
    return (f"({self._features} . {self._weights} >= {self._threshold}; "
            f"miss={self.missing_evaluation})")

  def __eq__(self, other):
    return (isinstance(other, NumericalSparseObliqueCondition) and
            self._features == other._features and
            self._weights == other._weights and
            self._threshold == other._threshold)

  @property
  def weights(self):
    return self._weights

  @property
  def threshold(self):
    return self._threshold
def core_condition_to_condition(
    core_condition: decision_tree_pb2.NodeCondition,
    dataspec: data_spec_pb2.DataSpecification) -> AbstractCondition:
  """Converts a condition from the core to python format.

  Args:
    core_condition: Condition in the core proto format.
    dataspec: Dataspec describing the columns the condition refers to.

  Returns:
    The equivalent AbstractCondition sub-class instance.

  Raises:
    ValueError: If the condition type is not supported (including a
      contains-style condition on a non-categorical attribute, which falls
      through to the final raise).
  """
  condition_type = core_condition.condition
  attribute = dataspec_lib.make_simple_column_spec(dataspec,
                                                   core_condition.attribute)
  column_spec = dataspec.columns[core_condition.attribute]
  # "attribute is missing".
  if condition_type.HasField("na_condition"):
    return IsMissingInCondition(attribute)
  # "attribute >= threshold".
  if condition_type.HasField("higher_condition"):
    return NumericalHigherThanCondition(
        attribute, condition_type.higher_condition.threshold,
        core_condition.na_value)
  # "attribute is true".
  if condition_type.HasField("true_value_condition"):
    return IsTrueCondition(attribute, core_condition.na_value)
  # Mask stored as a bitmap over the categorical vocabulary.
  if condition_type.HasField("contains_bitmap_condition"):
    items = column_spec_bitmap_to_items(
        dataspec.columns[core_condition.attribute],
        condition_type.contains_bitmap_condition.elements_bitmap)
    if attribute.type == ColumnType.CATEGORICAL:
      return CategoricalIsInCondition(attribute, items, core_condition.na_value)
    elif attribute.type == ColumnType.CATEGORICAL_SET:
      return CategoricalSetContainsCondition(attribute, items,
                                             core_condition.na_value)
  # Mask stored as an explicit list of value indices; converted back to string
  # values when the column vocabulary is not already integerized.
  if condition_type.HasField("contains_condition"):
    items = condition_type.contains_condition.elements
    if not column_spec.categorical.is_already_integerized:
      items = [
          dataspec_lib.categorical_value_idx_to_value(column_spec, item)
          for item in items
      ]
    if attribute.type == ColumnType.CATEGORICAL:
      return CategoricalIsInCondition(attribute, items, core_condition.na_value)
    elif attribute.type == ColumnType.CATEGORICAL_SET:
      return CategoricalSetContainsCondition(attribute, items,
                                             core_condition.na_value)
  # Threshold stored as a discretized bucket index; mapped back to a numerical
  # threshold before building the condition.
  if condition_type.HasField("discretized_higher_condition"):
    threshold = dataspec_lib.discretized_numerical_to_numerical(
        column_spec, condition_type.discretized_higher_condition.threshold)
    return NumericalHigherThanCondition(attribute, threshold,
                                        core_condition.na_value)
  # "sum(attributes[i] * weights[i]) >= threshold".
  if condition_type.HasField("oblique_condition"):
    attributes = [
        dataspec_lib.make_simple_column_spec(dataspec, attribute_idx)
        for attribute_idx in condition_type.oblique_condition.attributes
    ]
    return NumericalSparseObliqueCondition(
        attributes, list(condition_type.oblique_condition.weights),
        condition_type.oblique_condition.threshold, core_condition.na_value)
  raise ValueError(f"Non supported condition type: {core_condition}")
def column_spec_bitmap_to_items(column_spec: data_spec_pb2.Column,
                                bitmap: bytes) -> Union[List[int], List[str]]:
  """Converts a mask-bitmap into a list of elements."""
  selected = []
  num_values = column_spec.categorical.number_of_unique_values
  for value_idx in range(num_values):
    # Bit value_idx of the bitmap; bits are packed little-endian in each byte.
    if bitmap[value_idx // 8] & (1 << (value_idx % 8)):
      selected.append(
          dataspec_lib.categorical_value_idx_to_value(column_spec, value_idx))
  return selected
def column_spec_items_to_bitmap(column_spec: data_spec_pb2.Column,
                                items: List[int]) -> bytes:
  """Converts a list of elements into a mask-bitmap."""
  num_values = column_spec.categorical.number_of_unique_values
  # One bit per possible categorical value, rounded up to whole bytes;
  # bytearray(n) is zero-initialized.
  buffer = bytearray((num_values + 7) // 8)
  for item in items:
    buffer[item // 8] |= 1 << (item & 7)
  return bytes(buffer)
def set_core_node(condition: AbstractCondition,
                  dataspec: data_spec_pb2.DataSpecification,
                  core_node: decision_tree_pb2.Node):
  """Sets a core node (proto format) from a python value.

  Args:
    condition: Condition to export.
    dataspec: Dataspec describing the columns the condition refers to.
    core_node: Output proto node; its "condition" field is populated in place.

  Raises:
    ValueError: If the condition has no features.
    NotImplementedError: If the condition type is not supported.
  """
  core_condition = core_node.condition
  # NOTE(review): missing_evaluation can be None (e.g. IsMissingInCondition);
  # assigning None to a proto bool field raises -- confirm callers only pass
  # conditions with a concrete missing_evaluation here.
  core_condition.na_value = condition.missing_evaluation
  features = condition.features()
  if not features:
    raise ValueError("Condition without features")
  # The proto stores one primary attribute index; oblique conditions also
  # store the full attribute list below.
  core_condition.attribute = dataspec_lib.column_name_to_column_idx(
      features[0].name, dataspec)
  feature_column = dataspec.columns[core_condition.attribute]
  if isinstance(condition, IsMissingInCondition):
    core_condition.condition.na_condition.SetInParent()
  elif isinstance(condition, IsTrueCondition):
    core_condition.condition.true_value_condition.SetInParent()
  elif isinstance(condition, NumericalHigherThanCondition):
    core_condition.condition.higher_condition.threshold = condition.threshold
  elif isinstance(condition,
                  (CategoricalIsInCondition, CategoricalSetContainsCondition)):
    mask = condition.mask
    if mask and isinstance(mask[0], str):
      # Converts the mask to a list of integers.
      mask = [feature_column.categorical.items[value].index for value in mask]
    # Select the most efficient way to represent the mask
    # NOTE(review): compares len(mask)*32*8 against the number of unique
    # values; if the intent is bits-of-int-list vs bits-of-bitmap, the extra
    # *8 strongly biases toward the bitmap -- confirm the intended units.
    if len(mask) * 32 * 8 > feature_column.categorical.number_of_unique_values:
      # A bitmap is more efficient.
      core_condition.condition.contains_bitmap_condition.elements_bitmap = column_spec_items_to_bitmap(
          feature_column, mask)
    else:
      # A list of indices is more efficient.
      core_condition.condition.contains_condition.elements[:] = mask
  elif isinstance(condition, NumericalSparseObliqueCondition):
    oblique = core_condition.condition.oblique_condition
    oblique.attributes[:] = [
        dataspec_lib.column_name_to_column_idx(feature.name, dataspec)
        for feature in features
    ]
    oblique.weights[:] = condition.weights
    oblique.threshold = condition.threshold
  else:
    raise NotImplementedError("No supported value type")
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  @test_util.run_deprecated_v1
  def test_assert_ops_in_graph(self):
    # assert_ops_in_graph must find an existing op by name/type, and raise
    # ValueError for a missing name or a mismatched op type.
    with self.test_session():
      constant_op.constant(["hello", "taffy"], name="hello")
      test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"bye": "Const"}, ops.get_default_graph())
    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"hello": "Variable"}, ops.get_default_graph())
  @test_util.run_deprecated_v1
  def test_session_functions(self):
    # Checks the caching contract: cached_session/test_session return the same
    # (still-open) session across calls, while session() creates a fresh
    # session that is closed on scope exit.
    with self.test_session() as sess:
      sess_ref = weakref.ref(sess)
      with self.cached_session(graph=None, config=None) as sess2:
        # We make sure that sess2 is sess.
        assert sess2 is sess
        # We make sure we raise an exception if we use cached_session with
        # different values.
        with self.assertRaises(ValueError):
          with self.cached_session(graph=ops.Graph()) as sess2:
            pass
        with self.assertRaises(ValueError):
          with self.cached_session(force_gpu=True) as sess2:
            pass
    # We make sure that test_session will cache the session even after the
    # with scope.
    assert not sess_ref()._closed
    with self.session() as unique_sess:
      unique_sess_ref = weakref.ref(unique_sess)
      with self.session() as sess2:
        assert sess2 is not unique_sess
    # We make sure the session is closed when we leave the with statement.
    assert unique_sess_ref()._closed
  def test_assert_equal_graph_def(self):
    # Builds the same two constants in opposite order in two graphs: the
    # serialized protos differ textually, but assert_equal_graph_def must
    # treat them as equal, and must reject a graph with extra nodes.
    with ops.Graph().as_default() as g:
      def_empty = g.as_graph_def()
      constant_op.constant(5, name="five")
      constant_op.constant(7, name="seven")
      def_57 = g.as_graph_def()
    with ops.Graph().as_default() as g:
      constant_op.constant(7, name="seven")
      constant_op.constant(5, name="five")
      def_75 = g.as_graph_def()
    # Comparing strings is order dependent
    self.assertNotEqual(str(def_57), str(def_75))
    # assert_equal_graph_def doesn't care about order
    test_util.assert_equal_graph_def(def_57, def_75)
    # Compare two unequal graphs
    with self.assertRaisesRegexp(AssertionError,
                                 r"^Found unexpected node '{{node seven}}"):
      test_util.assert_equal_graph_def(def_57, def_empty)
  def testIsGoogleCudaEnabled(self):
    # The test doesn't assert anything. It ensures the py wrapper
    # function is generated correctly.
    if test_util.IsGoogleCudaEnabled():
      print("GoogleCuda is enabled")
    else:
      print("GoogleCuda is disabled")
  def testIsMklEnabled(self):
    # This test doesn't assert anything.
    # It ensures the py wrapper function is generated correctly.
    if test_util.IsMklEnabled():
      print("MKL is enabled")
    else:
      print("MKL is disabled")
  @test_util.run_in_graph_and_eager_modes
  def testAssertProtoEqualsStr(self):
    # assertProtoEquals must accept either a text-format string or a proto
    # message as the expected value.
    graph_str = "node { name: 'w1' op: 'params' }"
    graph_def = graph_pb2.GraphDef()
    text_format.Merge(graph_str, graph_def)
    # test string based comparison
    self.assertProtoEquals(graph_str, graph_def)
    # test original comparison
    self.assertProtoEquals(graph_def, graph_def)
  @test_util.run_in_graph_and_eager_modes
  def testAssertProtoEqualsAny(self):
    # Test assertProtoEquals with a protobuf.Any field.
    meta_graph_def_str = """
    meta_info_def {
      meta_graph_version: "outer"
      any_info {
        [type.googleapis.com/tensorflow.MetaGraphDef] {
          meta_info_def {
            meta_graph_version: "inner"
          }
        }
      }
    }
    """
    meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
    meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
    meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
    meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
    meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
    self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
    self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
    # Check if the assertion failure message contains the content of
    # the inner proto.
    with self.assertRaisesRegexp(AssertionError,
                                 r'meta_graph_version: "inner"'):
      self.assertProtoEquals("", meta_graph_def_outer)
  @test_util.run_in_graph_and_eager_modes
  def testNDArrayNear(self):
    # _NDArrayNear returns a bool rather than raising.
    a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
    self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
    self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
  @test_util.run_in_graph_and_eager_modes
  def testCheckedThreadSucceeds(self):
    # A checkedThread whose target runs cleanly must join without error.
    def noop(ev):
      ev.set()
    event_arg = threading.Event()
    self.assertFalse(event_arg.is_set())
    t = self.checkedThread(target=noop, args=(event_arg,))
    t.start()
    t.join()
    self.assertTrue(event_arg.is_set())
  @test_util.run_in_graph_and_eager_modes
  def testCheckedThreadFails(self):
    # An exception raised inside the thread must be re-raised from join() as
    # this test case's failureException.
    def err_func():
      return 1 // 0
    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("integer division or modulo by zero" in str(fe.exception))
  @test_util.run_in_graph_and_eager_modes
  def testCheckedThreadWithWrongAssertionFails(self):
    # A failed unittest assertion inside the thread must also surface at
    # join() time.
    x = 37
    def err_func():
      self.assertTrue(x < 10)
    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("False is not true" in str(fe.exception))
  @test_util.run_in_graph_and_eager_modes
  def testMultipleThreadsWithOneFailure(self):
    # Only the thread whose assertion failed (i == 7) reports a failure; the
    # other nine join cleanly.
    def err_func(i):
      self.assertTrue(i != 7)
    threads = [
        self.checkedThread(
            target=err_func, args=(i,)) for i in range(10)
    ]
    for t in threads:
      t.start()
    for i, t in enumerate(threads):
      if i == 7:
        with self.assertRaises(self.failureException):
          t.join()
      else:
        t.join()
  def _WeMustGoDeeper(self, msg):
    # Helper: raises an op error from an op that has an original_op chain and
    # wraps it in assertRaisesOpError(msg), so the caller can probe which
    # strings (message, op name, original-op name) the matcher searches.
    with self.assertRaisesOpError(msg):
      with ops.Graph().as_default():
        node_def = ops._NodeDef("IntOutput", "name")
        node_def_orig = ops._NodeDef("IntOutput", "orig")
        op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
        op = ops.Operation(node_def, ops.get_default_graph(),
                           original_op=op_orig)
        raise errors.UnauthenticatedError(node_def, op, "true_err")
  @test_util.run_in_graph_and_eager_modes
  def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
    # The matcher must reject an unrelated message but match the error text,
    # the op name, and the original op's name.
    with self.assertRaises(AssertionError):
      self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
    self._WeMustGoDeeper("true_err")
    self._WeMustGoDeeper("name")
    self._WeMustGoDeeper("orig")
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseTensors(self):
    # assertAllClose must accept tensors, raw nested lists, dicts of tensors,
    # and lists mixing both.
    a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    a = constant_op.constant(a_raw_data)
    b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
    self.assertAllClose(a, b)
    self.assertAllClose(a, a_raw_data)
    a_dict = {"key": a}
    b_dict = {"key": b}
    self.assertAllClose(a_dict, b_dict)
    x_list = [a, b]
    y_list = [a_raw_data, b]
    self.assertAllClose(x_list, y_list)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseScalars(self):
    # Default tolerance admits 1e-8 but not 1e-5 differences.
    self.assertAllClose(7, 7 + 1e-8)
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(7, 7 + 1e-5)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseList(self):
    with self.assertRaisesRegexp(AssertionError, r"not close dif"):
      self.assertAllClose([0], [1])
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseDictToNonDict(self):
    # Dict vs non-dict must fail in both argument orders.
    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
      self.assertAllClose(1, {"a": 1})
    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
      self.assertAllClose({"a": 1}, 1)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseNamedtuples(self):
    # Namedtuples compare like their field mapping.
    a = 7
    b = (2., 3.)
    c = np.ones((3, 2, 4)) * 7.
    expected = {"a": a, "b": b, "c": c}
    my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
    # Identity.
    self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
    self.assertAllClose(
        my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseDicts(self):
    # Dicts must match on keys, shapes, and values within tolerance.
    a = 7
    b = (2., 3.)
    c = np.ones((3, 2, 4)) * 7.
    expected = {"a": a, "b": b, "c": c}
    # Identity.
    self.assertAllClose(expected, expected)
    self.assertAllClose(expected, dict(expected))
    # With each item removed.
    for k in expected:
      actual = dict(expected)
      del actual[k]
      with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
        self.assertAllClose(expected, actual)
    # With each item changed.
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
    with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
      self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
    c_copy = np.array(c)
    c_copy[1, 1, 1] += 1e-5
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseListOfNamedtuples(self):
    # Namedtuples compare against plain tuples element-wise.
    my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
    l1 = [
        my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
        my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
    ]
    l2 = [
        ([[2.3, 2.5]], [[0.97, 0.96]]),
        ([[3.3, 3.5]], [[0.98, 0.99]]),
    ]
    self.assertAllClose(l1, l2)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseNestedStructure(self):
    # A failure inside a nested structure must report the full key path.
    a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
    self.assertAllClose(a, a)
    b = copy.deepcopy(a)
    self.assertAllClose(a, b)
    # Test mismatched values
    b["y"][1][0]["nested"]["n"] = 4.2
    with self.assertRaisesRegexp(AssertionError,
                                 r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
      self.assertAllClose(a, b)
  @test_util.run_in_graph_and_eager_modes
  def testArrayNear(self):
    # assertArrayNear: length mismatch fails, nested lists are a TypeError,
    # equal flat lists pass.
    a = [1, 2]
    b = [1, 2, 5]
    with self.assertRaises(AssertionError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [[1, 2], [3, 4]]
    with self.assertRaises(TypeError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [1, 2]
    self.assertArrayNear(a, b, 0.001)
  @test_util.skip_if(True)  # b/117665998
  def testForceGPU(self):
    with self.assertRaises(errors.InvalidArgumentError):
      with self.test_session(force_gpu=True):
        # this relies on us not having a GPU implementation for assert, which
        # seems sensible
        x = constant_op.constant(True)
        y = [15]
        control_flow_ops.Assert(x, y).run()
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllCloseAccordingToType(self):
    # For each dtype, values within that dtype's tolerance pass and values one
    # order of magnitude apart beyond it fail; both np arrays and tensors are
    # exercised.
    # test plain int
    self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)
    # test float64
    self.assertAllCloseAccordingToType(
        np.asarray([1e-8], dtype=np.float64),
        np.asarray([2e-8], dtype=np.float64),
        rtol=1e-8, atol=1e-8
    )
    self.assertAllCloseAccordingToType(
        constant_op.constant([1e-8], dtype=dtypes.float64),
        constant_op.constant([2e-8], dtype=dtypes.float64),
        rtol=1e-8,
        atol=1e-8)
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-7], dtype=np.float64),
          np.asarray([2e-7], dtype=np.float64),
          rtol=1e-8, atol=1e-8
      )
    # test float32
    self.assertAllCloseAccordingToType(
        np.asarray([1e-7], dtype=np.float32),
        np.asarray([2e-7], dtype=np.float32),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7
    )
    self.assertAllCloseAccordingToType(
        constant_op.constant([1e-7], dtype=dtypes.float32),
        constant_op.constant([2e-7], dtype=dtypes.float32),
        rtol=1e-8,
        atol=1e-8,
        float_rtol=1e-7,
        float_atol=1e-7)
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-6], dtype=np.float32),
          np.asarray([2e-6], dtype=np.float32),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7
      )
    # test float16
    self.assertAllCloseAccordingToType(
        np.asarray([1e-4], dtype=np.float16),
        np.asarray([2e-4], dtype=np.float16),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7,
        half_rtol=1e-4, half_atol=1e-4
    )
    self.assertAllCloseAccordingToType(
        constant_op.constant([1e-4], dtype=dtypes.float16),
        constant_op.constant([2e-4], dtype=dtypes.float16),
        rtol=1e-8,
        atol=1e-8,
        float_rtol=1e-7,
        float_atol=1e-7,
        half_rtol=1e-4,
        half_atol=1e-4)
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-3], dtype=np.float16),
          np.asarray([2e-3], dtype=np.float16),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7,
          half_rtol=1e-4, half_atol=1e-4
      )
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllEqual(self):
    # assertAllEqual works against variables, constants, and computed tensors.
    i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
    j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
    k = math_ops.add(i, j, name="k")
    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual([100] * 3, i)
    self.assertAllEqual([120] * 3, k)
    self.assertAllEqual([20] * 3, j)
    with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
      self.assertAllEqual([0] * 3, k)
  @test_util.run_in_graph_and_eager_modes
  def testAssertNotAllClose(self):
    # assertNotAllClose is the negation: it must fail when values are close.
    # Test with arrays
    self.assertNotAllClose([0.1], [0.2])
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
    # Test with tensors
    x = constant_op.constant([1.0, 1.0], name="x")
    y = math_ops.add(x, x)
    self.assertAllClose([2.0, 2.0], y)
    self.assertNotAllClose([0.9, 1.0], x)
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([1.0, 1.0], x)
  @test_util.run_in_graph_and_eager_modes
  def testAssertNotAllCloseRTol(self):
    # A relative tolerance wide enough to cover the difference makes
    # assertNotAllClose fail.
    # Test with arrays
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
    # Test with tensors
    x = constant_op.constant([1.0, 1.0], name="x")
    y = math_ops.add(x, x)
    self.assertAllClose([2.0, 2.0], y)
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
  @test_util.run_in_graph_and_eager_modes
  def testAssertNotAllCloseATol(self):
    # Same as above but with an absolute tolerance.
    # Test with arrays
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
    # Test with tensors
    x = constant_op.constant([1.0, 1.0], name="x")
    y = math_ops.add(x, x)
    self.assertAllClose([2.0, 2.0], y)
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllGreaterLess(self):
    # Strict elementwise bound checks: every element must satisfy the bound.
    x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
    y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
    z = math_ops.add(x, y)
    self.assertAllClose([110.0, 120.0, 130.0], z)
    self.assertAllGreater(x, 95.0)
    self.assertAllLess(x, 125.0)
    with self.assertRaises(AssertionError):
      self.assertAllGreater(x, 105.0)
    with self.assertRaises(AssertionError):
      self.assertAllGreater(x, 125.0)
    with self.assertRaises(AssertionError):
      self.assertAllLess(x, 115.0)
    with self.assertRaises(AssertionError):
      self.assertAllLess(x, 95.0)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllGreaterLessEqual(self):
    # Non-strict (>=, <=) variants of the elementwise bound checks.
    x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
    y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
    z = math_ops.add(x, y)
    self.assertAllEqual([110.0, 120.0, 130.0], z)
    self.assertAllGreaterEqual(x, 95.0)
    self.assertAllLessEqual(x, 125.0)
    with self.assertRaises(AssertionError):
      self.assertAllGreaterEqual(x, 105.0)
    with self.assertRaises(AssertionError):
      self.assertAllGreaterEqual(x, 125.0)
    with self.assertRaises(AssertionError):
      self.assertAllLessEqual(x, 115.0)
    with self.assertRaises(AssertionError):
      self.assertAllLessEqual(x, 95.0)
  @test_util.run_deprecated_v1
  def testAssertAllInRangeWithNonNumericValuesFails(self):
    # Strings, complex numbers and booleans are rejected by assertAllInRange.
    s1 = constant_op.constant("Hello, ", name="s1")
    c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
    b = constant_op.constant([False, True], name="b")
    with self.assertRaises(AssertionError):
      self.assertAllInRange(s1, 0.0, 1.0)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(c, 0.0, 1.0)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(b, 0, 1)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRange(self):
    # Bounds are inclusive by default; opening either bound excludes the
    # boundary values.
    x = constant_op.constant([10.0, 15.0], name="x")
    self.assertAllInRange(x, 10, 15)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 10, 15, open_lower_bound=True)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 10, 15, open_upper_bound=True)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(
          x, 10, 15, open_lower_bound=True, open_upper_bound=True)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRangeErrorMessageEllipses(self):
    # A large out-of-range tensor must still produce a (truncated) failure.
    x_init = np.array([[10.0, 15.0]] * 12)
    x = constant_op.constant(x_init, name="x")
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 5, 10)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRangeDetectsNaNs(self):
    # NaNs are never in range, regardless of the bounds.
    x = constant_op.constant(
        [[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 0.0, 2.0)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRangeWithInfinities(self):
    # inf is allowed as a closed upper bound but not as an open one.
    x = constant_op.constant([10.0, np.inf], name="x")
    self.assertAllInRange(x, 10, np.inf)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInSet(self):
    # The allowed-values collection may be a list, tuple or set.
    b = constant_op.constant([True, False], name="b")
    x = constant_op.constant([13, 37], name="x")
    self.assertAllInSet(b, [False, True])
    self.assertAllInSet(b, (False, True))
    self.assertAllInSet(b, {False, True})
    self.assertAllInSet(x, [0, 13, 37, 42])
    self.assertAllInSet(x, (0, 13, 37, 42))
    self.assertAllInSet(x, {0, 13, 37, 42})
    with self.assertRaises(AssertionError):
      self.assertAllInSet(b, [False])
    with self.assertRaises(AssertionError):
      self.assertAllInSet(x, (42,))
  @test_util.run_deprecated_v1
  def testRandomSeed(self):
    """setUp() must reset the Python, NumPy and TF random seeds identically."""
    # Call setUp again for WithCApi case (since it makes a new default graph
    # after setup).
    # TODO(skyewm): remove this when C API is permanently enabled.
    self.setUp()
    a = random.randint(1, 1000)
    a_np_rand = np.random.rand(1)
    with self.test_session():
      a_rand = random_ops.random_normal([1]).eval()
    # ensure that randomness in multiple testCases is deterministic.
    self.setUp()
    b = random.randint(1, 1000)
    b_np_rand = np.random.rand(1)
    with self.test_session():
      b_rand = random_ops.random_normal([1]).eval()
    # Identical draws before and after the second setUp() prove the seeds
    # were re-initialized rather than left running.
    self.assertEqual(a, b)
    self.assertEqual(a_np_rand, b_np_rand)
    self.assertEqual(a_rand, b_rand)
@test_util.run_in_graph_and_eager_modes
def test_callable_evaluate(self):
def model():
return resource_variable_ops.ResourceVariable(
name="same_name",
initial_value=1) + 1
with context.eager_mode():
self.assertEqual(2, self.evaluate(model))
@test_util.run_in_graph_and_eager_modes
def test_nested_tensors_evaluate(self):
expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
nested = {"a": constant_op.constant(1),
"b": constant_op.constant(2),
"nested": {"d": constant_op.constant(3),
"e": constant_op.constant(4)}}
self.assertEqual(expected, self.evaluate(nested))
  def test_run_in_graph_and_eager_modes(self):
    """The decorator runs a body once in graph mode and once in eager mode.

    Checked both bare (``@run_in_graph_and_eager_modes``) and called with
    brackets (``@run_in_graph_and_eager_modes()``).
    """
    l = []
    def inc(self, with_brackets):
      del self  # self argument is required by run_in_graph_and_eager_modes.
      mode = "eager" if context.executing_eagerly() else "graph"
      with_brackets = "with_brackets" if with_brackets else "without_brackets"
      l.append((with_brackets, mode))
    f = test_util.run_in_graph_and_eager_modes(inc)
    f(self, with_brackets=False)
    f = test_util.run_in_graph_and_eager_modes()(inc)
    f(self, with_brackets=True)
    # Each decorated invocation must have recorded both execution modes.
    self.assertEqual(len(l), 4)
    self.assertEqual(set(l), {
        ("with_brackets", "graph"),
        ("with_brackets", "eager"),
        ("without_brackets", "graph"),
        ("without_brackets", "eager"),
    })
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
  def test_run_in_eager_and_graph_modes_test_class(self):
    """Applying the decorator to a whole class (not a method) must raise."""
    msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
    with self.assertRaisesRegexp(ValueError, msg):
      @test_util.run_in_graph_and_eager_modes()
      class Foo(object):
        pass
      del Foo  # Make pylint unused happy.
def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
modes = []
def _test(self):
if not context.executing_eagerly():
self.skipTest("Skipping in graph mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["eager"])
def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
modes = []
def _test(self):
if context.executing_eagerly():
self.skipTest("Skipping in eager mode")
modes.append("eager" if context.executing_eagerly() else "graph")
test_util.run_in_graph_and_eager_modes(_test)(self)
self.assertEqual(modes, ["graph"])
  @test_util.run_deprecated_v1
  def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
    """setUp() must execute in the same mode as the decorated test body."""
    modes = []
    mode_name = lambda: "eager" if context.executing_eagerly() else "graph"
    class ExampleTest(test_util.TensorFlowTestCase):
      def runTest(self):
        pass
      def setUp(self):
        modes.append("setup_" + mode_name())
      @test_util.run_in_graph_and_eager_modes
      def testBody(self):
        modes.append("run_" + mode_name())
    e = ExampleTest()
    e.setUp()
    e.testBody()
    # Graph pass runs first (setup then body), followed by the eager pass.
    self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
    self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
  @parameterized.named_parameters(dict(testcase_name="argument",
                                       arg=True))
  @test_util.run_in_graph_and_eager_modes
  def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
    """The decorator forwards parameterized keyword arguments unchanged."""
    self.assertEqual(arg, True)
  def test_build_as_function_and_v1_graph(self):
    """build_as_function_and_v1_graph synthesizes both test variants.

    The decorator is expected to generate ``<name>_v1_graph`` and
    ``<name>_function`` methods that run the body in a plain graph and
    inside a function-building graph respectively.
    """
    class GraphModeAndFuncionTest(parameterized.TestCase):
      def __init__(inner_self):  # pylint: disable=no-self-argument
        super(GraphModeAndFuncionTest, inner_self).__init__()
        inner_self.graph_mode_tested = False
        inner_self.inside_function_tested = False
      def runTest(self):
        del self
      @test_util.build_as_function_and_v1_graph
      def test_modes(inner_self):  # pylint: disable=no-self-argument
        # building_function distinguishes the two synthesized variants.
        is_building_function = ops.get_default_graph().building_function
        if is_building_function:
          self.assertFalse(inner_self.inside_function_tested)
          inner_self.inside_function_tested = True
        else:
          self.assertFalse(inner_self.graph_mode_tested)
          inner_self.graph_mode_tested = True
    test_object = GraphModeAndFuncionTest()
    test_object.test_modes_v1_graph()
    test_object.test_modes_function()
    self.assertTrue(test_object.graph_mode_tested)
    self.assertTrue(test_object.inside_function_tested)
def test_with_forward_compatibility_horizons(self):
tested_codepaths = set()
def some_function_with_forward_compat_behavior():
if compat.forward_compatible(2050, 1, 1):
tested_codepaths.add("future")
else:
tested_codepaths.add("present")
@test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
def some_test(self):
del self # unused
some_function_with_forward_compat_behavior()
some_test(None)
self.assertEqual(tested_codepaths, set(["present", "future"]))
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
  """Regression test: variables must not be shared across graph/eager runs."""
  def setUp(self):
    pass  # Intentionally does not call TensorFlowTestCase's super()
  @test_util.run_in_graph_and_eager_modes
  def test_no_variable_sharing(self):
    # get_variable() would fail on the second (eager) pass if state from the
    # graph pass leaked through, since setUp() skipped the usual reset.
    variable_scope.get_variable(
        name="step_size",
        initializer=np.array(1e-5, np.float32),
        use_resource=True,
        trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
  """Tests for the garbage/leak-detection test decorators."""
  def test_no_reference_cycle_decorator(self):
    """assert_no_garbage_created flags reference cycles made by the body."""
    class ReferenceCycleTest(object):
      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
      @test_util.assert_no_garbage_created
      def test_has_cycle(self):
        a = []
        a.append(a)  # self-referencing list forms a collectable cycle
      @test_util.assert_no_garbage_created
      def test_has_no_cycle(self):
        pass
    with self.assertRaises(AssertionError):
      ReferenceCycleTest().test_has_cycle()
    ReferenceCycleTest().test_has_no_cycle()
  @test_util.run_in_graph_and_eager_modes
  def test_no_leaked_tensor_decorator(self):
    """assert_no_new_tensors flags tensors kept alive past the body."""
    class LeakedTensorTest(object):
      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
      @test_util.assert_no_new_tensors
      def test_has_leak(self):
        # Storing the tensor on self keeps it alive after the test body.
        self.a = constant_op.constant([3.], name="leak")
      @test_util.assert_no_new_tensors
      def test_has_no_leak(self):
        constant_op.constant([3.], name="no-leak")
    with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
      LeakedTensorTest().test_has_leak()
    LeakedTensorTest().test_has_no_leak()
  def test_no_new_objects_decorator(self):
    """assert_no_new_pyobjects_executing_eagerly flags object accumulation."""
    class LeakedObjectTest(object):
      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
        inner_self.accumulation = []
      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_leak(self):
        self.accumulation.append([1.])
      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_no_leak(self):
        self.not_accumulating = [1.]
    with self.assertRaises(AssertionError):
      LeakedObjectTest().test_has_leak()
    LeakedObjectTest().test_has_no_leak()
# Standard test-module entry point.
if __name__ == "__main__":
  googletest.main()
| |
import asyncio
import json
import mimetypes
import os
import re
import time
from aiohttp import Response, EofStream
from aiohttp.server import ServerHttpProtocol
from aiohttp.websocket import do_handshake, MSG_PING, MSG_TEXT, MSG_CLOSE
# Regexes for the subset of the IRC server protocol this client understands,
# keyed by the handler suffix used in IRCClient.data_received ('on_<key>').
COMMANDS = {
    'ping': re.compile(r'PING :(?P<data>.*)'),
    'join': re.compile(r':(?P<nick>\S+)!\S+@\S+ JOIN (?P<channel>\S+)'),
    'message': re.compile(r':(?P<nick>\S+)!\S+@\S+ PRIVMSG (?P<target>\S+) :\s*(?P<data>\S+.*)$'),
    'names': re.compile(r':(?P<mask>\S+) 353 (?P<nick>\S+) @ (?P<channel>\S+) :\s*(?P<names>.*)\s+:(?P=mask) 366'),
    'notice': re.compile(r':(?P<nick>\S+)!\S+@\S+ NOTICE (?P<target>\S+) :\s*(?P<data>\S+.*)$'),
    'part': re.compile(r':(?P<nick>\S+)!\S+@\S+ PART (?P<channels>\S+.*)$'),
    'quit': re.compile(r':(?P<nick>\S+)!\S+@\S+ QUIT :\s*(?P<data>\S+.*)$'),
}
class IRCClient(asyncio.Protocol):
    """Base IRC client protocol.

    Bridges a raw IRC server connection and a websocket peer (``ws``):
    recognized IRC commands are forwarded as JSON messages, everything
    else is passed through verbatim.
    """
    def __init__(self, ws):
        self.ws = ws
        super().__init__()
    def connection_made(self, transport):
        """Initialize per-connection state and notify the websocket."""
        self.transport = transport
        self.closed = False
        self.nick = None
        self.channel = None
        self.joined = False
        self.ws.send(json.dumps({'status': 'connected'}))
    def data_received(self, data):
        """Dispatch a raw server message to the first matching on_* handler."""
        message = data.decode('utf8', 'ignore')
        handled = False
        for cmd, regex in COMMANDS.items():
            match = regex.search(message)
            if match:
                func = getattr(self, 'on_%s' % cmd, None)
                if func is not None:
                    func(**match.groupdict())
                    handled = True
                    break
        if not handled:
            # Unrecognized traffic is forwarded to the websocket unchanged.
            self.ws.send(message)
    def connection_lost(self, exc):
        self.close()
        self.ws.send(json.dumps({'status': 'disconnected'}))
    def on_join(self, nick, channel):
        """Report our own join as a status change, others' as new members."""
        if channel == self.channel:
            if nick == self.nick:
                self.joined = True
                self.ws.send(json.dumps({'status': 'joined', 'channel': channel}))
            else:
                self.ws.send(json.dumps({'member': nick, 'action': 'add'}))
    def on_message(self, nick, target, data):
        self.ws.send(json.dumps({
            'nick': nick,
            'target': target,
            'message': data,
        }))
    def on_names(self, mask, nick, channel, names):
        """Forward the initial member list (353 reply) for our channel."""
        if channel == self.channel and nick == self.nick:
            for member in names.split(' '):
                # Strip op (@) and voice (+) prefixes from member names.
                self.ws.send(json.dumps({
                    'member': member.lstrip('@').lstrip('+'),
                    'action': 'add'
                }))
    def on_notice(self, nick, target, data):
        self.ws.send(json.dumps({
            'nick': nick,
            'target': target,
            'notice': data,
        }))
    def on_part(self, nick, channels):
        """Report members leaving our channel to the websocket."""
        for channel in channels.split(','):
            # BUG FIX: was `this.channel` (a NameError at runtime).
            if channel == self.channel:
                self.ws.send(json.dumps({'member': nick, 'action': 'remove'}))
    def on_ping(self, data):
        # Keep-alive: answer server PINGs so we aren't disconnected.
        self.send('PONG %s' % data)
    def on_quit(self, nick, data):
        self.ws.send(json.dumps({'member': nick, 'action': 'remove'}))
    def send(self, message):
        """Write a CRLF-terminated line to the IRC server."""
        if message:
            if not message.endswith('\r\n'):
                message += '\r\n'
            self.transport.write(message.encode('utf8'))
    def close(self):
        """Send QUIT and close the transport exactly once."""
        if not self.closed:
            try:
                self.send('QUIT')
                self.transport.close()
            finally:
                self.closed = True
    def login(self, username, channel, nick=None, password=None):
        """Register with the server and join the requested channel."""
        self.nick = nick or username
        self.channel = channel
        if password is not None:
            self.send('PASS %s' % password)
        self.send('USER %s irc.freenode.net irc.freenode.net Test IRC WebClient' % username)
        self.send('NICK %s' % self.nick)
        self.send('JOIN %s' % self.channel)
    def message(self, message):
        """Send a PRIVMSG to our channel (only once the join completed)."""
        # TODO: Handle message when the connection has closed.
        if self.joined:
            self.send('PRIVMSG {0} :{1}'.format(self.channel, message))
class WebClient(object):
    """Encapsulation of client logic.

    Owns one websocket connection and the matching upstream IRC connection,
    relaying messages between the two.
    """
    def __init__(self, loop, reader, writer):
        # reader/writer are the websocket message parser / writer pair.
        self.loop = loop
        self.reader = reader
        self.writer = writer
    @asyncio.coroutine
    def run(self):
        """Main loop for reading from the socket and delegating messages."""
        # Open the upstream IRC connection; IRCClient reports back through us.
        _, self.irc = yield from self.loop.create_connection(lambda: IRCClient(ws=self), 'irc.freenode.net', 6667)
        while True:
            try:
                msg = yield from self.reader.read()
            except EofStream:
                # client dropped connection
                break
            else:
                if msg.tp == MSG_PING:
                    self.writer.pong()
                elif msg.tp == MSG_TEXT:
                    data = msg.data.strip()
                    self.on_message(data)
                elif msg.tp == MSG_CLOSE:
                    break
        # Tear down the IRC side whichever way the websocket loop exits.
        self.irc.close()
    def on_message(self, message):
        """Handle incoming message from the socket."""
        try:
            data = json.loads(message)
        except ValueError:
            print('Received non-JSON message: %s' % message)
        else:
            if 'action' in data:
                action = data.pop('action')
                if action == 'login':
                    # Remaining keys become login() keyword arguments.
                    self.irc.login(**data)
            elif 'message' in data:
                # Pass message to IRC connection
                self.irc.message(data['message'])
    def send(self, message):
        """Send message to the websocket."""
        self.writer.send(message.encode('utf8'))
class HttpServer(ServerHttpProtocol):
    """HTTP handler: upgrades websocket requests, serves ./static otherwise."""
    @asyncio.coroutine
    def handle_request(self, message, payload):
        now = time.time()
        upgrade = False
        for hdr, val in message.headers:
            if hdr == 'UPGRADE':
                upgrade = 'websocket' in val.lower()
                break
        if upgrade:
            # websocket handshake
            status, headers, parser, writer = do_handshake(
                message.method, message.headers, self.transport)
            resp = Response(self.transport, status)
            resp.add_headers(*headers)
            resp.send_headers()
            # install websocket parser
            reader = self.stream.set_parser(parser)
            # Hand the upgraded connection over to the websocket client loop.
            client = WebClient(self._loop, reader, writer)
            yield from client.run()
        else:
            # Serve static files
            response = Response(self.transport, 200)
            response.add_header('Transfer-Encoding', 'chunked')
            path = message.path.lstrip('/') or 'index.html'
            content_type, _ = mimetypes.guess_type(path)
            response.add_header('Content-type', content_type or 'text/html')
            response.send_headers()
            try:
                # NOTE(review): 8196 looks like a typo for the usual 8192
                # chunk size; harmless, so left as-is.
                with open(os.path.join('static', path), 'rb') as fp:
                    chunk = fp.read(8196)
                    while chunk:
                        if not response.write(chunk):
                            break
                        chunk = fp.read(8196)
            except OSError:
                # File missing/unreadable: defer to the default handler.
                drain = super().handle_request(message, payload)
            else:
                drain = response.write_eof()
            if response.keep_alive():
                self.keep_alive(True)
            self.log_access(message, None, response, time.time() - now)
            return drain
def main():
    """Run the combined HTTP/websocket server until interrupted."""
    loop = asyncio.get_event_loop()
    server_coro = loop.create_server(
        lambda: HttpServer(debug=True, keep_alive=75), '127.0.0.1', 8000)
    server = loop.run_until_complete(server_coro)
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the server; exit quietly.
        pass
if __name__ == '__main__':
    main()
| |
# The Nexus software is licensed under the BSD 2-Clause license.
#
# You should have received a copy of this license with the software.
# If you did not, you can find one at the following link.
#
# http://opensource.org/licenses/bsd-license.php
import reqs.pil, urllib, StringIO
from reqs.twisted.internet import reactor
from core.plugins import ProtocolPlugin
from core.decorators import *
from core.constants import *
class ImagedrawPlugin(ProtocolPlugin):
    """Draws a web-hosted image into the map as colored blocks.

    /rec_url stores an image URL on the client; /imagedraw projects that
    image's pixels onto a selected cuboid, quantizing each pixel to the
    nearest available colored block.
    """
    commands = {
        "rec_url": "commandRec_url",
        "imagedraw": "commandImagedraw",
    }
    @build_list
    @admin_only
    def commandRec_url(self, parts, fromloc, overriderank):
        "/rec_url URL - Builder\nRecords an url to later imagedraw it."
        if len(parts) == 1:
            self.client.sendServerMessage("Please specify an url (and '//' in the beginning to")
            self.client.sendServerMessage("extend an existing url)")
            return
        else:
            if parts[0] == '//rec_url':
                # Double-slash form appends to the URL recorded so far, so
                # very long URLs can be entered across several commands.
                try:
                    self.client.url = self.client.url + (" ".join(parts[1:])).strip()
                except:
                    # NOTE(review): bare except; AttributeError (no prior
                    # /rec_url) is the expected failure here.
                    self.client.sendServerMessage("You have not started recording and url yet")
                    return
            else:
                self.client.url = (" ".join(parts[1:])).strip()
            # Echo the recorded URL back in 64-character chat lines.
            var_divisions64 = int(len(self.client.url)/64)
            self.client.sendServerMessage("The url has been recorded as")
            for i in range(var_divisions64):
                self.client.sendServerMessage(self.client.url[i*64:(i+1)*64])
            self.client.sendServerMessage(self.client.url[var_divisions64*64:])
    @build_list
    @admin_only
    def commandImagedraw(self, parts, fromloc, overriderank):
        "/imagedraw [x y z x2 y2 z2] - Builder\nSets all blocks in this area to image."
        # NOTE(review): the explicit-coordinate branch below reads parts[3]
        # through parts[8], which requires len(parts) >= 9; this guard lets
        # len(parts) == 8 through and would then IndexError. The bound
        # presumably should be `len(parts) < 9` -- confirm before changing.
        if len(parts) < 8 and len(parts) != 2 and len(parts) != 3:
            self.client.sendServerMessage("Please enter whether to flip? (rotation)")
            self.client.sendServerMessage("(and possibly two coord triples)")
        else:
            if len(parts)==3:
                # Try getting the rotation
                try:
                    rotation = int(parts[2])
                except ValueError:
                    self.client.sendServerMessage("Rotation must be a Number.")
                    return
            else:
                rotation = 0
            # try to get flip?
            flip = parts[1]
            if flip=='true' or flip=='false':
                pass
            else:
                self.client.sendServerMessage("flip must be true or false")
                return
            # try to get url
            try:
                imageurl = self.client.url
            except:
                # NOTE(review): bare except; AttributeError is expected here.
                self.client.sendServerMessage("You have not recorded an url yet (use /rec_url).")
                return
            if imageurl.find('http:') == -1:
                self.client.sendServerMessage("You cannot access server files, only external files")
                return
            # If they only provided the type argument, use the last two block places
            if len(parts) == 2 or len(parts) == 3:
                try:
                    x, y, z = self.client.last_block_changes[0]
                    x2, y2, z2 = self.client.last_block_changes[1]
                except IndexError:
                    self.client.sendServerMessage("You have not clicked two corners yet.")
                    return
            else:
                try:
                    x = int(parts[3])
                    y = int(parts[4])
                    z = int(parts[5])
                    x2 = int(parts[6])
                    y2 = int(parts[7])
                    z2 = int(parts[8])
                except ValueError:
                    self.client.sendServerMessage("All coordinate parameters must be integers.")
                    return
            # Pick the drawing plane from whichever axis is constant.
            if y == y2:
                height = abs(x2-x)+1
                width = abs(z2-z)+1
                orientation = 0
            elif z == z2:
                height = abs(x2-x)+1
                width = abs(y2-y)+1
                orientation = 1
            else:
                # Neither y nor z constant: draw in the y/z plane at x.
                x2 = x
                height = abs(y2-y)+1
                width = abs(z2-z)+1
                orientation = 2
            try:
                u = urllib.urlopen(imageurl)
                f = StringIO.StringIO(u.read())
                image = Image.open(f)
            except:
                # NOTE(review): `Image` is not obviously in scope -- the file
                # imports `reqs.pil`, not `Image`; verify the import exposes
                # this name. The bare except would also hide such a NameError.
                self.client.sendServerMessage("The url or image is invalid")
                return
            if rotation != 0:
                image = image.rotate(rotation,3,1)
            # Scale to the target area and normalize the pixel format.
            image = image.resize((width,height),1)
            image = image.convert('RGBA')
            if x > x2:
                x, x2 = x2, x
            if y > y2:
                y, y2 = y2, y
            if z > z2:
                z, z2 = z2, z
            # Per-rank limit on how many blocks may be drawn at once.
            if self.client.isDirectorPlus() or overriderank:
                limit = self.client.factory.build_director
            elif self.client.isAdmin() or self.client.isCoder():
                limit = self.client.factory.build_admin
            elif self.client.isMod():
                limit = self.client.factory.build_mod
            elif self.client.isOp() or self.client.isWorldOwner():
                limit = self.client.factory.build_op
            else:
                limit = self.client.factory.build_other
            # Stop them doing silly things
            if height*width > limit:
                self.client.sendServerMessage("Sorry, that area is too big for you to imagedraw.")
                return
            # Draw all the blocks on, I guess
            # We use a generator so we can slowly release the blocks
            # We also keep world as a local so they can't change worlds and affect the new one
            world = self.client.world
            def generate_changes():
                for i in range(x, x2+1):
                    for j in range(y, y2+1):
                        for k in range(z, z2+1):
                            if not self.client.AllowedToBuild(i, j, k) and not overriderank:
                                return
                            # Sample the pixel that maps onto this block,
                            # honoring flip and the plane orientation.
                            if flip=='true':
                                if orientation == 0:
                                    r,g,b,a = image.getpixel((abs(k-z),abs(i-x)))
                                elif orientation == 1:
                                    r,g,b,a = image.getpixel((abs(j-y),abs(i-x)))
                                else:
                                    r,g,b,a = image.getpixel((abs(k-z),abs(j-y)))
                            else:
                                if orientation == 0:
                                    r,g,b,a = image.getpixel((width - (abs(k-z)+1),abs(i-x)))
                                elif orientation == 1:
                                    r,g,b,a = image.getpixel((width - (abs(j-y)+1),abs(i-x)))
                                else:
                                    r,g,b,a = image.getpixel((width - (abs(k-z)+1),abs(j-y)))
                            if a < 25:
                                # Mostly transparent pixel: leave air.
                                block = BLOCK_AIR
                            else:
                                # Quantize each channel to steps of 50 within
                                # [50, 200], then snap to the nearest color
                                # that has a matching block type below.
                                r = int(round(float(r)/50))*50
                                if r == 0:
                                    r = 50
                                if r == 250:
                                    r = 200
                                g = int(round(float(g)/50))*50
                                if g == 0:
                                    g = 50
                                if g == 250:
                                    g = 200
                                b = int(round(float(b)/50))*50
                                if b == 0:
                                    b = 50
                                if b == 250:
                                    b = 200
                                if r == 50:
                                    if g == 100:
                                        g = 50
                                    if g == 150:
                                        g = 200
                                if r == 100:
                                    if g == 50:
                                        g = 100
                                    if g == 200:
                                        g = 150
                                if r == 150:
                                    if g == 100:
                                        g = 150
                                if r==50 and g==50:
                                    b = 50
                                if r==50 and g==200:
                                    if b == 100:
                                        b = 150
                                if r==100 and g==100:
                                    b = 100
                                if r==100 and g==150:
                                    b = 200
                                if r==150 and g==50:
                                    b = 200
                                if r==150 and g==150:
                                    if b == 50 or b == 100:
                                        b = 150
                                if r==150 and g==200:
                                    b = 50
                                if r==200 and g==50:
                                    if b == 100:
                                        b = 150
                                if r==200 and g==100:
                                    b = 200
                                if r==200 and g==150:
                                    b = 50
                                if r==200 and g==200:
                                    if b == 100:
                                        b = 50
                                    if b == 150:
                                        b = 200
                                # Map the snapped color onto a block type.
                                if (r,g,b) == (200,50,50):
                                    block = BLOCK_RED
                                if (r,g,b) == (200,150,50):
                                    block = BLOCK_ORANGE
                                if (r,g,b) == (200,200,50):
                                    block = BLOCK_YELLOW
                                if (r,g,b) == (150,200,50):
                                    block = BLOCK_LIME
                                if (r,g,b) == (50,200,50):
                                    block = BLOCK_GREEN
                                if (r,g,b) == (50,200,150):
                                    block = BLOCK_TURQUOISE
                                if (r,g,b) == (50,200,200):
                                    block = BLOCK_CYAN
                                if (r,g,b) == (100,150,200):
                                    block = BLOCK_BLUE
                                if (r,g,b) == (150,150,200):
                                    block = BLOCK_INDIGO
                                if (r,g,b) == (150,50,200):
                                    block = BLOCK_VIOLET
                                if (r,g,b) == (200,100,200):
                                    block = BLOCK_PURPLE
                                if (r,g,b) == (200,50,200):
                                    block = BLOCK_MAGENTA
                                if (r,g,b) == (200,50,150):
                                    block = BLOCK_PINK
                                if (r,g,b) == (100,100,100):
                                    block = BLOCK_BLACK
                                if (r,g,b) == (150,150,150):
                                    block = BLOCK_GRAY
                                if (r,g,b) == (200,200,200):
                                    block = BLOCK_WHITE
                                if (r,g,b) == (50,50,50):
                                    block = BLOCK_OBSIDIAN
                            block = chr(block)
                            try:
                                world[i, j, k] = block
                            except AssertionError:
                                self.client.sendServerMessage("Out of bounds imagedraw error.")
                                return
                            self.client.queueTask(TASK_BLOCKSET, (i, j, k, block), world=world)
                            self.client.sendBlock(i, j, k, block)
                            yield
            # Now, set up a loop delayed by the reactor
            block_iter = iter(generate_changes())
            def do_step():
                # Do 10 blocks
                try:
                    for x in range(10):  # 10 blocks at a time, 10 blocks per tenths of a second, 100 blocks a second
                        block_iter.next()
                    reactor.callLater(0.01, do_step)  # This is how long (in seconds) it waits to run another 10 blocks
                except StopIteration:
                    if fromloc == "user":
                        self.client.sendServerMessage("Your imagedraw just completed.")
                    pass
            do_step()
| |
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import stat
import subprocess
from threading import Thread
from time import time, sleep
import os.path
from os.path import exists, join, expanduser
from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration
from mycroft.tts import TTS, TTSValidator
from mycroft.util.download import download
from mycroft.util.log import LOG
# Mimic-specific TTS settings from the merged user/system configuration.
config = Configuration.get().get("tts").get("mimic")
data_dir = expanduser(Configuration.get()['data_dir'])
# Path to the mimic binary; falls back to the bundled build location.
BIN = config.get("path",
                 os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
if not os.path.isfile(BIN):
    # Search for mimic on the path
    import distutils.spawn
    BIN = distutils.spawn.find_executable("mimic")
# Premium (subscriber-only) voices: voice name -> expected local binary path.
SUBSCRIBER_VOICES = {'trinity': join(data_dir, 'voices/mimic_tn')}
def download_subscriber_voices(selected_voice):
    """
    Download all premium voices, starting with the currently selected
    one (if applicable) so it becomes usable as soon as possible.
    """
    def make_executable(dest):
        """ Call back function to make the downloaded file executable. """
        LOG.info('Make executable')
        # make executable
        st = os.stat(dest)
        os.chmod(dest, st.st_mode | stat.S_IEXEC)
    def fetch(voice, announce=False):
        """ Synchronously download a single premium voice if it is missing.

        The duplicated download logic from the original selected-voice and
        catalogue loops is unified here; `announce` preserves the extra
        info log the selected-voice path emitted.
        """
        voice_file = SUBSCRIBER_VOICES.get(voice)
        if voice_file is None or exists(voice_file):
            return
        if announce:
            LOG.info('voice doesn\'t exist, downloading')
        url = DeviceApi().get_subscriber_voice_url(voice)
        # Check we got an url
        if url:
            dl = download(url, voice_file, make_executable)
            # Wait for completion
            while not dl.done:
                sleep(1)
        else:
            LOG.debug('{} is not available for this architecture'
                      .format(voice))
    # First download the selected voice if needed, then sweep the rest of
    # the subscriber catalogue (already-downloaded voices are skipped).
    fetch(selected_voice, announce=True)
    for voice in SUBSCRIBER_VOICES:
        fetch(voice)
class Mimic(TTS):
    """TTS engine wrapping the `mimic` command-line synthesizer."""
    def __init__(self, lang, config):
        super(Mimic, self).__init__(
            lang, config, MimicValidator(self), 'wav',
            ssml_tags=["speak", "ssml", "phoneme", "voice", "audio", "prosody"]
        )
        self.dl = None
        self.clear_cache()
        # Download subscriber voices if needed
        self.is_subscriber = DeviceApi().is_subscriber
        if self.is_subscriber:
            # Fetch premium voices in the background so startup isn't blocked.
            t = Thread(target=download_subscriber_voices, args=[self.voice])
            t.daemon = True
            t.start()
    def modify_tag(self, tag):
        """Map SSML prosody attribute values to mimic's numeric rates."""
        for key, value in [
            ('x-slow', '0.4'),
            ('slow', '0.7'),
            ('medium', '1.0'),
            ('high', '1.3'),
            ('x-high', '1.6'),
            ('speed', 'rate')
        ]:
            tag = tag.replace(key, value)
        return tag
    @property
    def args(self):
        """ Build mimic arguments. """
        if (self.voice in SUBSCRIBER_VOICES and
                exists(SUBSCRIBER_VOICES[self.voice]) and self.is_subscriber):
            # Use subscriber voice
            mimic_bin = SUBSCRIBER_VOICES[self.voice]
            voice = self.voice
        elif self.voice in SUBSCRIBER_VOICES:
            # Premium voice whose binary isn't downloaded yet: fall back to
            # the default binary with voice 'ap' while downloading.
            mimic_bin = BIN
            voice = 'ap'
        else:
            # Normal case use normal binary and selected voice
            mimic_bin = BIN
            voice = self.voice
        args = [mimic_bin, '-voice', voice, '-psdur', '-ssml']
        stretch = config.get('duration_stretch', None)
        if stretch:
            args += ['--setf', 'duration_stretch=' + stretch]
        return args
    def get_tts(self, sentence, wav_file):
        """Generate `wav_file` and return it with mimic's phoneme output."""
        phonemes = subprocess.check_output(self.args + ['-o', wav_file,
                                                        '-t', sentence])
        return wav_file, phonemes.decode()
    def visime(self, output):
        """Convert mimic 'phoneme:duration' output to (viseme, duration) pairs.

        Unknown phonemes fall back to viseme '4' (closed mouth).
        """
        # FIX: removed an unused `start = time()` local and replaced the
        # leftover debug print() with a debug-level log entry.
        visimes = []
        for pair in str(output).split(" "):
            pho_dur = pair.split(":")  # phoneme:duration
            if len(pho_dur) == 2:
                visimes.append((VISIMES.get(pho_dur[0], '4'),
                                float(pho_dur[1])))
        LOG.debug(visimes)
        return visimes
class MimicValidator(TTSValidator):
    """Validator verifying the mimic executable is present and runnable."""
    def __init__(self, tts):
        super(MimicValidator, self).__init__(tts)
    def validate_lang(self):
        # TODO: Verify version of mimic can handle the requested language
        pass
    def validate_connection(self):
        """Raise a descriptive error if mimic cannot be invoked."""
        try:
            subprocess.call([BIN, '--version'])
        # FIX: narrowed the bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to Exception. Using format() also
        # avoids a secondary TypeError when BIN is None (not found on PATH).
        except Exception:
            LOG.info('Failed to find mimic at: {}'.format(BIN))
            raise Exception(
                'Mimic was not found. Run install-mimic.sh to install it.')
    def get_tts_class(self):
        return Mimic
# Mapping based on Jeffers phoneme to viseme map, seen in table 1 from:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.221.6377&rep=rep1&type=pdf
#
# Mycroft unit visemes based on images found at:
# http://www.web3.lu/wp-content/uploads/2014/09/visemes.jpg
#
# Mapping was created partially based on the "12 mouth shapes" visuals seen at:
# https://wolfpaulus.com/journal/software/lipsynchronization/
#
# Keys are mimic/Arpabet phoneme codes; values are Mycroft viseme ids ('0'-'6').
VISIMES = {
    # /A group
    'v': '5',
    'f': '5',
    # /B group
    'uh': '2',
    'w': '2',
    'uw': '2',
    'er': '2',
    'r': '2',
    'ow': '2',
    # /C group
    'b': '4',
    'p': '4',
    'm': '4',
    # /D group
    'aw': '1',
    # /E group
    'th': '3',
    'dh': '3',
    # /F group
    'zh': '3',
    'ch': '3',
    'sh': '3',
    'jh': '3',
    # /G group
    'oy': '6',
    'ao': '6',
    # /H group
    'z': '3',
    's': '3',
    # /I group
    'ae': '0',
    'eh': '0',
    'ey': '0',
    'ah': '0',
    'ih': '0',
    'y': '0',
    'iy': '0',
    'aa': '0',
    'ay': '0',
    'ax': '0',
    'hh': '0',
    # /J group
    'n': '3',
    't': '3',
    'd': '3',
    'l': '3',
    # /K group
    'g': '3',
    'ng': '3',
    'k': '3',
    # blank mouth
    'pau': '4',
}
| |
r"""
Wigner, Clebsch-Gordan, Racah, and Gaunt coefficients
Collection of functions for calculating Wigner 3j, 6j, 9j,
Clebsch-Gordan, Racah as well as Gaunt coefficients exactly, all
evaluating to a rational number times the square root of a rational
number [Rasch03]_.
Please see the description of the individual functions for further
details and examples.
REFERENCES:
.. [Rasch03] J. Rasch and A. C. H. Yu, 'Efficient Storage Scheme for
Pre-calculated Wigner 3j, 6j and Gaunt Coefficients', SIAM
J. Sci. Comput. Volume 25, Issue 4, pp. 1416-1428 (2003)
This code was taken from Sage with the permission of all authors:
http://groups.google.com/group/sage-devel/browse_thread/thread/33835976efbb3b7f
AUTHORS:
- Jens Rasch (2009-03-24): initial version for Sage
- Jens Rasch (2009-05-31): updated to sage-4.0
Copyright (C) 2008 Jens Rasch <jyr2000@gmail.com>
"""
from sympy import Integer, pi, sqrt
#from sage.rings.complex_number import ComplexNumber
#from sage.rings.finite_rings.integer_mod import Mod
# Cache of precomputed factorials (index i holds i!), used to massively
# accelerate future calculations of the various coefficients.
_Factlist = [1]
def _calc_factlist(nn):
    r"""
    Extend the cached factorial table up to ``nn!`` and return it.

    INPUT:

    - ``nn`` - integer, highest factorial to be computed

    OUTPUT:

    list of integers -- the list of precomputed factorials

    EXAMPLES:

    Calculate list of factorials::

        sage: from sage.functions.wigner import _calc_factlist
        sage: _calc_factlist(10)
        [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
    """
    # Grow the shared cache one factorial at a time: when len(_Factlist)
    # is k, the last entry is (k-1)!, so the next value is (k-1)! * k.
    while len(_Factlist) <= nn:
        _Factlist.append(_Factlist[-1] * len(_Factlist))
    return _Factlist[:int(nn) + 1]
def wigner_3j(j_1, j_2, j_3, m_1, m_2, m_3, prec=None):
    r"""
    Calculate the Wigner 3j symbol `Wigner3j(j_1,j_2,j_3,m_1,m_2,m_3)`.

    INPUT:

    - ``j_1``, ``j_2``, ``j_3``, ``m_1``, ``m_2``, ``m_3`` - integer or half integer

    - ``prec`` - precision, default: ``None``. Providing a precision can
      drastically speed up the calculation.

    OUTPUT:

    Rational number times the square root of a rational number
    (if ``prec=None``), or real number if a precision is given.

    EXAMPLES::

        sage: wigner_3j(2, 6, 4, 0, 0, 0)
        sqrt(5/143)
        sage: wigner_3j(2, 6, 4, 0, 0, 1)
        0
        sage: wigner_3j(0.5, 0.5, 1, 0.5, -0.5, 0)
        sqrt(1/6)
        sage: wigner_3j(40, 100, 60, -10, 60, -50)
        95608/18702538494885*sqrt(21082735836735314343364163310/220491455010479533763)
        sage: wigner_3j(2500, 2500, 5000, 2488, 2400, -4888, prec=64)
        7.60424456883448589e-12

    It is an error to have arguments that are not integer or half
    integer values::

        sage: wigner_3j(2.1, 6, 4, 0, 0, 0)
        Traceback (most recent call last):
        ...
        ValueError: j values must be integer or half integer
        sage: wigner_3j(2, 6, 4, 1, 0, -1.1)
        Traceback (most recent call last):
        ...
        ValueError: m values must be integer or half integer

    NOTES:

    The Wigner 3j symbol obeys the following symmetry rules:

    - invariant under any permutation of the columns (with the
      exception of a sign change where `J:=j_1+j_2+j_3`):

      .. math::
         Wigner3j(j_1,j_2,j_3,m_1,m_2,m_3)
          =Wigner3j(j_3,j_1,j_2,m_3,m_1,m_2)
          =Wigner3j(j_2,j_3,j_1,m_2,m_3,m_1)
          =(-1)^J Wigner3j(j_3,j_2,j_1,m_3,m_2,m_1)
          =(-1)^J Wigner3j(j_1,j_3,j_2,m_1,m_3,m_2)
          =(-1)^J Wigner3j(j_2,j_1,j_3,m_2,m_1,m_3)

    - invariant under space inflection, i.e.

      .. math::
         Wigner3j(j_1,j_2,j_3,m_1,m_2,m_3)
          =(-1)^J Wigner3j(j_1,j_2,j_3,-m_1,-m_2,-m_3)

    - symmetric with respect to the 72 additional symmetries based on
      the work by [Regge58]_

    - zero for `j_1`, `j_2`, `j_3` not fulfilling triangle relation

    - zero for `m_1 + m_2 + m_3 \neq 0`

    - zero for violating any one of the conditions
      `j_1 \ge |m_1|`, `j_2 \ge |m_2|`, `j_3 \ge |m_3|`

    ALGORITHM:

    This function uses the algorithm of [Edmonds74]_ to calculate the
    value of the 3j symbol exactly. Note that the formula contains
    alternating sums over large factorials and is therefore unsuitable
    for finite precision arithmetic and only useful for a computer
    algebra system [Rasch03]_.

    REFERENCES:

    .. [Regge58] 'Symmetry Properties of Clebsch-Gordan Coefficients',
      T. Regge, Nuovo Cimento, Volume 10, pp. 544 (1958)

    .. [Edmonds74] 'Angular Momentum in Quantum Mechanics',
      A. R. Edmonds, Princeton University Press (1974)

    AUTHORS:

    - Jens Rasch (2009-03-24): initial version
    """
    if int(j_1 * 2) != j_1 * 2 or int(j_2 * 2) != j_2 * 2 or \
            int(j_3 * 2) != j_3 * 2:
        raise ValueError("j values must be integer or half integer")
    if int(m_1 * 2) != m_1 * 2 or int(m_2 * 2) != m_2 * 2 or \
            int(m_3 * 2) != m_3 * 2:
        raise ValueError("m values must be integer or half integer")
    # selection rule: the symbol vanishes unless the m's sum to zero
    if m_1 + m_2 + m_3 != 0:
        return 0
    prefid = Integer((-1) ** int(j_1 - j_2 - m_3))
    m_3 = -m_3
    # triangle relation checks
    a1 = j_1 + j_2 - j_3
    if a1 < 0:
        return 0
    a2 = j_1 - j_2 + j_3
    if a2 < 0:
        return 0
    a3 = -j_1 + j_2 + j_3
    if a3 < 0:
        return 0
    if (abs(m_1) > j_1) or (abs(m_2) > j_2) or (abs(m_3) > j_3):
        return 0
    # all factorial arguments below are mathematically integers, but for
    # half-integer input they arrive as floats -- coerce before indexing
    maxfact = max(j_1 + j_2 + j_3 + 1, j_1 + abs(m_1), j_2 + abs(m_2),
                  j_3 + abs(m_3))
    _calc_factlist(int(maxfact))
    argsqrt = Integer(_Factlist[int(j_1 + j_2 - j_3)] *
                      _Factlist[int(j_1 - j_2 + j_3)] *
                      _Factlist[int(-j_1 + j_2 + j_3)] *
                      _Factlist[int(j_1 - m_1)] *
                      _Factlist[int(j_1 + m_1)] *
                      _Factlist[int(j_2 - m_2)] *
                      _Factlist[int(j_2 + m_2)] *
                      _Factlist[int(j_3 - m_3)] *
                      _Factlist[int(j_3 + m_3)]) / \
        _Factlist[int(j_1 + j_2 + j_3 + 1)]
    ressqrt = sqrt(argsqrt)
    if ressqrt.is_complex:
        ressqrt = ressqrt.as_real_imag()[0]
    # the summation bounds are integers, but may be float-valued for
    # half-integer j/m; range() needs real ints
    imin = int(max(-j_3 + j_1 + m_2, -j_3 + j_2 - m_1, 0))
    imax = int(min(j_2 + m_2, j_1 - m_1, j_1 + j_2 - j_3))
    sumres = 0
    for ii in range(imin, imax + 1):
        den = _Factlist[ii] * \
            _Factlist[int(ii + j_3 - j_1 - m_2)] * \
            _Factlist[int(j_2 + m_2 - ii)] * \
            _Factlist[int(j_1 - ii - m_1)] * \
            _Factlist[int(ii + j_3 - j_2 + m_1)] * \
            _Factlist[int(j_1 + j_2 - j_3 - ii)]
        sumres = sumres + Integer((-1) ** ii) / den
    res = ressqrt * sumres * prefid
    if prec is not None:
        # honour the documented ``prec`` argument (previously ignored)
        res = res.n(prec)
    return res
def clebsch_gordan(j_1, j_2, j_3, m_1, m_2, m_3, prec=None):
    r"""
    Calculates the Clebsch-Gordan coefficient
    `\langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \rangle`.

    The reference for this function is [Edmonds74]_.

    INPUT:

    - ``j_1``, ``j_2``, ``j_3``, ``m_1``, ``m_2``, ``m_3`` - integer or half integer

    - ``prec`` - precision, default: ``None``. Providing a precision can
      drastically speed up the calculation.

    OUTPUT:

    Rational number times the square root of a rational number
    (if ``prec=None``), or real number if a precision is given.

    EXAMPLES::

        >>> from sympy import S
        >>> from sympy.physics.wigner import clebsch_gordan
        >>> clebsch_gordan(S(3)/2, S(1)/2, 2, S(3)/2, S(1)/2, 2)
        1
        >>> clebsch_gordan(S(3)/2, S(1)/2, 1, S(3)/2, -S(1)/2, 1)
        3**(1/2)/2
        >>> clebsch_gordan(S(3)/2, S(1)/2, 1, -S(1)/2, S(1)/2, 0)
        -2**(1/2)/2

    NOTES:

    The coefficient is evaluated via its relation to Wigner 3j symbols:

    .. math::
        \langle j_1 m_1 \; j_2 m_2 | j_3 m_3 \rangle
        =(-1)^{j_1-j_2+m_3} \sqrt{2j_3+1} \;
        Wigner3j(j_1,j_2,j_3,m_1,m_2,-m_3)

    See also the documentation on Wigner 3j symbols, which exhibit much
    higher symmetry relations than the Clebsch-Gordan coefficient.

    AUTHORS:

    - Jens Rasch (2009-03-24): initial version
    """
    phase = (-1) ** int(j_1 - j_2 + m_3)
    return phase * sqrt(2 * j_3 + 1) * \
        wigner_3j(j_1, j_2, j_3, m_1, m_2, -m_3, prec)
def _big_delta_coeff(aa, bb, cc, prec=None):
    r"""
    Calculates the Delta coefficient of the 3 angular momenta for
    Racah symbols. Also checks that the differences are of integer
    value.

    INPUT:

    - ``aa`` - first angular momentum, integer or half integer

    - ``bb`` - second angular momentum, integer or half integer

    - ``cc`` - third angular momentum, integer or half integer

    - ``prec`` - precision of the ``sqrt()`` calculation

    OUTPUT:

    double - Value of the Delta coefficient

    EXAMPLES::

        sage: from sage.functions.wigner import _big_delta_coeff
        sage: _big_delta_coeff(1,1,1)
        1/2*sqrt(1/6)
    """
    if int(aa + bb - cc) != (aa + bb - cc):
        raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
    if int(aa + cc - bb) != (aa + cc - bb):
        raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
    if int(bb + cc - aa) != (bb + cc - aa):
        raise ValueError("j values must be integer or half integer and fulfill the triangle relation")
    # triangle relation: all three pairwise sums must dominate the third
    if (aa + bb - cc) < 0:
        return 0
    if (aa + cc - bb) < 0:
        return 0
    if (bb + cc - aa) < 0:
        return 0
    # the factorial arguments are mathematically integers but may be
    # float-valued for half-integer input -- coerce before indexing
    maxfact = max(aa + bb - cc, aa + cc - bb, bb + cc - aa, aa + bb + cc + 1)
    _calc_factlist(int(maxfact))
    argsqrt = Integer(_Factlist[int(aa + bb - cc)] *
                      _Factlist[int(aa + cc - bb)] *
                      _Factlist[int(bb + cc - aa)]) / \
        Integer(_Factlist[int(aa + bb + cc + 1)])
    # sympy Rationals have no .sqrt() method (that was Sage API); use
    # sympy's sqrt().  argsqrt is a quotient of positive factorials, so
    # the result is always real and no complex-number handling is needed
    # (the old branch also referenced the undefined name ComplexNumber).
    ressqrt = sqrt(argsqrt)
    if prec is not None:
        # evaluate numerically at the requested precision
        ressqrt = ressqrt.evalf(prec)
    return ressqrt
def racah(aa, bb, cc, dd, ee, ff, prec=None):
    r"""
    Calculate the Racah symbol `W(a,b,c,d;e,f)`.

    INPUT:

    - ``a``, ..., ``f`` - integer or half integer

    - ``prec`` - precision, default: ``None``. Providing a precision can
      drastically speed up the calculation.

    OUTPUT:

    Rational number times the square root of a rational number
    (if ``prec=None``), or real number if a precision is given.

    EXAMPLES::

        sage: racah(3,3,3,3,3,3)
        -1/14

    NOTES:

    The Racah symbol is related to the Wigner 6j symbol:

    .. math::
       Wigner6j(j_1,j_2,j_3,j_4,j_5,j_6)
       =(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6)

    Please see the 6j symbol for its much richer symmetries and for
    additional properties.

    ALGORITHM:

    This function uses the algorithm of [Edmonds74]_ to calculate the
    value of the 6j symbol exactly. Note that the formula contains
    alternating sums over large factorials and is therefore unsuitable
    for finite precision arithmetic and only useful for a computer
    algebra system [Rasch03]_.

    AUTHORS:

    - Jens Rasch (2009-03-24): initial version
    """
    prefac = _big_delta_coeff(aa, bb, ee, prec) * \
        _big_delta_coeff(cc, dd, ee, prec) * \
        _big_delta_coeff(aa, cc, ff, prec) * \
        _big_delta_coeff(bb, dd, ff, prec)
    if prefac == 0:
        return 0
    # the summation bounds are mathematically integers, but arrive as
    # floats for half-integer arguments; range() needs real ints
    imin = int(max(aa + bb + ee, cc + dd + ee, aa + cc + ff, bb + dd + ff))
    imax = int(min(aa + bb + cc + dd, aa + dd + ee + ff, bb + cc + ee + ff))
    maxfact = max(imax + 1, aa + bb + cc + dd, aa + dd + ee + ff,
                  bb + cc + ee + ff)
    _calc_factlist(int(maxfact))
    sumres = 0
    for kk in range(imin, imax + 1):
        den = _Factlist[int(kk - aa - bb - ee)] * \
            _Factlist[int(kk - cc - dd - ee)] * \
            _Factlist[int(kk - aa - cc - ff)] * \
            _Factlist[int(kk - bb - dd - ff)] * \
            _Factlist[int(aa + bb + cc + dd - kk)] * \
            _Factlist[int(aa + dd + ee + ff - kk)] * \
            _Factlist[int(bb + cc + ee + ff - kk)]
        sumres = sumres + Integer((-1) ** kk * _Factlist[kk + 1]) / den
    res = prefac * sumres * (-1) ** int(aa + bb + cc + dd)
    return res
def wigner_6j(j_1, j_2, j_3, j_4, j_5, j_6, prec=None):
    r"""
    Calculate the Wigner 6j symbol `Wigner6j(j_1,j_2,j_3,j_4,j_5,j_6)`.

    INPUT:

    - ``j_1``, ..., ``j_6`` - integer or half integer

    - ``prec`` - precision, default: ``None``. Providing a precision can
      drastically speed up the calculation.

    OUTPUT:

    Rational number times the square root of a rational number
    (if ``prec=None``), or real number if a precision is given.

    EXAMPLES::

        sage: wigner_6j(3,3,3,3,3,3)
        -1/14
        sage: wigner_6j(5,5,5,5,5,5)
        1/52
        sage: wigner_6j(6,6,6,6,6,6)
        309/10868
        sage: wigner_6j(8,8,8,8,8,8)
        -12219/965770
        sage: wigner_6j(30,30,30,30,30,30)
        36082186869033479581/87954851694828981714124
        sage: wigner_6j(0.5,0.5,1,0.5,0.5,1)
        1/6
        sage: wigner_6j(200,200,200,200,200,200, prec=1000)*1.0
        0.000155903212413242

    It is an error to have arguments that are not integer or half
    integer values or do not fulfill the triangle relation::

        sage: wigner_6j(2.5,2.5,2.5,2.5,2.5,2.5)
        Traceback (most recent call last):
        ...
        ValueError: j values must be integer or half integer and fulfill the triangle relation
        sage: wigner_6j(0.5,0.5,1.1,0.5,0.5,1.1)
        Traceback (most recent call last):
        ...
        ValueError: j values must be integer or half integer and fulfill the triangle relation

    NOTES:

    The Wigner 6j symbol is related to the Racah symbol but exhibits
    more symmetries as detailed below.

    .. math::
       Wigner6j(j_1,j_2,j_3,j_4,j_5,j_6)
        =(-1)^{j_1+j_2+j_4+j_5} W(j_1,j_2,j_5,j_4,j_3,j_6)

    The Wigner 6j symbol obeys the following symmetry rules:

    - Wigner 6j symbols are left invariant under any permutation of
      the columns

    - They are invariant under the exchange of the upper and lower
      arguments in each of any two columns

    - additional 6 symmetries [Regge59]_ giving rise to 144 symmetries
      in total

    - only non-zero if any triple of `j`'s fulfill a triangle relation

    ALGORITHM:

    This function uses the algorithm of [Edmonds74]_ to calculate the
    value of the 6j symbol exactly. Note that the formula contains
    alternating sums over large factorials and is therefore unsuitable
    for finite precision arithmetic and only useful for a computer
    algebra system [Rasch03]_.

    REFERENCES:

    .. [Regge59] 'Symmetry Properties of Racah Coefficients',
      T. Regge, Nuovo Cimento, Volume 11, pp. 116 (1959)
    """
    # the 6j symbol is just a phase times the corresponding Racah symbol
    phase = (-1) ** int(j_1 + j_2 + j_4 + j_5)
    return phase * racah(j_1, j_2, j_5, j_4, j_3, j_6, prec)
def wigner_9j(j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9, prec=None):
    r"""
    Calculate the Wigner 9j symbol
    `Wigner9j(j_1,j_2,j_3,j_4,j_5,j_6,j_7,j_8,j_9)`.

    INPUT:

    - ``j_1``, ..., ``j_9`` - integer or half integer

    - ``prec`` - precision, default: ``None``. Providing a precision can
      drastically speed up the calculation.

    OUTPUT:

    Rational number times the square root of a rational number
    (if ``prec=None``), or real number if a precision is given.

    EXAMPLES:

    A couple of examples and test cases, note that for speed reasons a
    precision is given::

        sage: wigner_9j(1,1,1, 1,1,1, 1,1,0 ,prec=64) # ==1/18
        0.0555555555555555555
        sage: wigner_9j(1,1,1, 1,1,1, 1,1,1)
        0
        sage: wigner_9j(1,1,1, 1,1,1, 1,1,2 ,prec=64) # ==1/18
        0.0555555555555555556
        sage: wigner_9j(1,2,1, 2,2,2, 1,2,1 ,prec=64) # ==-1/150
        -0.00666666666666666667
        sage: wigner_9j(3,3,2, 2,2,2, 3,3,2 ,prec=64) # ==157/14700
        0.0106802721088435374
        sage: wigner_9j(3,3,1, 3.5,3.5,2, 3.5,3.5,1 ,prec=64)
        0.0110216678544351364
        sage: wigner_9j(100,80,50, 50,100,70, 60,50,100 ,prec=1000)*1.0
        1.05597798065761e-7
        sage: wigner_9j(1.5,1,1.5, 1,1,1, 1.5,1,1.5)
        0

    It is an error to have arguments that are not integer or half
    integer values or do not fulfill the triangle relation::

        sage: wigner_9j(0.5,0.5,0.5, 0.5,0.5,0.5, 0.5,0.5,0.5,prec=64)
        Traceback (most recent call last):
        ...
        ValueError: j values must be integer or half integer and fulfill the triangle relation
        sage: wigner_9j(1,1,1, 0.5,1,1.5, 0.5,1,2.5,prec=64)
        Traceback (most recent call last):
        ...
        ValueError: j values must be integer or half integer and fulfill the triangle relation

    ALGORITHM:

    This function uses the algorithm of [Edmonds74]_ to calculate the
    value of the 3j symbol exactly. Note that the formula contains
    alternating sums over large factorials and is therefore unsuitable
    for finite precision arithmetic and only useful for a computer
    algebra system [Rasch03]_.
    """
    # The intermediate coupling in the Racah-sum expansion can be a
    # half-integer (e.g. for mixed integer/half-integer arguments), so
    # iterate over TWICE its value -- always an integer -- stepping by 2
    # to keep the correct parity, and halve inside the loop.  This also
    # avoids passing floats to range().
    imax = int(2 * min(j_1 + j_9, j_2 + j_6, j_4 + j_8))
    imin = imax % 2
    sumres = 0
    for kk in range(imin, imax + 1, 2):
        # true summation variable; Integer division keeps it exact
        half_kk = Integer(kk) / 2
        # (2*half_kk + 1) == kk + 1
        sumres = sumres + (kk + 1) * \
            racah(j_1, j_2, j_9, j_6, j_3, half_kk, prec) * \
            racah(j_4, j_6, j_8, j_2, j_5, half_kk, prec) * \
            racah(j_1, j_4, j_9, j_8, j_7, half_kk, prec)
    return sumres
def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):
    r"""
    Calculate the Gaunt coefficient.

    The Gaunt coefficient is defined as the integral over three
    spherical harmonics:

    .. math::
        Y(j_1,j_2,j_3,m_1,m_2,m_3)
        =\int Y_{l_1,m_1}(\Omega)
         Y_{l_2,m_2}(\Omega) Y_{l_3,m_3}(\Omega) d\Omega
        =\sqrt{(2l_1+1)(2l_2+1)(2l_3+1)/(4\pi)}
         \; Y(j_1,j_2,j_3,0,0,0) \; Y(j_1,j_2,j_3,m_1,m_2,m_3)

    INPUT:

    - ``l_1``, ``l_2``, ``l_3``, ``m_1``, ``m_2``, ``m_3`` - integer

    - ``prec`` - precision, default: ``None``. Providing a precision can
      drastically speed up the calculation.

    OUTPUT:

    Rational number times the square root of a rational number
    (if ``prec=None``), or real number if a precision is given.

    EXAMPLES::

        sage: gaunt(1,0,1,1,0,-1)
        -1/2/sqrt(pi)
        sage: gaunt(1,0,1,1,0,0)
        0
        sage: gaunt(29,29,34,10,-5,-5)
        1821867940156/215552371055153321*sqrt(22134)/sqrt(pi)
        sage: gaunt(20,20,40,1,-1,0)
        28384503878959800/74029560764440771/sqrt(pi)
        sage: gaunt(12,15,5,2,3,-5)
        91/124062*sqrt(36890)/sqrt(pi)
        sage: gaunt(10,10,12,9,3,-12)
        -98/62031*sqrt(6279)/sqrt(pi)
        sage: gaunt(1000,1000,1200,9,3,-12).n(64)
        0.00689500421922113448

    It is an error to use non-integer values for `l` and `m`::

        sage: gaunt(1.2,0,1.2,0,0,0)
        Traceback (most recent call last):
        ...
        ValueError: l values must be integer
        sage: gaunt(1,0,1,1.1,0,-1.1)
        Traceback (most recent call last):
        ...
        ValueError: m values must be integer

    NOTES:

    The Gaunt coefficient obeys the following symmetry rules:

    - invariant under any permutation of the columns

    - invariant under space inflection, i.e.

      .. math::
          Y(j_1,j_2,j_3,m_1,m_2,m_3)
          =Y(j_1,j_2,j_3,-m_1,-m_2,-m_3)

    - symmetric with respect to the 72 Regge symmetries as inherited
      for the `3j` symbols [Regge58]_

    - zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation

    - zero for violating any one of the conditions: `l_1 \ge |m_1|`,
      `l_2 \ge |m_2|`, `l_3 \ge |m_3|`

    - non-zero only for an even sum of the `l_i`, i.e.
      `J=l_1+l_2+l_3=2n` for `n` in `\Bold{N}`

    ALGORITHM:

    This function uses the algorithm of [Liberatodebrito82]_ to
    calculate the value of the Gaunt coefficient exactly. Note that
    the formula contains alternating sums over large factorials and is
    therefore unsuitable for finite precision arithmetic and only
    useful for a computer algebra system [Rasch03]_.

    REFERENCES:

    .. [Liberatodebrito82] 'FORTRAN program for the integral of three
      spherical harmonics', A. Liberato de Brito,
      Comput. Phys. Commun., Volume 25, pp. 81-85 (1982)

    AUTHORS:

    - Jens Rasch (2009-03-24): initial version for Sage
    """
    if int(l_1) != l_1 or int(l_2) != l_2 or int(l_3) != l_3:
        raise ValueError("l values must be integer")
    if int(m_1) != m_1 or int(m_2) != m_2 or int(m_3) != m_3:
        raise ValueError("m values must be integer")
    # normalise float-valued integers (e.g. 1.0) so list indexing works
    l_1, l_2, l_3 = int(l_1), int(l_2), int(l_3)
    m_1, m_2, m_3 = int(m_1), int(m_2), int(m_3)
    # use integer arithmetic: '/' would give a float under Python 3 and
    # the old parity test used the undefined (commented-out) name Mod
    sumL = l_1 + l_2 + l_3
    bigL = sumL // 2
    # triangle relation checks
    a1 = l_1 + l_2 - l_3
    if a1 < 0:
        return 0
    a2 = l_1 - l_2 + l_3
    if a2 < 0:
        return 0
    a3 = -l_1 + l_2 + l_3
    if a3 < 0:
        return 0
    # the coefficient vanishes for an odd sum of the l's
    if sumL % 2 != 0:
        return 0
    if (m_1 + m_2 + m_3) != 0:
        return 0
    if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3):
        return 0
    imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0)
    imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3)
    maxfact = max(l_1 + l_2 + l_3 + 1, imax + 1)
    _calc_factlist(maxfact)
    argsqrt = (2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \
        _Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \
        _Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \
        (4 * pi)
    # argsqrt is a sympy expression (it contains pi) and has no .sqrt()
    # method; use sympy's sqrt() function
    ressqrt = sqrt(argsqrt)
    prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] *
                     _Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3]) / \
        _Factlist[2 * bigL + 1] / \
        (_Factlist[bigL - l_1] * _Factlist[bigL - l_2] * _Factlist[bigL - l_3])
    sumres = 0
    for ii in range(imin, imax + 1):
        den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \
            _Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \
            _Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii]
        sumres = sumres + Integer((-1) ** ii) / den
    res = ressqrt * prefac * sumres * (-1) ** (bigL + l_3 + m_1 - m_2)
    if prec is not None:
        res = res.n(prec)
    return res
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see the file header); manual
# edits will be lost when the client is regenerated.
class EntityOperations(object):
    """EntityOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.servicebus.management._generated.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def get(
        self,
        entity_name,  # type: str
        enrich=False,  # type: Optional[bool]
        api_version="2021_05",  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> object
        """Get the details about the Queue or Topic with the given entityName.
        Get Queue or Topic.
        :param entity_name: The name of the queue or topic relative to the Service Bus namespace.
        :type entity_name: str
        :param enrich: A query parameter that sets enrich to true or false.
        :type enrich: bool
        :param api_version: Api Version.
        :type api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: object, or the result of cls(response)
        :rtype: object
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[object]
        # map 404/409 to typed exceptions; callers may extend via 'error_map'
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'entityName': self._serialize.url("entity_name", entity_name, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if enrich is not None:
            query_parameters['enrich'] = self._serialize.query("enrich", enrich, 'bool')
        if api_version is not None:
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/xml'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ServiceBusManagementError, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('object', pipeline_response)
        if cls:
            # give the caller's callback the raw pipeline response as well
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/{entityName}'}  # type: ignore

    def put(
        self,
        entity_name,  # type: str
        request_body,  # type: object
        api_version="2021_05",  # type: Optional[str]
        if_match=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> object
        """Create or update a queue or topic at the provided entityName.
        :param entity_name: The name of the queue or topic relative to the Service Bus namespace.
        :type entity_name: str
        :param request_body: Parameters required to make or edit a queue or topic.
        :type request_body: object
        :param api_version: Api Version.
        :type api_version: str
        :param if_match: Match condition for an entity to be updated. If specified and a matching
         entity is not found, an error will be raised. To force an unconditional update, set to the
         wildcard character (*). If not specified, an insert will be performed when no existing entity
         is found to update and a replace will be performed if an existing entity is found.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: object, or the result of cls(response)
        :rtype: object
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[object]
        # map 404/409 to typed exceptions; callers may extend via 'error_map'
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/atom+xml")
        # Construct URL
        url = self.put.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'entityName': self._serialize.url("entity_name", entity_name, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if api_version is not None:
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/xml'
        # Construct and send request
        body_content_kwargs = {}  # type: Dict[str, Any]
        # the management API expects an XML (atom+xml) payload
        body_content = self._serialize.body(request_body, 'object', is_xml=True)
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ServiceBusManagementError, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = None
        # 200 = updated existing entity, 201 = created a new one
        if response.status_code == 200:
            deserialized = self._deserialize('object', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('object', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    put.metadata = {'url': '/{entityName}'}  # type: ignore

    def delete(
        self,
        entity_name,  # type: str
        api_version="2021_05",  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> object
        """Delete the Queue or Topic with the given entityName.
        Delete Queue or Topic.
        :param entity_name: The name of the queue or topic relative to the Service Bus namespace.
        :type entity_name: str
        :param api_version: Api Version.
        :type api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: object, or the result of cls(response)
        :rtype: object
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[object]
        # map 404/409 to typed exceptions; callers may extend via 'error_map'
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))
        # Construct URL
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            'entityName': self._serialize.url("entity_name", entity_name, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        if api_version is not None:
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/xml'
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ServiceBusManagementError, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('object', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    delete.metadata = {'url': '/{entityName}'}  # type: ignore
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron_lib.api import validators
from neutron_lib import exceptions
from oslo_log import log as logging
import six
import webob.exc
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource as api_resource
from neutron import manager
from neutron.services import service_base
LOG = logging.getLogger(__name__)

TAG = 'tag'
TAGS = TAG + 's'
# maximum accepted length of a single tag string
MAX_TAG_LEN = 60
# service-plugin registry key under which the tag plugin is looked up
TAG_PLUGIN_TYPE = 'TAG'

# parent resources that may carry tags: {collection_name: member_name}
TAG_SUPPORTED_RESOURCES = {
    attributes.NETWORKS: attributes.NETWORK,
    # other resources can be added
}

# 'tags' is exposed read-only on the parent resource; tags are managed
# through the dedicated sub-resource controller below
TAG_ATTRIBUTE_MAP = {
    TAGS: {'allow_post': False, 'allow_put': False, 'is_visible': True}
}
class TagResourceNotFound(exceptions.NotFound):
    """Raised when the parent resource of a tag request does not exist."""
    message = _("Resource %(resource)s %(resource_id)s could not be found.")
class TagNotFound(exceptions.NotFound):
    """Raised when the requested tag is not set on the parent resource."""
    message = _("Tag %(tag)s could not be found.")
def get_parent_resource_and_id(kwargs):
    """Find the taggable parent referenced by the request kwargs.

    Returns a ``(collection_name, resource_id)`` pair, or ``(None, None)``
    when no supported parent id is present.
    """
    for key, value in kwargs.items():
        for collection, member in TAG_SUPPORTED_RESOURCES.items():
            if key == member + '_id':
                return collection, value
    return None, None
def validate_tag(tag):
    """Reject a tag that is not a string of at most MAX_TAG_LEN chars."""
    error = validators.validate_string(tag, MAX_TAG_LEN)
    if error:
        raise exceptions.InvalidInput(error_message=error)
def validate_tags(body):
    """Validate a ``{"tags": [...]}`` request body.

    Raises InvalidInput when the 'tags' key is missing or the list is not
    a list of unique strings within MAX_TAG_LEN.
    """
    if 'tags' not in body:
        # wrap in _() for translation, consistent with the other
        # user-facing messages in this module
        raise exceptions.InvalidInput(error_message=_("Invalid tags body."))
    msg = validators.validate_list_of_unique_strings(body['tags'], MAX_TAG_LEN)
    if msg:
        raise exceptions.InvalidInput(error_message=msg)
class TagController(object):
    """Maps the tag REST sub-resource endpoints onto the TAG plugin."""

    def __init__(self):
        service_plugins = manager.NeutronManager.get_service_plugins()
        self.plugin = service_plugins[TAG_PLUGIN_TYPE]

    def index(self, request, **kwargs):
        """GET /v2.0/networks/{network_id}/tags"""
        resource, resource_id = get_parent_resource_and_id(kwargs)
        return self.plugin.get_tags(request.context, resource, resource_id)

    def show(self, request, id, **kwargs):
        """GET /v2.0/networks/{network_id}/tags/{tag} -- ``id`` is the tag."""
        validate_tag(id)
        resource, resource_id = get_parent_resource_and_id(kwargs)
        return self.plugin.get_tag(request.context, resource, resource_id, id)

    def create(self, request, **kwargs):
        """POST /v2.0/networks/{network_id}/tags -- intentionally unsupported."""
        raise webob.exc.HTTPNotFound("not supported")

    def update(self, request, id, **kwargs):
        """PUT /v2.0/networks/{network_id}/tags/{tag} -- ``id`` is the tag."""
        validate_tag(id)
        resource, resource_id = get_parent_resource_and_id(kwargs)
        return self.plugin.update_tag(request.context, resource, resource_id,
                                      id)

    def update_all(self, request, body, **kwargs):
        """PUT /v2.0/networks/{network_id}/tags with body {"tags": [...]}."""
        validate_tags(body)
        resource, resource_id = get_parent_resource_and_id(kwargs)
        return self.plugin.update_tags(request.context, resource, resource_id,
                                       body)

    def delete(self, request, id, **kwargs):
        """DELETE /v2.0/networks/{network_id}/tags/{tag} -- ``id`` is the tag."""
        validate_tag(id)
        resource, resource_id = get_parent_resource_and_id(kwargs)
        return self.plugin.delete_tag(request.context, resource, resource_id,
                                      id)

    def delete_all(self, request, **kwargs):
        """DELETE /v2.0/networks/{network_id}/tags"""
        resource, resource_id = get_parent_resource_and_id(kwargs)
        return self.plugin.delete_tags(request.context, resource, resource_id)
class Tag(extensions.ExtensionDescriptor):
    """Extension class supporting tags."""

    @classmethod
    def get_name(cls):
        return "Tag support"

    @classmethod
    def get_alias(cls):
        return "tag"

    @classmethod
    def get_description(cls):
        return "Enables to set tag on resources."

    @classmethod
    def get_updated(cls):
        return "2016-01-01T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        action_status = {'index': 200, 'show': 204, 'update': 201,
                         'update_all': 200, 'delete': 204, 'delete_all': 204}
        controller = api_resource.Resource(TagController(),
                                           base.FAULT_MAP,
                                           action_status=action_status)
        collection_methods = {"delete_all": "DELETE",
                              "update_all": "PUT"}
        # one sub-resource extension per taggable parent collection
        # (the original initialized exts twice; the first was dead code)
        exts = []
        for collection_name, member_name in TAG_SUPPORTED_RESOURCES.items():
            parent = {'member_name': member_name,
                      'collection_name': collection_name}
            exts.append(extensions.ResourceExtension(
                TAGS, controller, parent,
                collection_methods=collection_methods))
        return exts

    def get_extended_resources(self, version):
        """Advertise the read-only 'tags' attribute on supported resources."""
        if version != "2.0":
            return {}
        return {collection_name: TAG_ATTRIBUTE_MAP
                for collection_name in TAG_SUPPORTED_RESOURCES}
@six.add_metaclass(abc.ABCMeta)
class TagPluginBase(service_base.ServicePluginBase):
    """REST API to operate the Tag."""

    def get_plugin_description(self):
        # Human-readable description reported for this service plugin.
        return "Tag support"

    def get_plugin_type(self):
        # Registry key under which the tag service plugin is stored.
        return TAG_PLUGIN_TYPE

    @abc.abstractmethod
    def get_tags(self, context, resource, resource_id):
        """Return all tags set on the given parent resource."""
        pass

    @abc.abstractmethod
    def get_tag(self, context, resource, resource_id, tag):
        """Return the given tag if it is set on the parent resource."""
        pass

    @abc.abstractmethod
    def update_tags(self, context, resource, resource_id, body):
        """Replace the parent resource's tags with those in ``body``."""
        pass

    @abc.abstractmethod
    def update_tag(self, context, resource, resource_id, tag):
        """Add a single tag to the parent resource."""
        pass

    @abc.abstractmethod
    def delete_tags(self, context, resource, resource_id):
        """Remove all tags from the parent resource."""
        pass

    @abc.abstractmethod
    def delete_tag(self, context, resource, resource_id, tag):
        """Remove a single tag from the parent resource."""
        pass
| |
from unittest import TestCase
from django.contrib.auth.models import AnonymousUser
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.template import Template, Context
from django.test.client import RequestFactory
from likes.middleware import SecretBallotUserIpUseragentMiddleware
from likes.views import can_vote_test, like
from secretballot import views
from secretballot.models import Vote
class BaseClassifierTestCase(object):
    """Mixin exercising a classifier class against its persisted state.

    Concrete subclasses must provide ``classifier_class``, ``config`` and
    ``clear()`` and be combined with ``unittest.TestCase`` (the assert
    methods come from there).  The deprecated ``failUnlessEqual``/
    ``failIfEqual`` aliases have been replaced by their modern names.
    """

    def setUp(self):
        if 'CLASSIFIER_CONFIG' not in self.config:
            self.config['CLASSIFIER_CONFIG'] = {}
        self.classifier = self.classifier_class(
            **self.config['CLASSIFIER_CONFIG']
        )
        # Reset persisted state, then recreate the classifier so every test
        # starts from a clean slate.
        self.clear()
        self.classifier = self.classifier_class(
            **self.config['CLASSIFIER_CONFIG']
        )

    def test_init(self):
        # State's counts should be loaded in the classifier.
        # On initial creation of classifier counts are 0.
        self.assertEqual(self.classifier.nspam, 0)
        self.assertEqual(self.classifier.nham, 0)
        # On subsequent load counts are loaded from state.
        self.classifier.nspam = 10
        self.classifier.nham = 20
        self.classifier.store()
        self.classifier = self.classifier_class(
            **self.config['CLASSIFIER_CONFIG']
        )
        self.assertEqual(self.classifier.nspam, 10)
        self.assertEqual(self.classifier.nham, 20)

    def test_store(self):
        state = self.classifier.get_state()
        self.assertNotEqual(
            state.spam_count,
            50,
            'Internal checking test is not testing existing values'
        )
        self.assertNotEqual(
            state.ham_count,
            100,
            'Internal checking test is not testing existing values'
        )
        # On store classifier counts should be saved to DB.
        self.classifier.nspam = 50
        self.classifier.nham = 100
        self.classifier.store()
        state = self.classifier.get_state()
        self.assertEqual(state.spam_count, 50)
        self.assertEqual(state.ham_count, 100)
class UtilsTestCase(TestCase):
    """Tests for ``moderator.utils.classify_comment``.

    Deprecated unittest aliases (``failUnlessEqual``/``failUnless``/
    ``failIf``) are replaced with their modern equivalents.
    """

    def setUp(self):
        from moderator import utils
        self.utils = utils

    def test_classify_comment(self):
        spam_comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="very bad spam"
        )
        ham_comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="awesome tasty ham"
        )
        unsure_comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="tasty spam"
        )
        generic_comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="foo bar"
        )
        abusive_comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="abusive comment"
        )
        # Three downvotes mark the comment as abusive.
        for i in range(0, 3):
            Vote.objects.create(
                content_type=ContentType.objects.get_for_model(Comment),
                object_id=abusive_comment.id,
                token=i,
                vote=-1
            )
        # Providing unsure should create unsure classification without
        # training.
        classified_comment = self.utils.classify_comment(
            generic_comment,
            'unsure'
        )
        self.assertEqual(classified_comment.cls, 'unsure')
        # Providing reported should create reported classification without
        # training, comment should be removed.
        classified_comment = self.utils.classify_comment(
            generic_comment,
            'reported'
        )
        self.assertEqual(classified_comment.cls, 'reported')
        self.assertTrue(classified_comment.comment.is_removed)
        # Without providing a class but with user abuse reports more or equal
        # to cutoff should create reported classification without training,
        # comment should be removed.
        classified_comment = self.utils.classify_comment(abusive_comment)
        self.assertEqual(classified_comment.cls, 'reported')
        self.assertTrue(classified_comment.comment.is_removed)
        # Providing ham class should create ham classification and training,
        # comment should not be removed.
        classified_comment = self.utils.classify_comment(ham_comment, 'ham')
        self.assertEqual(classified_comment.cls, 'ham')
        self.assertFalse(classified_comment.comment.is_removed)
        # Providing spam class should create spam classification and training,
        # comment should be removed.
        classified_comment = self.utils.classify_comment(spam_comment, 'spam')
        self.assertEqual(classified_comment.cls, 'spam')
        self.assertTrue(classified_comment.comment.is_removed)
        # Spammy comment should now be correctly classified automatically
        # without any training, should be removed.
        comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="bad spam"
        )
        comment.total_downvotes = 0
        classified_comment = self.utils.classify_comment(comment)
        self.assertEqual(classified_comment.cls, 'unsure')
        # Hammy comment should now be correctly classified automatically
        # without any training, should not be removed.
        comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="tasty ham"
        )
        comment.total_downvotes = 0
        classified_comment = self.utils.classify_comment(comment)
        self.assertEqual(classified_comment.cls, 'unsure')
        self.assertFalse(classified_comment.comment.is_removed)
        # Hammy spammy comment should now be correctly classified automatically
        # as unsure without any training, should not be removed.
        classified_comment = self.utils.classify_comment(unsure_comment)
        self.assertEqual(classified_comment.cls, 'unsure')
        self.assertFalse(classified_comment.comment.is_removed)
        # Should raise exception with unknown cls.
        self.assertRaises(Exception, self.utils.classify_comment,
                          unsure_comment, 'unknown_cls')
        classified_comment = self.utils.classify_comment(spam_comment, 'spam')
class InclusionTagsTestCase(TestCase):
    """Tests for the ``report_comment_abuse`` inclusion tag and voting.

    Deprecated unittest aliases (``failUnless``/``failIf``) are replaced
    with ``assertTrue``/``assertFalse``.
    """

    def test_report_comment_abuse(self):
        # Prepare context.
        context = Context()
        request = RequestFactory().get('/')
        request.user = AnonymousUser()
        request.META['HTTP_USER_AGENT'] = 'testing_agent'
        request.secretballot_token = SecretBallotUserIpUseragentMiddleware().\
            generate_token(request)
        comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="abuse report testing comment"
        )
        context['request'] = request
        context['comment'] = comment
        # Without having actioned anything on the comment the
        # Report Abuse action should be rendered.
        out = Template("{% load moderator_inclusion_tags %}"
                       "{% report_comment_abuse comment %}").render(context)
        self.assertTrue('Report Abuse' in out)
        # Like a comment.
        views.vote(
            request,
            content_type='.'.join((comment._meta.app_label,
                                   comment._meta.module_name)),
            object_id=comment.id,
            vote=1,
            redirect_url='/',
            can_vote_test=can_vote_test
        )
        # Reset previous like and test it applied.
        Vote.objects.all().delete()
        # Without having actioned anything on the comment the
        # Report Abuse action should be rendered.
        out = Template("{% load moderator_inclusion_tags %}"
                       "{% report_comment_abuse comment %}").render(context)
        self.assertTrue('Report Abuse' in out)
        # Dislike/report an abuse comment.
        views.vote(
            request,
            content_type='.'.join((comment._meta.app_label,
                                   comment._meta.module_name)),
            object_id=comment.id,
            vote=-1,
            redirect_url='/',
            can_vote_test=can_vote_test
        )
        self.assertEqual(Vote.objects.all().count(), 1)
        # Repeat votes should not count.
        views.vote(
            request,
            content_type='.'.join((comment._meta.app_label,
                                   comment._meta.module_name)),
            object_id=comment.id,
            vote=-1,
            redirect_url='/',
            can_vote_test=can_vote_test
        )
        self.assertEqual(Vote.objects.all().count(), 1)

    def test_report_comment_abuse_signal(self):
        # Prepare context.
        context = Context()
        request = RequestFactory().get('/')
        request.user = AnonymousUser()
        request.META['HTTP_REFERER'] = '/'
        request.META['HTTP_USER_AGENT'] = 'testing_agent'
        request.secretballot_token = SecretBallotUserIpUseragentMiddleware().\
            generate_token(request)
        comment = Comment.objects.create(
            content_type_id=1,
            site_id=1,
            comment="abuse report testing comment"
        )
        context['request'] = request
        context['comment'] = comment
        content_type = '-'.join((comment._meta.app_label,
                                 comment._meta.module_name))
        # Reset previous like and test it applied.
        Vote.objects.all().delete()
        # Report an abuse comment - 1st
        like(
            request,
            content_type=content_type,
            id=comment.id,
            vote=-1
        )
        self.assertEqual(Vote.objects.all().count(), 1)
        self.assertFalse(Comment.objects.get(pk=comment.pk).is_removed)
        # Report an abuse comment - 2nd; new user agent yields a new token.
        request.META['HTTP_USER_AGENT'] = 'testing_agent_2'
        request.secretballot_token = SecretBallotUserIpUseragentMiddleware().\
            generate_token(request)
        like(
            request,
            content_type=content_type,
            id=comment.id,
            vote=-1,
        )
        self.assertEqual(Vote.objects.all().count(), 2)
        self.assertFalse(Comment.objects.get(pk=comment.pk).is_removed)
        # Report an abuse comment - 3rd; the comment should now be removed.
        request.META['HTTP_USER_AGENT'] = 'testing_agent_3'
        request.secretballot_token = SecretBallotUserIpUseragentMiddleware().\
            generate_token(request)
        like(
            request,
            content_type=content_type,
            id=comment.id,
            vote=-1,
        )
        self.assertEqual(Vote.objects.all().count(), 3)
        self.assertTrue(Comment.objects.get(pk=comment.pk).is_removed)
| |
# -*- coding: utf-8 -*-
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from django.shortcuts import get_object_or_404
from workflow.apps.API import serializers
from workflow.apps.team.models import Person, Team
from workflow.apps.workflow.models import Item, Comment, Workflow, Project, ItemCategory
from workflow.apps.workflow.models import update_workflow_position, update_item_position, update_category_position
class CsrfExemptSessionAuthentication(SessionAuthentication):
    """Session authentication that skips Django's CSRF check.

    Lets API clients use unsafe methods (POST/PATCH/DELETE) without
    presenting a CSRF token.
    """
    def enforce_csrf(self, request):
        return  # To not perform the csrf check previously happening
class PersonList(APIView):
    """
    List all person
    """

    def get(self, request, format=None):
        """
        List all person
        ---
        response_serializer: serializers.PersonSerializer
        """
        serializer = serializers.PersonSerializer(
            Person.objects.all(), many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class TeamList(APIView):
    """
    List all team
    """
    def get(self, request, format=None):
        """
        List all team
        ---
        response_serializer: serializers.TeamSerializer
        """
        teams = Team.objects.all()
        serializer = serializers.TeamSerializer(teams, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class ProjectList(APIView):
    """
    List all project or create new
    """

    def get(self, request, format=None):
        """
        List all project
        ---
        response_serializer: serializers.ProjectSerializer
        """
        serializer = serializers.ProjectSerializer(
            Project.objects.all(), many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """
        Create new project
        ---
        request_serializer: serializers.ProjectSerializer
        response_serializer: serializers.ProjectSerializer
        """
        serializer = serializers.ProjectSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class WorkflowList(APIView):
    """
    List all workflow or create new
    """

    def get(self, request, format=None):
        """
        List all workflow
        ---
        response_serializer: serializers.WorkflowSerializer
        """
        serializer = serializers.WorkflowSerializer(
            Workflow.objects.all(), many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """
        Create new workflow
        ---
        request_serializer: serializers.WorkflowSerializer
        response_serializer: serializers.WorkflowSerializer
        """
        serializer = serializers.WorkflowSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class UsersWorkflowList(APIView):
    """
    Display workflow with selected filter
    """

    def get(self, request, person_pk, format=None):
        """
        Display workflow with selected filter
        ---
        response_serializer: serializers.WorkflowSerializer
        """
        person = get_object_or_404(Person, pk=person_pk)
        # Keep only active workflows that actually hold items assigned to
        # this person.
        active = Workflow.objects.filter(archived=False)
        matching = {wf for wf in active
                    if wf.get_items('mine', person=person)}
        serializer = serializers.WorkflowSerializer(matching, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class WorkflowDetailsFilter(APIView):
    """
    Display workflow details from selected filter
    """
    def get(self, request, display, person_pk, workflow_pk):
        """
        Display workflow details for selected person
        ---
        response_serializer: serializers.ItemSerializer
        """
        # NOTE(review): the ``display`` URL argument is accepted but never
        # used; items are always fetched with the 'mine' filter -- confirm
        # this is intended.
        person = get_object_or_404(Person, pk=person_pk)
        workflow = get_object_or_404(Workflow, pk=workflow_pk)
        items = workflow.get_items('mine', person=person)
        serializer = serializers.ItemSerializer(items, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class ProjectWorkflowList(APIView):
    """
    List all workflow of selected project
    """

    def get(self, request, project_pk, format=None):
        """
        List all workflow of selected project
        ---
        response_serializer: serializers.WorkflowSerializer
        """
        # 404 on an unknown project rather than returning an empty list.
        project = get_object_or_404(Project, pk=project_pk)
        serializer = serializers.WorkflowSerializer(
            Workflow.objects.filter(project=project), many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
class CommentList(APIView):
    """
    Display comment list or create new
    """
    # Session auth without CSRF enforcement so API clients can POST/DELETE.
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
    def get(self, request, item_pk, format=None):
        """
        Display comment list of selected item
        ---
        response_serializer: serializers.CommentSerializer
        """
        comments = Comment.objects.filter(item__pk=item_pk)
        serializer = serializers.CommentSerializer(comments, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
    def post(self, request, item_pk, format=None):
        """
        Create new comment for selected item
        ---
        request_serializer: serializers.CommentSerializer
        response_serializer: serializers.CommentSerializer
        """
        # Copy the payload so the item from the URL can be injected.
        data = dict(request.data)
        data['item'] = item_pk
        # Re-read person/text from the raw payload -- presumably because
        # dict() over a QueryDict leaves list-wrapped values; confirm.
        data['person'] = int(request.data['person'])
        data['text'] = request.data['text']
        serializer = serializers.CommentSerializer(data=data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, item_pk, format=None):
        """
        Delete selected comment
        ---
        request_serializer: serializers.CommentSerializer
        """
        # NOTE(review): despite its name, ``item_pk`` is used here as the
        # Comment primary key, not the parent Item's -- confirm the URL
        # routing matches.
        comment = get_object_or_404(Comment, pk=item_pk)
        comment.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ItemCategoryDetails(APIView):
    """
    Retrieve, update or delete details of selected item category
    """
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def patch(self, request, category_pk, format=None):
        """
        Update partial details of selected item category
        ---
        request_serializer: serializers.ItemCategorySerializer
        response_serializer: serializers.ItemCategorySerializer
        """
        category = get_object_or_404(ItemCategory, pk=category_pk)
        serializer = serializers.ItemCategorySerializer(category, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        # ``status`` passed as a keyword for consistency with sibling views.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, category_pk, format=None):
        """
        Delete selected item category
        """
        # First parameter was previously misspelled ``selft``.
        category = get_object_or_404(ItemCategory, pk=category_pk)
        category.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ItemDetails(APIView):
    """
    Retrieve or update details of selected item
    """
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def patch(self, request, item_pk, format=None):
        """
        Update partial details of selected item
        ---
        request_serializer: serializers.ItemSerializer
        response_serializer: serializers.ItemSerializer
        """
        item = get_object_or_404(Item, pk=item_pk)
        serializer = serializers.ItemSerializer(item, data=request.data, partial=True)
        if serializer.is_valid():
            # Only touch the related ItemModel once the payload is known to
            # be valid; previously the rename was saved even when the PATCH
            # was rejected with a 400.
            if 'name' in request.data:
                item_model = item.item_model
                item_model.name = request.data['name']
                item_model.save()
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, item_pk, format=None):
        """
        Delete selected item
        """
        item = get_object_or_404(Item, pk=item_pk)
        item.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class WorkflowDetails(APIView):
    """
    Retrieve or update details of selected workflow
    """
    # Session auth without CSRF enforcement plus HTTP basic auth.
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def get(self, request, workflow_pk, format=None):
        """
        Retrieve details of selected workflow
        ---
        response_serializer: serializers.WorkflowSerializer
        """
        serializer = serializers.WorkflowSerializer(
            get_object_or_404(Workflow, pk=workflow_pk))
        return Response(serializer.data, status=status.HTTP_200_OK)

    def patch(self, request, workflow_pk, format=None):
        """
        Update informations of selected workflow
        ---
        request_serializer: serializers.WorkflowSerializer
        response_serializer: serializers.WorkflowSerializer
        """
        workflow = get_object_or_404(Workflow, pk=workflow_pk)
        serializer = serializers.WorkflowSerializer(
            workflow, data=request.data, partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class WorkflowDragPosition(APIView):
    """
    Update workflow position with drag and drop
    """
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request, workflow_pk, related_pk=None):
        """
        Update workflow position with drag and drop
        """
        workflow = get_object_or_404(Workflow, pk=workflow_pk)
        # Reposition relative to another workflow when one is given,
        # otherwise just reposition the workflow itself.
        if related_pk is None:
            update_workflow_position(workflow)
        else:
            update_workflow_position(workflow,
                                     Workflow.objects.get(pk=related_pk))
        return Response(status=status.HTTP_200_OK)
class ItemDragPosition(APIView):
    """
    Update item position with drag and drop
    """
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)

    def post(self, request, item_pk, related_pk=None):
        """
        Update item position with drag and drop
        """
        item = get_object_or_404(Item, pk=item_pk)
        # Reposition relative to another item when one is given,
        # otherwise just reposition the item itself.
        if related_pk is None:
            update_item_position(item)
        else:
            update_item_position(item, Item.objects.get(pk=related_pk))
        return Response(status=status.HTTP_200_OK)
class CategoryDragPosition(APIView):
    """
    Update category position with drag and drop
    """
    authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
    def post(self, request, category_pk, related_pk=None):
        """
        Update category position with drag and drop
        """
        category = get_object_or_404(ItemCategory, pk=category_pk)
        # NOTE(review): assumes the category belongs to at least one
        # workflow; an empty workflow_set raises IndexError (HTTP 500) --
        # confirm that cannot happen.
        workflow = category.workflow_set.all()[0]
        if related_pk is not None:
            related_item = ItemCategory.objects.get(pk=related_pk)
            update_category_position(workflow, category, related_item)
        else:
            update_category_position(workflow, category)
        return Response(status=status.HTTP_200_OK)
| |
import argparse
import getpass
import keyring
import logging
import re
import subprocess
import sys
import tempfile
from colorlog import ColoredFormatter
from emaildiff.mail import send as send
from os import path
# Help epilog for argparse; every ``%s`` is later substituted with the
# detected sub-command name in ``main`` via ``EPILOG.count('%s')``, so the
# placeholder count is self-synchronizing.  User-facing typos fixed
# (uncommitted/simultaneously/--compose, consistent example address).
EPILOG = """ Utility to email the color diff and patches in email from shell.
Examples:
\x1b[36mgit %s --compose\x1b[m
compose message in default git editor to be sent prefixed with diff of changes.
\x1b[36mgit %s -u -to skysam@gmail.cam asksam@live.com\x1b[m
if you have uncommitted changes you can use this flag to send diff of uncommitted
changes compared to the last commit head. you can simultaneously send to multiple
recipients as in this case separated by space between skysam@gmail.cam asksam@live.com
\x1b[36mgit %s -to --compose\x1b[m
email address to whom you want to send to, along with --compose flag to write
content of email message.
\x1b[36mgit %s --compose --patches 4 -to skysam@gmail.cam\x1b[m
first flag will cause text editor to open for you to compose message and then
the --patches flag will take 4 number of last commit diffs to be attached and
emailed to skysam@gmail.cam
\x1b[36mgit %s -d 'HEAD^ HEAD' -to skysam@gmail.cam\x1b[m
if present pass arguments to it as you will do to git diff in inverted commas.
"""
def __validate_address(address):
    """Return *address* if it looks like a valid e-mail address.

    :param address: e-mail address to validate
    :type address: str
    :returns: the address, unchanged, when it matches
    :raises argparse.ArgumentTypeError: if the address is malformed
    """
    # Raw string so the regex escapes (\s, \.) are not interpreted by
    # Python's string parser.
    if re.match(r'^([^@\s]+)@((?:[-a-z0-9]+\.)+[a-z]{2,})$', address):
        return address
    raise argparse.ArgumentTypeError('Invalid e-mail address: %s' % address)
def main():
    """Parse command-line arguments, set up logging and run the tool.

    Builds the argument parser (program name derived from the installed
    ``git-<name>`` executable), configures a colorized console logger,
    then delegates the real work to ``__pre_Check``.
    """
    # The executable is installed as ``git-<name>``; recover <name> for
    # usage/help output.
    appName = path.basename(sys.argv[0]).split('-')[-1]
    parser = argparse.ArgumentParser(prog=appName,
        description=__doc__, epilog=EPILOG% tuple([appName] * EPILOG.count('%s')),
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-v', '--verbose', action='store_true',
        help='if enabled will spit every command and its resulting data.')
    parser.add_argument('-c', '--compose', action='store_true',
        help='compose message in default git editor to be sent prefixed with diff')
    # Each address is validated up front by __validate_address.
    parser.add_argument('-to', type=__validate_address, metavar='Email', nargs='+',
        help='enter a valid email you want to send to.')
    parser.add_argument('-p', '--patches', type=int, default=0, metavar='*.patch files',
        help='total number of pathces of last commits to email')
    parser.add_argument('-d', '--diff', required=False, default='HEAD^ HEAD',
        metavar='HEAD^ HEAD', help='if present pass arguments to it as you \
will do to git diff in inverted commas')
    parser.add_argument('-u', required=False, action='store_true', help='pass argument \
to email diff of uncommited changes.')
    args = parser.parse_args()
    # Colorized console logger; the message color tracks severity.
    logger = logging.getLogger(appName)
    handler = logging.StreamHandler()
    DATE_FORMAT = '%H:%M'
    formatter = ColoredFormatter(
        "%(log_color)s%(name)s %(asctime)-2s%(reset)s %(message_log_color)s%(message)s",
        secondary_log_colors={
            'message': {
                'ERROR': 'red',
                'CRITICAL': 'red',
                'INFO': 'cyan',
                'WARNING': 'yellow'
            }
        },
        datefmt=DATE_FORMAT,
    )
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    __pre_Check(args, logger)
def __update_config(log, key, value):
    """Write ``key = value`` to the user's global git config.

    :param log: logger passed through to ``_exec_git_command``
    :param key: dotted git config key (e.g. ``maildiff.mailfrom``)
    :param value: value to store for the key
    """
    _exec_git_command(log, 'git config --global %s %s' % (key, value))
def _guarantee_bool(function):
    """Decorator guaranteeing a ``True``/``False`` return value.

    The wrapped callable's result is coerced with ``bool``; any ordinary
    exception it raises is reported as ``False``.
    """
    def wrapper(*args, **kargs):
        try:
            return bool(function(*args, **kargs))
        # A bare ``except:`` also trapped SystemExit/KeyboardInterrupt;
        # limit the guarantee to ordinary errors.
        except Exception:
            return False
    return wrapper
def config_db(log):
    """Read the user's git configuration into a dict.

    :param log: logger to log information
    :type log: logging.Logger
    :returns: mapping of git config key to its value
    :rtype: dict
    """
    raw, _ = _exec_git_command(log, 'git config --list')
    settings = {}
    # Each non-empty line looks like ``section.key=value``; keep the text
    # after the last '=' as the value.
    for entry in raw.split("\n"):
        if not entry:
            continue
        parts = entry.split("=")
        settings[parts[0]] = parts[-1]
    return settings
def launchEditor(editor):
    """Open *editor* on a temporary file and return its contents as HTML.

    :param editor: name or path of the editor (possibly with CLI flags)
    :type editor: str
    :returns: the composed message with newlines converted to ``<br>``
    :rtype: str
    :raises IOError: if the editor exits with a non-zero status
    """
    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.close()
        # Strip trailing CLI flags (e.g. ``subl -n``) unless the editor is
        # a plain terminal editor that is known flag-free here.
        if "-" in editor and editor not in ['vi', 'vim', 'nano', 'emacs']:
            editor = re.sub(" -.*", "", editor)
        # Sublime Text must be told to wait (-w) until the file is closed.
        # (Previously an extra, immediately-overwritten ``openToEdit``
        # assignment lived in the branch above; it was dead code.)
        if re.search('Sublime', editor):
            openToEdit = [editor, '-w', f.name]
        else:
            openToEdit = [editor, f.name]
        if subprocess.call(openToEdit) != 0:
            raise IOError("%s exited with code." % (editor.split(" ")[0]))
        # Read the composed message in one go instead of the previous
        # line-by-line string concatenation plus redundant join.
        with open(f.name) as temp_file:
            msg = temp_file.read()
        return msg.replace("\n", "<br>")
def _setUp_maildiff(log, config):
    """Interactively collect e-mail settings and persist them.

    Sender address and SMTP details go to the global ``.gitconfig``; the
    password is stored in the OS keychain via ``keyring``.

    :param log: logger used for the interactive prompts
    :param config: existing configuration read from ``.gitconfig``
    :returns: True once configuration is complete
    """
    # check if data in global config (``in`` instead of the Python2-only
    # dict.has_key)
    if 'maildiff.mailfrom' not in config:
        log.info("\x1b[32mFirst time mail setup.\x1b[m")
        userEmail = config['user.email']
        log.warning("Do you want to use your git email '%s' to send diffs or any other email address ?", userEmail)
        ret = raw_input('[YES]')
        if ret.lower() in ['', 'yes', 'y']:
            ret = userEmail
            __update_config(log, 'maildiff.mailfrom', ret)
        else:
            ret = __validate_address(ret)
            # NOTE: the previous extra ``git maildiff.mailfrom <addr>``
            # shell call was not a valid git command and duplicated the
            # __update_config below; it has been removed.
            __update_config(log, 'maildiff.mailfrom', ret)
        log.info("Please enter password for the email: %s" , ret)
        emailPwd = getpass.getpass(prompt=" Password: ")
        keyring.set_password('maildiff', ret, emailPwd)
        # enter SMTP details for sending emails
        if 'maildiff.smtpserver' not in config:
            log.info("Add SMTP details for '%s'.", ret)
            smtpServer = raw_input(" SMTP Server: ")
            __update_config(log, 'maildiff.smtpserver', smtpServer)
            smtpServerPort = raw_input(" SMTP Server Port: ")
            __update_config(log, 'maildiff.smtpserverport', smtpServerPort)
            smtpEncryption = raw_input(" Server Encryption: ")
            __update_config(log, 'maildiff.smtpencryption', smtpEncryption)
    return True
def __pre_Check(args, log):
    """Validate the repository state, build the diff and send the e-mails.

    Reads defaults from git config, aborts early when the branch cannot be
    determined or uncommitted changes exist without ``-u``, then renders
    the diff to HTML and mails it to every recipient.

    :param args: parsed command-line options (argparse.Namespace)
    :param log: logger for user-facing messages
    """
    config = config_db(log)
    # Fall back to vi when no editor is configured.
    editor = config['core.editor'] if config.has_key('core.editor') else 'vi'
    VERBOSE = args.verbose
    # -u sends the working-tree diff; otherwise diff the given revisions.
    diffCmd = 'git diff' if args.u else 'git diff %s' % args.diff
    branchName, _ = _exec_git_command(log, 'git rev-parse --abbrev-ref HEAD')
    if not branchName:
        return
    # stripping newline character which got appended when pulling branch name
    branchName = branchName.split("\n")[0]
    commitComment, _ = _exec_git_command(log, 'git log -1 --pretty=%B')
    subject = "%s: %s" % (branchName, commitComment)
    # check for fatal error when executing git command
    diffData, error = _exec_git_command(log, diffCmd, VERBOSE)
    if 'fatal' not in error.split(":"):
        modifiedData, error = _exec_git_command(log, 'git status', VERBOSE)
        if any([re.search(word, modifiedData) for word in ['modified', 'untracked']]):
            log.warning('You have uncommited changes.')
            if not args.u:
                log.info("Use git maildiff -u to email diff of uncommited changes")
                return
        # Attach the last ``args.patches`` commits as .patch files.
        name, _ = _exec_git_command(log, 'git format-patch -%s' % args.patches)
        patches = [item for item in name.split("\n") if item]
        if diffData:
            message = ""
            if args.compose:
                message = launchEditor(editor)
            htmlDiff = get_Html(diffData.split("\n"))
            remotePath, _ = _exec_git_command(log, 'git config --get remote.origin.url')
            message = "%s<br>git clone %s<br><br>%s" % (message, remotePath, htmlDiff)
            updateComplete = _setUp_maildiff(log, config)
            if updateComplete:
                # update the user email info by reading config again
                config = config_db(log)
            # Prompt when no -to recipients were supplied on the CLI.
            mailtos = args.to if args.to else [raw_input(
                "Who do you want to send to ?")]
            for mailto in mailtos:
                log.info("Trying to send to %s", mailto)
                __email_diff(log, subject, mailto, message, patches)
    else:
        log.error(error.capitalize())
def get_Html(linesfromDiff, sideBySide=False):
    """Convert plain ``git diff`` output lines to color-coded HTML.

    :param linesfromDiff: diff between commits, one plain-text line each
    :param sideBySide: when True, render side by side (not implemented yet)
    :returns: colored html diff text (``None`` for the unimplemented
        side-by-side mode)
    :rtype: str
    """
    spanOpen = """<span style='font-size:1.0em; color: """
    spanOpenEnd = ";font-family: courier, arial, helvetica, sans-serif;'>"
    space = ' '
    if not sideBySide:
        return _traditional_diff(linesfromDiff, spanOpen, spanOpenEnd, space)
    # TODO: build side-by-side html lines with color formatting.
def _traditional_diff(linesfromDiff, openTag, openTagEnd, nbsp):
    """Render diff lines one per row, colored by their diff marker.

    Tracks the new-file line number from ``@@`` hunk headers; added and
    context lines advance it, removed lines do not.
    """
    rendered = []
    new_line_no = 0

    def emit(number, color, text):
        # Indent by the line's tab count, then wrap it in a colored span.
        rendered.append("%s:%s#%s%s%s%s</span><br>" % (
            repr(number), openTag, color, openTagEnd,
            nbsp * text.count('\t'), text))

    for text in linesfromDiff:
        if text.startswith(('diff ', 'index ', '--- ')):
            emit(new_line_no, "10EDF5", text)
            continue
        if text.startswith('-'):
            emit(new_line_no, "ff0000", text)
            continue
        if text.startswith('+++ '):
            emit(new_line_no, "07CB14", text)
            continue
        if text.startswith('@@ '):
            # Hunk header carries the starting line number of the new file.
            _, old_nr, new_nr, _ = text.split(' ', 3)
            new_line_no = int(new_nr.split(',')[0])
            emit(new_line_no, "5753BE", text)
            continue
        if text.startswith('+'):
            emit(new_line_no, "007900", text)
        if text.startswith('+') or text.startswith(' '):
            new_line_no += 1
    return ''.join(rendered)
def _exec_git_command(log, command, verbose=False):
    """Run a git command in a shell and capture its output.

    :param log: logger used for errors and verbose tracing
    :param command: git command line to execute
    :param verbose: when True, log every command and its resulting data
    :returns: tuple of (stdout data, stderr data)
    """
    # converts multiple spaces to single space
    command = re.sub(' +', ' ', command)
    pr = subprocess.Popen(command, shell=True,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains both pipes together, avoiding the deadlock that
    # sequential stdout.read()/stderr.read() can hit on full pipe buffers.
    msg, err = pr.communicate()
    if err:
        log.error(err)
    if verbose and msg:
        log.info("Executing '%s' %s", command, msg)
    # Always return the (data, error) pair; a previous bare ``return`` on
    # 'Could not resolve host' made callers crash while tuple-unpacking.
    return msg, err
def __email_diff(log, subject, emailTo, htmlDiff, attachment):
    """Send the color diff via e-mail to a single recipient.

    :param log: logger for progress and error messages
    :param subject: name of the branch with commit message
    :param emailTo: recipient address (re-validated before sending)
    :param htmlDiff: html formatted message body
    :param attachment: list of file names to be attached
    """
    # add tool signature
    htmlDiff = """%s<br><br>
Sent using git maildiff<br>
git clone https://sanfx@bitbucket.org/sanfx/git-maildiff.git""" % htmlDiff
    emailInfo = config_db(log)
    # The password lives in the OS keychain, not in .gitconfig.
    pwd = str(keyring.get_password('maildiff', emailInfo['maildiff.mailfrom']))
    mail = send.EMail(
        mailfrom=emailInfo['maildiff.mailfrom'],
        server=emailInfo['maildiff.smtpserver'],
        usrname=emailInfo['maildiff.mailfrom'].split('@')[0],
        password=pwd,
        logger=log,
        debug=False
    )
    # Validate the recipient one more time; a bad address is logged, not
    # raised, so remaining recipients can still be processed by the caller.
    try:
        emailTo = __validate_address(emailTo)
    except argparse.ArgumentTypeError as er:
        log.error("%s. Message not sent.", er)
    else:
        isSent = mail.sendMessage(subject, htmlDiff, attachment, emailTo)
        if isSent:
            msg = ' Diff of branch, %s sent to email: %s .' % (subject, emailTo)
            log.info(msg)
| |
#!/usr/bin/env python
from __future__ import print_function
from builtins import input
from builtins import str
import sys
import pmagpy.pmag as pmag
def main(command_line=True, **kwargs):
    """
    NAME
        sio_magic.py

    DESCRIPTION
        converts SIO .mag format files to magic_measurements format files

    SYNTAX
        sio_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -usr USER:   identify user, default is ""
        -f FILE: specify .mag format input file, required
        -fsa SAMPFILE : specify er_samples.txt file relating samples, site and locations names,default is none -- values in SAMPFILE will override selections for -loc (location), -spc (designate specimen), and -ncn (sample-site naming convention)
        -F FILE: specify output file, default is magic_measurements.txt
        -Fsy: specify er_synthetics file, default is er_sythetics.txt
        -LP [colon delimited list of protocols, include all that apply]
            AF:  af demag
            T: thermal including thellier but not trm acquisition
            S: Shaw method
            I: IRM (acquisition)
            I3d: 3D IRM experiment
            N: NRM only
            TRM: trm acquisition
            ANI: anisotropy experiment
            D: double AF demag
            G: triple AF demag (GRM protocol)
            CR: cooling rate experiment.
                The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional)
                where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps.
                XXX.00 is optional zerofield baseline. XXX.70 is alteration check.
           syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A
                where xxx, yyy, zzz...xxx  are cooling time in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70
                if you use a zerofield step then no need to specify the cooling rate for the zerofield
                It is important to add to the command line the -A option so the measurements will not be averaged.
                But users need to make sure that there are no duplicate  measurements in the file
        -V [1,2,3] units of IRM field in volts using ASC coil #1,2 or 3
        -spc NUM : specify number of characters to designate a  specimen, default = 0
        -loc LOCNAME : specify location/study name, must have either LOCNAME or SAMPFILE or be a synthetic
        -syn INST TYPE:  sets these specimens as synthetics created at institution INST and of type TYPE
        -ins INST : specify which demag instrument was used (e.g, SIO-Suzy or SIO-Odette),default is ""
        -dc B PHI THETA: dc lab field (in micro tesla) and phi,theta, default is none
              NB: use PHI, THETA = -1 -1 to signal that it changes, i.e. in anisotropy experiment
        -ac B : peak AF field (in mT) for ARM acquisition, default is none
        -ncn NCON:  specify naming convention: default is #1 below
        -A: don't average replicate measurements
       Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
        NB: all others you will have to customize your self
             or e-mail ltauxe@ucsd.edu for help.

        [8] synthetic - has no site name
        [9] ODP naming convention

    INPUT
        Best to put separate experiments (all AF, thermal, thellier, trm aquisition, Shaw, etc.) in
        seperate .mag files (eg. af.mag, thermal.mag, etc.)

        Format of SIO .mag files:
        Spec Treat CSD Intensity Declination Inclination [optional metadata string]


        Spec: specimen name
        Treat:  treatment step
            XXX T in Centigrade
            XXX AF in mT
            for special experiments:
              Thellier:
                XXX.0  first zero field step
                XXX.1  first in field step [XXX.0 and XXX.1 can be done in any order]
                XXX.2  second in-field step at lower temperature (pTRM check)
                XXX.3  second zero-field step after infield (pTRM check step)
                       XXX.3 MUST be done in this order  [XXX.0, XXX.1 [optional XXX.2] XXX.3]
              AARM:
                X.00  baseline step (AF in zero bias field - high peak field)
                X.1   ARM step (in field step)  where
                   X is the step number in the 15 position scheme
                      (see Appendix to Lecture 13 - http://magician.ucsd.edu/Essentials_2)
              ATRM:
                X.00 optional baseline
                X.1 ATRM step (+X)
                X.2 ATRM step (+Y)
                X.3 ATRM step (+Z)
                X.4 ATRM step (-X)
                X.5 ATRM step (-Y)
                X.6 ATRM step (-Z)
                X.7 optional alteration check (+X)

              TRM:
                XXX.YYY  XXX is temperature step of total TRM
                         YYY is dc field in microtesla


         Intensity assumed to be total moment in 10^3 Am^2 (emu)
         Declination:  Declination in specimen coordinate system
         Inclination:  Declination in specimen coordinate system

         Optional metatdata string:  mm/dd/yy;hh:mm;[dC,mT];xx.xx;UNITS;USER;INST;NMEAS
             hh in 24 hours.
             dC or mT units of treatment XXX (see Treat above) for thermal or AF respectively
             xx.xxx   DC field
             UNITS of DC field (microT, mT)
             INST:  instrument code, number of axes, number of positions (e.g., G34 is 2G, three axes,
                    measured in four positions)
             NMEAS: number of measurements in a single position (1,3,200...)
    """
    # initialize some stuff
    mag_file = None
    codelist = None
    infile_type="mag"
    noave=0
    methcode,inst="LP-NO",""
    phi,theta,peakfield,labfield=0,0,0,0
    pTRM,MD,samp_con,Z=0,0,'1',1
    # AARM 15-position scheme: dc-field directions for steps 1..15
    dec=[315,225,180,135,45,90,270,270,270,90,180,180,0,0,0]
    inc=[0,0,0,0,0,-45,-45,0,45,45,45,-45,-90,-45,45]
    # ATRM 6-position scheme (+x,+y,+z,-x,-y,-z) plus extras
    tdec=[0,90,0,180,270,0,0,90,0]
    tinc=[0,0,90,0,0,-90,0,0,90]
    missing=1
    demag="N"
    er_location_name=""
    citation='This study'
    args=sys.argv
    fmt='old'
    syn=0
    synfile='er_synthetics.txt'
    samp_infile,Samps='',[]
    trm=0
    irm=0
    specnum=0
    coil=""
    mag_file=""
    #
    # get command line arguments
    #
    meas_file="magic_measurements.txt"
    user=""
    # GUI / programmatic entry point: everything arrives through kwargs
    if not command_line:
        user = kwargs.get('user', '')
        meas_file = kwargs.get('meas_file', '')
        syn_file = kwargs.get('syn_file', '')
        mag_file = kwargs.get('mag_file', '')
        labfield = kwargs.get('labfield', '')
        if labfield:
            # convert from microtesla to tesla
            labfield = float(labfield) *1e-6
        else:
            labfield = 0
        phi = kwargs.get('phi', 0)
        if phi:
            phi = float(phi)
        else:
            phi = 0
        theta = kwargs.get('theta', 0)
        if theta:
            theta=float(theta)
        else:
            theta = 0
        peakfield = kwargs.get('peakfield', 0)
        if peakfield:
            # convert from mT to tesla
            peakfield=float(peakfield) *1e-3
        else:
            peakfield = 0
        specnum = kwargs.get('specnum', 0)
        samp_con = kwargs.get('samp_con', '1')
        er_location_name = kwargs.get('er_location_name', '')
        samp_infile = kwargs.get('samp_infile', '')
        syn = kwargs.get('syn', 0)
        institution = kwargs.get('institution', '')
        syntype = kwargs.get('syntype', '')
        inst = kwargs.get('inst', '')
        noave = kwargs.get('noave', 0)
        codelist = kwargs.get('codelist', '')
        coil = kwargs.get('coil', '')
        cooling_rates = kwargs.get('cooling_rates', '')
    # command-line entry point: parse sys.argv by hand
    if command_line:
        if "-h" in args:
            print(main.__doc__)
            return False
        if "-usr" in args:
            ind=args.index("-usr")
            user=args[ind+1]
        if '-F' in args:
            ind=args.index("-F")
            meas_file=args[ind+1]
        if '-Fsy' in args:
            ind=args.index("-Fsy")
            synfile=args[ind+1]
        if '-f' in args:
            ind=args.index("-f")
            mag_file=args[ind+1]
        if "-dc" in args:
            ind=args.index("-dc")
            labfield=float(args[ind+1])*1e-6
            phi=float(args[ind+2])
            theta=float(args[ind+3])
        if "-ac" in args:
            ind=args.index("-ac")
            peakfield=float(args[ind+1])*1e-3
        if "-spc" in args:
            ind=args.index("-spc")
            specnum=int(args[ind+1])
        if "-loc" in args:
            ind=args.index("-loc")
            er_location_name=args[ind+1]
        if "-fsa" in args:
            ind=args.index("-fsa")
            samp_infile = args[ind+1]
        if '-syn' in args:
            syn=1
            ind=args.index("-syn")
            institution=args[ind+1]
            syntype=args[ind+2]
            if '-fsy' in args:
                ind=args.index("-fsy")
                synfile=args[ind+1]
        if "-ins" in args:
            ind=args.index("-ins")
            inst=args[ind+1]
        if "-A" in args: noave=1
        if "-ncn" in args:
            ind=args.index("-ncn")
            samp_con=sys.argv[ind+1]
        if '-LP' in args:
            ind=args.index("-LP")
            codelist=args[ind+1]
        if "-V" in args:
            ind=args.index("-V")
            coil=args[ind+1]

    # make sure all initial values are correctly set up (whether they come from the command line or a GUI)
    if samp_infile:
        Samps, file_type = pmag.magic_read(samp_infile)
    if coil:
        coil = str(coil)
        methcode="LP-IRM"
        irmunits = "V"
        if coil not in ["1","2","3"]:
            print(main.__doc__)
            print('not a valid coil specification')
            return False, '{} is not a valid coil specification'.format(coil)
    if mag_file:
        try:
            # NOTE(review): shadows the builtin `input`; the open file is
            # consumed by the readlines() loop below
            input=open(mag_file,'r')
        except:
            print("bad mag file name")
            return False, "bad mag file name"
    if not mag_file:
        print(main.__doc__)
        print("mag_file field is required option")
        return False, "mag_file field is required option"
    if specnum!=0:
        # negative so rec[0][:specnum] strips the specimen suffix
        specnum=-specnum
    #print 'samp_con:', samp_con
    if samp_con:
        # conventions 4 and 7 carry an extra "-Z" character count
        if "4" == samp_con[0]:
            if "-" not in samp_con:
                print("naming convention option [4] must be in form 4-Z where Z is an integer")
                print('---------------')
                return False, "naming convention option [4] must be in form 4-Z where Z is an integer"
            else:
                Z=samp_con.split("-")[1]
                samp_con="4"
        if "7" == samp_con[0]:
            if "-" not in samp_con:
                print("option [7] must be in form 7-Z where Z is an integer")
                return False, "option [7] must be in form 7-Z where Z is an integer"
            else:
                Z=samp_con.split("-")[1]
                samp_con="7"
    # translate the -LP protocol codes into MagIC method codes
    if codelist:
        codes=codelist.split(':')
        if "AF" in codes:
            demag='AF'
            if'-dc' not in args: methcode="LT-AF-Z"
            if'-dc' in args: methcode="LT-AF-I"
        if "T" in codes:
            demag="T"
            if '-dc' not in args: methcode="LT-T-Z"
            if '-dc' in args: methcode="LT-T-I"
        if "I" in codes:
            methcode="LP-IRM"
            irmunits="mT"
        if "I3d" in codes:
            methcode="LT-T-Z:LP-IRM-3D"
        if "S" in codes:
            demag="S"
            methcode="LP-PI-TRM:LP-PI-ALT-AFARM"
            trm_labfield=labfield
            # Shaw method needs interactive input for ARM field / TRM temp
            ans=input("DC lab field for ARM step: [50uT] ")
            if ans=="":
                arm_labfield=50e-6
            else:
                arm_labfield=float(ans)*1e-6
            ans=input("temperature for total trm step: [600 C] ")
            if ans=="":
                trm_peakT=600+273 # convert to kelvin
            else:
                trm_peakT=float(ans)+273 # convert to kelvin
        if "G" in codes: methcode="LT-AF-G"
        if "D" in codes: methcode="LT-AF-D"
        if "TRM" in codes:
            demag="T"
            trm=1
        if "CR" in codes:
            demag="T"
            cooling_rate_experiment=1
            if command_line:
                # cooling rates are the positional argument after "CR"
                ind=args.index("CR")
                cooling_rates=args[ind+1]
                cooling_rates_list=cooling_rates.split(',')
            else:
                cooling_rates_list=str(cooling_rates).split(',')
    if demag=="T" and "ANI" in codes:
        methcode="LP-AN-TRM"
    if demag=="T" and "CR" in codes:
        methcode="LP-CR-TRM"
    if demag=="AF" and "ANI" in codes:
        methcode="LP-AN-ARM"
        # fall back to defaults when fields were not supplied
        if labfield==0: labfield=50e-6
        if peakfield==0: peakfield=.180
    SynRecs,MagRecs=[],[]
    version_num=pmag.get_version()

    ##################################

    if 1:
    #if infile_type=="SIO format":
        # main conversion loop: one MagRec per measurement line
        for line in input.readlines():
            instcode=""
            if len(line)>2:
                SynRec={}
                MagRec={}
                MagRec['er_location_name']=er_location_name
                MagRec['magic_software_packages']=version_num
                MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
                MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
                MagRec["treatment_ac_field"]='0'
                MagRec["treatment_dc_field"]='0'
                MagRec["treatment_dc_field_phi"]='0'
                MagRec["treatment_dc_field_theta"]='0'
                meas_type="LT-NO"
                rec=line.split()
                if rec[1]==".00":rec[1]="0.00"
                treat=rec[1].split('.')
                if methcode=="LP-IRM":
                    if irmunits=='mT':
                        labfield=float(treat[0])*1e-3
                    else:
                        labfield=pmag.getfield(irmunits,coil,treat[0])
                    # leading "-" on the treatment means a downward field
                    if rec[1][0]!="-":
                        phi,theta=0.,90.
                    else:
                        phi,theta=0.,-90.
                    meas_type="LT-IRM"
                    MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                    MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                    MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                # optional metadata columns: date/time, units, user, instrument
                if len(rec)>6:
                    code1=rec[6].split(';') # break e.g., 10/15/02;7:45 indo date and time
                    if len(code1)==2: # old format with AM/PM
                        missing=0
                        code2=code1[0].split('/') # break date into mon/day/year
                        code3=rec[7].split(';') # break e.g., AM;C34;200 into time;instr/axes/measuring pos;number of measurements
                        yy=int(code2[2])
                        # two-digit year: <90 is 20xx, otherwise 19xx
                        if yy <90:
                            yyyy=str(2000+yy)
                        else: yyyy=str(1900+yy)
                        mm=int(code2[0])
                        if mm<10:
                            mm="0"+str(mm)
                        else: mm=str(mm)
                        dd=int(code2[1])
                        if dd<10:
                            dd="0"+str(dd)
                        else: dd=str(dd)
                        time=code1[1].split(':')
                        hh=int(time[0])
                        if code3[0]=="PM":hh=hh+12
                        if hh<10:
                            hh="0"+str(hh)
                        else: hh=str(hh)
                        # NOTE(review): `min` shadows the builtin within this loop
                        min=int(time[1])
                        if min<10:
                            min= "0"+str(min)
                        else: min=str(min)
                        MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
                        MagRec["measurement_time_zone"]='SAN'
                        if inst=="":
                            if code3[1][0]=='C':instcode='SIO-bubba'
                            if code3[1][0]=='G':instcode='SIO-flo'
                        else:
                            instcode=''
                        MagRec["measurement_positions"]=code3[1][2]
                    elif len(code1)>2: # newest format (cryo7 or later)
                        if "LP-AN-ARM" not in methcode:labfield=0
                        fmt='new'
                        date=code1[0].split('/') # break date into mon/day/year
                        yy=int(date[2])
                        if yy <90:
                            yyyy=str(2000+yy)
                        else: yyyy=str(1900+yy)
                        mm=int(date[0])
                        if mm<10:
                            mm="0"+str(mm)
                        else: mm=str(mm)
                        dd=int(date[1])
                        if dd<10:
                            dd="0"+str(dd)
                        else: dd=str(dd)
                        time=code1[1].split(':')
                        hh=int(time[0])
                        if hh<10:
                            hh="0"+str(hh)
                        else: hh=str(hh)
                        min=int(time[1])
                        if min<10:
                            min= "0"+str(min)
                        else:
                            min=str(min)
                        MagRec["measurement_date"]=yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00.00"
                        MagRec["measurement_time_zone"]='SAN'
                        if inst=="":
                            if code1[6][0]=='C':
                                instcode='SIO-bubba'
                            if code1[6][0]=='G':
                                instcode='SIO-flo'
                        else:
                            instcode=''
                        if len(code1)>1:
                            MagRec["measurement_positions"]=code1[6][2]
                        else:
                            MagRec["measurement_positions"]=code1[7] # takes care of awkward format with bubba and flo being different
                        if user=="":user=code1[5]
                        # metadata also carries treatment units and dc field
                        if code1[2][-1]=='C':
                            demag="T"
                            if code1[4]=='microT' and float(code1[3])!=0. and "LP-AN-ARM" not in methcode: labfield=float(code1[3])*1e-6
                        if code1[2]=='mT' and methcode!="LP-IRM":
                            demag="AF"
                            if code1[4]=='microT' and float(code1[3])!=0.: labfield=float(code1[3])*1e-6
                        if code1[4]=='microT' and labfield!=0. and meas_type!="LT-IRM":
                            phi,theta=0.,-90.
                            if demag=="T": meas_type="LT-T-I"
                            if demag=="AF": meas_type="LT-AF-I"
                            MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                        if code1[4]=='' or labfield==0. and meas_type!="LT-IRM":
                            if demag=='T':meas_type="LT-T-Z"
                            if demag=="AF":meas_type="LT-AF-Z"
                            MagRec["treatment_dc_field"]='0'
                # fill in specimen / sample / site names
                if syn==0:
                    MagRec["er_specimen_name"]=rec[0]
                    MagRec["er_synthetic_name"]=""
                    MagRec["er_site_name"]=""
                    if specnum!=0:
                        MagRec["er_sample_name"]=rec[0][:specnum]
                    else:
                        MagRec["er_sample_name"]=rec[0]
                    if samp_infile and Samps: # if samp_infile was provided AND yielded sample data
                        samp=pmag.get_dictitem(Samps,'er_sample_name',MagRec['er_sample_name'],'T')
                        if len(samp)>0:
                            MagRec["er_location_name"]=samp[0]["er_location_name"]
                            MagRec["er_site_name"]=samp[0]["er_site_name"]
                        else:
                            MagRec['er_location_name']=''
                            MagRec["er_site_name"]=''
                    elif int(samp_con)!=6:
                        site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
                        MagRec["er_site_name"]=site
                    if MagRec['er_site_name']=="":
                        print('No site name found for: ',MagRec['er_specimen_name'],MagRec['er_sample_name'])
                    if MagRec["er_location_name"]=="":
                        print('no location name for: ',MagRec["er_specimen_name"])
                else:
                    # synthetic specimen: also build an er_synthetics record
                    MagRec["er_specimen_name"]=rec[0]
                    if specnum!=0:
                        MagRec["er_sample_name"]=rec[0][:specnum]
                    else:
                        MagRec["er_sample_name"]=rec[0]
                    MagRec["er_site_name"]=""
                    MagRec["er_synthetic_name"]=MagRec["er_specimen_name"]
                    SynRec["er_synthetic_name"]=MagRec["er_specimen_name"]
                    site=pmag.parse_site(MagRec['er_sample_name'],samp_con,Z)
                    SynRec["synthetic_parent_sample"]=site
                    SynRec["er_citation_names"]="This study"
                    SynRec["synthetic_institution"]=institution
                    SynRec["synthetic_type"]=syntype
                    SynRecs.append(SynRec)
                # decode the treatment step according to the experiment type
                if float(rec[1])==0:
                    pass
                elif demag=="AF":
                    if methcode != "LP-AN-ARM":
                        MagRec["treatment_ac_field"]='%8.3e' %(float(rec[1])*1e-3) # peak field in tesla
                        if meas_type=="LT-AF-Z": MagRec["treatment_dc_field"]='0'
                    else: # AARM experiment
                        if treat[1][0]=='0':
                            meas_type="LT-AF-Z:LP-AN-ARM:"
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                            MagRec["treatment_dc_field"]='%8.3e'%(0)
                            if labfield!=0 and methcode!="LP-AN-ARM": print("Warning - inconsistency in mag file with lab field - overriding file with 0")
                        else:
                            meas_type="LT-AF-I:LP-AN-ARM"
                            ipos=int(treat[0])-1
                            MagRec["treatment_dc_field_phi"]='%7.1f' %(dec[ipos])
                            MagRec["treatment_dc_field_theta"]='%7.1f'% (inc[ipos])
                            MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                elif demag=="T" and methcode == "LP-AN-TRM":
                    MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
                    if treat[1][0]=='0':
                        meas_type="LT-T-Z:LP-AN-TRM"
                        MagRec["treatment_dc_field"]='%8.3e'%(0)
                        MagRec["treatment_dc_field_phi"]='0'
                        MagRec["treatment_dc_field_theta"]='0'
                    else:
                        MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                        if treat[1][0]=='7': # alteration check as final measurement
                            meas_type="LT-PTRM-I:LP-AN-TRM"
                        else:
                            meas_type="LT-T-I:LP-AN-TRM"

                        # find the direction of the lab field in two ways:
                        # (1) using the treatment coding (XX.1=+x, XX.2=+y, XX.3=+z, XX.4=-x, XX.5=-y, XX.6=-z)
                        ipos_code=int(treat[1][0])-1
                        # (2) using the magnetization
                        DEC=float(rec[4])
                        INC=float(rec[5])
                        if INC < 45 and INC > -45:
                            if DEC>315 or DEC<45: ipos_guess=0
                            if DEC>45 and DEC<135: ipos_guess=1
                            if DEC>135 and DEC<225: ipos_guess=3
                            if DEC>225 and DEC<315: ipos_guess=4
                        else:
                            if INC >45: ipos_guess=2
                            if INC <-45: ipos_guess=5
                        # prefer the guess over the code
                        ipos=ipos_guess
                        MagRec["treatment_dc_field_phi"]='%7.1f' %(tdec[ipos])
                        MagRec["treatment_dc_field_theta"]='%7.1f'% (tinc[ipos])
                        # check it
                        if ipos_guess!=ipos_code and treat[1][0]!='7':
                            print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!"%(rec[0],".".join(list(treat))))

                elif demag=="S": # Shaw experiment
                    if treat[1][1]=='0':
                        if int(treat[0])!=0:
                            MagRec["treatment_ac_field"]='%8.3e' % (float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z" # first AF
                        else:
                            meas_type="LT-NO"
                            MagRec["treatment_ac_field"]='0'
                            MagRec["treatment_dc_field"]='0'
                    elif treat[1][1]=='1':
                        if int(treat[0])==0:
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                            MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                            meas_type="LT-AF-I"
                        else:
                            MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z"
                    elif treat[1][1]=='2':
                        if int(treat[0])==0:
                            MagRec["treatment_ac_field"]='0'
                            MagRec["treatment_dc_field"]='%8.3e'%(trm_labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                            MagRec["treatment_temp"]='%8.3e' % (trm_peakT)
                            meas_type="LT-T-I"
                        else:
                            MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z"
                    elif treat[1][1]=='3':
                        if int(treat[0])==0:
                            MagRec["treatment_ac_field"]='%8.3e' %(peakfield) # peak field in tesla
                            MagRec["treatment_dc_field"]='%8.3e'%(arm_labfield)
                            MagRec["treatment_dc_field_phi"]='%7.1f'%(phi)
                            MagRec["treatment_dc_field_theta"]='%7.1f'%(theta)
                            meas_type="LT-AF-I"
                        else:
                            MagRec["treatment_ac_field"]='%8.3e' % ( float(treat[0])*1e-3) # AF field in tesla
                            MagRec["treatment_dc_field"]='0'
                            meas_type="LT-AF-Z"

                # Cooling rate experient # added by rshaar
                elif demag=="T" and methcode == "LP-CR-TRM":

                    MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
                    if treat[1][0]=='0':
                        meas_type="LT-T-Z:LP-CR-TRM"
                        MagRec["treatment_dc_field"]='%8.3e'%(0)
                        MagRec["treatment_dc_field_phi"]='0'
                        MagRec["treatment_dc_field_theta"]='0'
                    else:
                        MagRec["treatment_dc_field"]='%8.3e'%(labfield)
                        if treat[1][0]=='7': # alteration check as final measurement
                            meas_type="LT-PTRM-I:LP-CR-TRM"
                        else:
                            meas_type="LT-T-I:LP-CR-TRM"
                        MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
                        MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta

                    indx=int(treat[1][0])-1
                    # alteration check matjed as 0.7 in the measurement file
                    if indx==6:
                        cooling_time= cooling_rates_list[-1]
                    else:
                        cooling_time=cooling_rates_list[indx]
                    MagRec["measurement_description"]="cooling_rate"+":"+cooling_time+":"+"K/min"

                elif demag!='N':
                    if len(treat)==1:treat.append('0')
                    MagRec["treatment_temp"]='%8.3e' % (float(treat[0])+273.) # temp in kelvin
                    if trm==0:  # demag=T and not trmaq
                        if treat[1][0]=='0':
                            meas_type="LT-T-Z"
                        else:
                            MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
                            MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
                            MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
                            if treat[1][0]=='1':meas_type="LT-T-I" # in-field thermal step
                            if treat[1][0]=='2':
                                meas_type="LT-PTRM-I" # pTRM check
                                pTRM=1
                            if treat[1][0]=='3':
                                MagRec["treatment_dc_field"]='0'  # this is a zero field step
                                meas_type="LT-PTRM-MD" # pTRM tail check
                    else:
                        labfield=float(treat[1])*1e-6
                        MagRec["treatment_dc_field"]='%8.3e' % (labfield) # labfield in tesla (convert from microT)
                        MagRec["treatment_dc_field_phi"]='%7.1f' % (phi) # labfield phi
                        MagRec["treatment_dc_field_theta"]='%7.1f' % (theta) # labfield theta
                        meas_type="LT-T-I:LP-TRM" # trm acquisition experiment

                # common measurement columns for every record
                MagRec["measurement_csd"]=rec[2]
                MagRec["measurement_magn_moment"]='%10.3e'% (float(rec[3])*1e-3) # moment in Am^2 (from emu)
                MagRec["measurement_dec"]=rec[4]
                MagRec["measurement_inc"]=rec[5]
                MagRec["magic_instrument_codes"]=instcode
                MagRec["er_analyst_mail_names"]=user
                MagRec["er_citation_names"]=citation
                if "LP-IRM-3D" in methcode : meas_type=methcode
                #MagRec["magic_method_codes"]=methcode.strip(':')
                MagRec["magic_method_codes"]=meas_type
                MagRec["measurement_flag"]='g'
                MagRec["er_specimen_name"]=rec[0]
                if 'std' in rec[0]:
                    MagRec["measurement_standard"]='s'
                else:
                    MagRec["measurement_standard"]='u'
                MagRec["measurement_number"]='1'
                #print MagRec['treatment_temp']
                MagRecs.append(MagRec)
    # average (unless noave) and write the output tables
    MagOuts=pmag.measurements_methods(MagRecs,noave)
    pmag.magic_write(meas_file,MagOuts,'magic_measurements')
    print("results put in ",meas_file)
    if len(SynRecs)>0:
        pmag.magic_write(synfile,SynRecs,'er_synthetics')
        print("synthetics put in ",synfile)
    return True, meas_file
def do_help():
    """Expose the converter's usage text (main's docstring) for callers."""
    doc = main.__doc__
    return doc
# Allow the converter to be run directly as a command-line script.
if __name__ == "__main__":
    main()
| |
from annoying.fields import AutoOneToOneField
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.contrib.auth.models import User
from gettext import gettext as _
from annoying.fields import JSONField
class Clan(models.Model):
    """A clan of players with a designated chief member."""
    # "+" disables the reverse relation from Member; chief may be unset
    chief = models.ForeignKey('Member', null=True, related_name="+")
    name = models.CharField(max_length=50, unique=True)
    pin = models.CharField(max_length=20, unique=True)
    location = models.CharField(max_length=50)
    level = models.IntegerField()

    def __unicode__(self):
        # e.g. "Dragons - Paris"
        return '%s - %s' % (self.name, self.location)
class Member(models.Model):
    """Player profile attached one-to-one to a Django auth User."""
    user = models.OneToOneField(User)
    level = models.IntegerField(null=True, blank=True)
    # reverse accessor Clan.members is used by the update_member signal
    clan = models.ForeignKey(Clan, null=True, blank=True, related_name="members")
    name = models.CharField(max_length=50, null=True, blank=True)

    def __unicode__(self):
        # 'clan' is a model field so hasattr is normally True; the guard
        # presumably covers unsaved/partial instances -- TODO confirm
        clan = getattr(self, 'clan') if hasattr(self, 'clan') else 'On his own'
        name = self.name or self.user.username or self.user.first_name
        return "[%s] %s" % (clan, name)

    def is_chief(self):
        # True if this member is the chief of ANY clan (searched globally,
        # not limited to self.clan)
        return Clan.objects.filter(chief=self).exists()
def create_member(sender, instance, created, **kwargs):
    """post_save handler for User: give each newly created, non-superuser
    account a matching Member profile."""
    if not created:
        return
    # strict identity check kept from the original implementation
    if instance.is_superuser is not False:
        return
    Member.objects.create(user=instance)
# Auto-create a Member profile whenever a User row is saved.
post_save.connect(create_member, sender=User)
def update_member(sender, instance, created, **kwargs):
    """post_save handler for Member: keep clan chief assignments consistent.

    When an existing member is saved, any clan he chaired but no longer
    belongs to gets a replacement chief (or none), and if he joined a
    chief-less clan he becomes its chief.
    """
    if not created:
        # Check if the member was the chief of another clan he left
        clans = Clan.objects.filter(chief=instance)
        # (removed a leftover Python-2 `print instance.clan` debug statement,
        # which was a SyntaxError under Python 3)
        if instance.clan:
            clans = clans.exclude(pk=instance.clan.pk)
        # We loop just in case somehow the member was the chief of
        # multiple clans but that should not be possible
        for clan in clans:
            # promote any remaining member of the clan, or leave it chief-less;
            # first() avoids materializing the whole member list
            clan.chief = clan.members.first()
            clan.save()
        # If the member joins a clan without a chief, make him the chief
        if instance.clan and not instance.clan.chief:
            instance.clan.chief = instance
            instance.clan.save()
# Re-evaluate chief assignments whenever a Member row is saved.
post_save.connect(update_member, sender=Member)
class Troop(models.Model):
    """Static definition of a troop type (level-independent attributes)."""
    TIER_1 = 't1'
    TIER_2 = 't2'
    TIER_3 = 't3'
    DARK = 'd'
    CATEGORIES = (
        (TIER_1, _("tier 1")),
        (TIER_2, _("tier 2")),
        (TIER_3, _("tier 3")),
        (DARK, _("dark")),
    )

    name = models.CharField(max_length=50)
    category = models.CharField(max_length=10, choices=CATEGORIES)
    preferred_target = models.CharField(max_length=250, blank=True, null=True)
    attack_type = models.CharField(max_length=250)
    housing_space = models.IntegerField()
    training_time = models.IntegerField()  # in seconds
    barack_level_required = models.CharField(max_length=250)
    range = models.DecimalField(max_digits=5, decimal_places=2)  # in tile
    movement_speed = models.DecimalField(max_digits=5, decimal_places=2)
    attack_speed = models.DecimalField(max_digits=5, decimal_places=2,
                                       null=True, blank=True)
    # free-form per-troop attributes not covered by the columns above
    extra_data = JSONField(null=True, blank=True)

    def __unicode__(self):
        return self.name
class TroopLevel(models.Model):
    """Per-level stats for a Troop; one row per (troop, level)."""
    troop = models.ForeignKey(Troop)
    level = models.IntegerField()
    damage_per_second = models.IntegerField(null=True, blank=True)
    damage_per_attack = models.DecimalField(max_digits=6, decimal_places=2,
                                            null=True, blank=True)
    hitpoints = models.IntegerField()
    training_cost = models.IntegerField()  # in gold
    research_cost = models.IntegerField(null=True, blank=True)  # in gold
    laboratory_level_required = models.IntegerField(null=True, blank=True)
    research_time = models.IntegerField(null=True, blank=True)  # in seconds
    extra_data = JSONField(null=True, blank=True)

    class Meta:
        # a troop cannot have two rows for the same level
        unique_together = ('troop', 'level')

    def __unicode__(self):
        return "%s - Level %s" % (self.troop, self.level)
class Troops(models.Model):
    """Join table: which troop (and at what level) a member owns."""
    member = models.ForeignKey(Member)
    troop = models.ForeignKey(Troop)
    troop_level = models.ForeignKey(TroopLevel)

    class Meta:
        # at most one row per (member, troop) pair
        unique_together = ('member', 'troop')
class Spell(models.Model):
    """Static definition of a spell type (level-independent attributes)."""
    REGULAR = "r"
    DARK = "d"
    CATEGORIES = (
        (REGULAR, _("regular")),
        (DARK, _("dark")),
    )

    name = models.CharField(max_length=250)
    category = models.CharField(max_length=15, choices=CATEGORIES)
    radius = models.DecimalField(max_digits=5, decimal_places=2)  # in tile
    number_of_pulses = models.IntegerField(blank=True, null=True)
    time_between_pulses = models.DecimalField(max_digits=5, decimal_places=2, blank=True, null=True)  # in seconds
    housing_space = models.IntegerField()
    time_to_brew = models.IntegerField()  # in seconds
    target = models.CharField(max_length=250, blank=True, null=True)
    spell_factory_level_required = models.IntegerField(blank=True, null=True)
    extra_data = JSONField(null=True, blank=True)

    def __unicode__(self):
        return self.name
class SpellLevel(models.Model):
    """Per-level stats for a Spell; one row per (spell, level)."""
    spell = models.ForeignKey(Spell)
    level = models.IntegerField()
    cost = models.IntegerField()  # in gold
    research_cost = models.IntegerField(blank=True, null=True)  # in gold
    research_time = models.IntegerField(blank=True, null=True)  # in seconds
    laboratory_level_required = models.CharField(max_length=250, blank=True, null=True)
    extra_data = JSONField(null=True, blank=True)

    class Meta:
        # a spell cannot have two rows for the same level
        unique_together = ('spell', 'level')

    def __unicode__(self):
        return "%s - Level %s" % (self.spell, self.level)
class Spells(models.Model):
    """Join table: which spell (and at what level) a member owns.

    Bug fix: the original pointed ``spell``/``spell_level`` at
    ``Troop``/``TroopLevel`` — an evident copy-paste from ``Troops``.
    They now reference ``Spell``/``SpellLevel`` (requires a schema
    migration on existing databases).
    """
    member = models.ForeignKey(Member)
    spell = models.ForeignKey(Spell)
    spell_level = models.ForeignKey(SpellLevel)

    class Meta:
        # at most one row per (member, spell) pair
        unique_together = ('member', 'spell')
class Hero(models.Model):
    """Static definition of a hero unit (level-independent attributes)."""
    name = models.CharField(max_length=50)
    attack_type = models.CharField(max_length=250)
    movement_speed = models.DecimalField(max_digits=5, decimal_places=2)  # in seconds
    attack_speed = models.DecimalField(max_digits=5, decimal_places=2)  # in seconds
    range = models.IntegerField()  # in tiles
    search_radius = models.IntegerField()  # in tiles

    def __unicode__(self):
        return self.name
class HeroAbility(models.Model):
    """Per-level stats of a hero's special ability."""
    hero = models.ForeignKey(Hero)
    level = models.IntegerField()
    damage_increase = models.IntegerField()
    health_recovery = models.IntegerField()
    ability_time = models.DecimalField(max_digits=5,
                                       decimal_places=2)  # in seconds
    summoned_unites = models.IntegerField()
    extra_data = JSONField(null=True, blank=True)

    def __unicode__(self):
        return "%s - Level %s" % (self.hero, self.level)
class HeroLevel(models.Model):
    """Per-level stats for a Hero; one row per (hero, level)."""
    hero = models.ForeignKey(Hero)
    level = models.IntegerField()
    damage_per_second = models.IntegerField()
    damage_per_hit = models.DecimalField(max_digits=5, decimal_places=2)
    hitpoints = models.IntegerField()
    regeneration_time = models.IntegerField()  # in seconds
    ability_level = models.ForeignKey(HeroAbility, null=True, blank=True)
    training_cost = models.IntegerField()  # in gold
    training_time = models.IntegerField(null=True, blank=True)  # in seconds
    town_hall_level_required = models.IntegerField()

    class Meta:
        # a hero cannot have two rows for the same level
        unique_together = ('hero', 'level')

    def __unicode__(self):
        # 'N/A' when this level has no associated ability
        ability_level = getattr(self.ability_level, 'level', 'N/A')
        return "%s - Level %s - Ability %s" % (self.hero, self.level, ability_level)
class Heros(models.Model):
    """Join table: which hero (and at what level) a member owns.

    NOTE: class name keeps the original (misspelled) 'Heros' for
    backward compatibility with existing references and migrations.
    """
    member = models.ForeignKey(Member)
    hero = models.ForeignKey(Hero)
    hero_level = models.ForeignKey(HeroLevel)

    class Meta:
        # at most one row per (member, hero) pair
        unique_together = ('member', 'hero')
class TownHall(models.Model):
    """A member's town-hall level (ForeignKey, so several rows per member
    are possible -- presumably one per recorded upgrade; TODO confirm)."""
    member = models.ForeignKey(Member)
    level = models.IntegerField()
class War(models.Model):
    """A clan war against a named enemy clan."""
    clan = models.ForeignKey(Clan)
    # enemy clan stored by name only -- no FK, it is not a local Clan row
    enemy_clan = models.CharField(max_length=100)
    number_of_participants = models.IntegerField()
    members = models.ManyToManyField(Member)
    preparation_time_remaining = models.IntegerField()  # in seconds
    time_remaining = models.IntegerField()  # in seconds
class Attack(models.Model):
    """One of a member's (up to two) attacks during a war."""
    FIRST = "1"
    SECOND = "2"
    ITERATIONS = (
        (FIRST, _("first")),
        (SECOND, _("second")),
    )

    war = models.ForeignKey(War)
    member = models.ForeignKey(Member)
    enemy_rank = models.IntegerField()
    enemy_town_hall_level = models.IntegerField()
    stars = models.IntegerField()
    durations = models.IntegerField()  # in seconds
    # whether this was the member's first or second attack of the war
    iteration = models.CharField(max_length=1, choices=ITERATIONS)
| |
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from ._autogen_fixtures import AutogenFixtureTest
from ... import testing
from ...testing import config
from ...testing import eq_
from ...testing import is_true
from ...testing import TestBase
class AutogenerateIdentityTest(AutogenFixtureTest, TestBase):
__requires__ = ("identity_columns",)
__backend__ = True
def test_add_identity_column(self):
m1 = MetaData()
m2 = MetaData()
Table("user", m1, Column("other", sa.Text))
Table(
"user",
m2,
Column("other", sa.Text),
Column(
"id",
Integer,
sa.Identity(start=5, increment=7),
primary_key=True,
),
)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "add_column")
eq_(diffs[0][2], "user")
eq_(diffs[0][3].name, "id")
i = diffs[0][3].identity
is_true(isinstance(i, sa.Identity))
eq_(i.start, 5)
eq_(i.increment, 7)
def test_remove_identity_column(self):
m1 = MetaData()
m2 = MetaData()
Table(
"user",
m1,
Column(
"id",
Integer,
sa.Identity(start=2, increment=3),
primary_key=True,
),
)
Table("user", m2)
diffs = self._fixture(m1, m2)
eq_(diffs[0][0], "remove_column")
eq_(diffs[0][2], "user")
c = diffs[0][3]
eq_(c.name, "id")
is_true(isinstance(c.identity, sa.Identity))
eq_(c.identity.start, 2)
eq_(c.identity.increment, 3)
def test_no_change_identity_column(self):
m1 = MetaData()
m2 = MetaData()
for m in (m1, m2):
Table(
"user",
m,
Column("id", Integer, sa.Identity(start=2)),
)
diffs = self._fixture(m1, m2)
eq_(diffs, [])
@testing.combinations(
    # (before, after) Identity kwargs; None means "no Identity on that side".
    (None, dict(start=2)),
    (dict(start=2), None),
    (dict(start=2), dict(start=2, increment=7)),
    (dict(always=False), dict(always=True)),
    (
        dict(start=1, minvalue=0, maxvalue=100, cycle=True),
        dict(start=1, minvalue=0, maxvalue=100, cycle=False),
    ),
    (
        dict(start=10, increment=3, maxvalue=9999),
        dict(start=10, increment=1, maxvalue=3333),
    ),
)
@config.requirements.identity_columns_alter
def test_change_identity(self, before, after):
    """Any add/drop/alter of an Identity is detected as one modify_default
    op on user.id; runs once per (before, after) combination above."""
    arg_before = (sa.Identity(**before),) if before else ()
    arg_after = (sa.Identity(**after),) if after else ()
    m1 = MetaData()
    m2 = MetaData()
    Table(
        "user",
        m1,
        Column("id", Integer, *arg_before),
        Column("other", sa.Text),
    )
    Table(
        "user",
        m2,
        Column("id", Integer, *arg_after),
        Column("other", sa.Text),
    )
    diffs = self._fixture(m1, m2)
    # Exactly one change is expected for the fold.
    eq_(len(diffs[0]), 1)
    diffs = diffs[0][0]
    eq_(diffs[0], "modify_default")
    eq_(diffs[2], "user")
    eq_(diffs[3], "id")
    # Positions 5/6 hold the old and new server default (the Identity).
    old = diffs[5]
    new = diffs[6]

    def check(kw, idt):
        # With kwargs given, idt must be an Identity with those attributes;
        # otherwise that side had no identity (reported as None or False).
        if kw:
            is_true(isinstance(idt, sa.Identity))
            for k, v in kw.items():
                eq_(getattr(idt, k), v)
        else:
            is_true(idt in (None, False))
    check(before, old)
    check(after, new)
def test_add_identity_to_column(self):
    """Attaching an Identity to an existing column is a modify_default."""
    meta_before = MetaData()
    meta_after = MetaData()
    Table(
        "user",
        meta_before,
        Column("id", Integer),
        Column("other", sa.Text),
    )
    Table(
        "user",
        meta_after,
        Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
        Column("other", sa.Text),
    )
    diffs = self._fixture(meta_before, meta_after)
    eq_(len(diffs[0]), 1)
    change = diffs[0][0]
    eq_(change[0], "modify_default")
    eq_(change[2], "user")
    eq_(change[3], "id")
    # No previous server default, and the new one is our Identity.
    eq_(change[5], None)
    new_default = change[6]
    is_true(isinstance(new_default, sa.Identity))
    eq_(new_default.start, 2)
    eq_(new_default.maxvalue, 1000)
def test_remove_identity_from_column(self):
    """Detaching an Identity from a column is a modify_default whose new
    value is None and whose old value is the Identity."""
    meta_before = MetaData()
    meta_after = MetaData()
    Table(
        "user",
        meta_before,
        Column("id", Integer, sa.Identity(start=2, maxvalue=1000)),
        Column("other", sa.Text),
    )
    Table(
        "user",
        meta_after,
        Column("id", Integer),
        Column("other", sa.Text),
    )
    diffs = self._fixture(meta_before, meta_after)
    eq_(len(diffs[0]), 1)
    change = diffs[0][0]
    eq_(change[0], "modify_default")
    eq_(change[2], "user")
    eq_(change[3], "id")
    eq_(change[6], None)
    is_true(isinstance(change[5], sa.Identity))
def test_identity_on_null(self):
    """Toggling Identity(on_null=...) only produces a diff on backends
    that support the ON NULL clause; elsewhere it compares equal."""
    m1 = MetaData()
    m2 = MetaData()
    Table(
        "user",
        m1,
        Column("id", Integer, sa.Identity(start=2, on_null=True)),
        Column("other", sa.Text),
    )
    Table(
        "user",
        m2,
        Column("id", Integer, sa.Identity(start=2, on_null=False)),
        Column("other", sa.Text),
    )
    diffs = self._fixture(m1, m2)
    if not config.requirements.supports_identity_on_null.enabled:
        # Backend ignores on_null entirely: the two tables compare equal.
        eq_(diffs, [])
    else:
        eq_(len(diffs[0]), 1)
        diffs = diffs[0][0]
        eq_(diffs[0], "modify_default")
        eq_(diffs[2], "user")
        eq_(diffs[3], "id")
        # Both sides carry an Identity, differing only in on_null.
        old = diffs[5]
        new = diffs[6]
        is_true(isinstance(old, sa.Identity))
        is_true(isinstance(new, sa.Identity))
| |
#!/usr/bin/env python3
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2016 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
import setup as _setup
import datetime as dt
# Mock out certain modules while building documentation
class Mock(object):
    """Stand-in used to fake out modules (ctypes, numpy) while building docs.

    Any attribute access, call, multiplication, or bitwise-and yields a new
    Mock; instances are always falsy (``__nonzero__`` is the Python 2
    spelling of ``__bool__``).
    """
    __all__ = []

    def __init__(self, *args, **kw):
        # Accept (and ignore) any constructor signature.
        pass

    def __call__(self, *args, **kw):
        return Mock()

    def __mul__(self, other):
        return Mock()

    def __and__(self, other):
        return Mock()

    def __bool__(self):
        return False

    # Python 2 truthiness hook, aliased to the Python 3 implementation.
    __nonzero__ = __bool__

    @classmethod
    def __getattr__(cls, name):
        # Sphinx/autodoc probes __file__/__path__; hand back a harmless path.
        return '/dev/null' if name in ('__file__', '__path__') else Mock()
# Install the mocks so `import ctypes` / `import numpy` succeed on the docs
# build host (e.g. Read the Docs) where the real modules are unavailable.
sys.modules['ctypes'] = Mock()
sys.modules['numpy'] = Mock()
# Submodules must be registered explicitly; attribute access on the numpy
# mock returns another Mock, which stands in for the submodule object.
sys.modules['numpy.lib'] = sys.modules['numpy'].lib
sys.modules['numpy.lib.stride_tricks'] = sys.modules['numpy'].lib.stride_tricks
# -- General configuration ------------------------------------------------
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.intersphinx']
if on_rtd:
    # Read the Docs: render formulas server-side as SVG images.
    needs_sphinx = '1.4.0'
    extensions.append('sphinx.ext.imgmath')
    imgmath_image_format = 'svg'
    tags.add('rtd')
else:
    # Local builds: render math in the browser via the Debian MathJax package.
    extensions.append('sphinx.ext.mathjax')
    mathjax_path = '/usr/share/javascript/mathjax/MathJax.js?config=TeX-AMS_HTML'
templates_path = ['_templates']
source_suffix = '.rst'
#source_encoding = 'utf-8-sig'
master_doc = 'index'
# Project metadata is pulled from setup.py so it lives in exactly one place.
project = _setup.__project__.title()
copyright = '2013-%d %s' % (dt.datetime.now().year, _setup.__author__)
version = _setup.__version__
release = _setup.__version__
#language = None
#today_fmt = '%B %d, %Y'
exclude_patterns = ['_build']
#default_role = None
#add_function_parentheses = True
#add_module_names = True
#show_authors = False
pygments_style = 'sphinx'
#modindex_common_prefix = []
#keep_warnings = False
# -- Autodoc configuration ------------------------------------------------
autodoc_member_order = 'groupwise'
autodoc_default_flags = ['members']
# -- Intersphinx configuration --------------------------------------------
# Cross-reference targets for :class:/:func: roles pointing at Python/numpy.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3.4', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
}
# -- Options for HTML output ----------------------------------------------
if on_rtd:
    html_theme = 'sphinx_rtd_theme'
    #html_theme_options = {}
    #html_theme_path = []
    #html_sidebars = {}
else:
    html_theme = 'default'
    #html_theme_options = {}
    #html_theme_path = []
    #html_sidebars = {}
html_title = '%s %s Documentation' % (project, version)
#html_short_title = None
#html_logo = None
#html_favicon = None
html_static_path = ['_static']
#html_extra_path = []
#html_last_updated_fmt = '%b %d, %Y'
#html_use_smartypants = True
#html_additional_pages = {}
#html_domain_indices = True
#html_use_index = True
#html_split_index = False
#html_show_sourcelink = True
#html_show_sphinx = True
#html_show_copyright = True
#html_use_opensearch = ''
#html_file_suffix = None
htmlhelp_basename = '%sdoc' % _setup.__project__
# Hack to make wide tables work properly in RTD
# See https://github.com/snide/sphinx_rtd_theme/issues/117 for details
def setup(app):
app.add_stylesheet('style_override.css')
# -- Options for LaTeX output ---------------------------------------------
#latex_engine = 'pdflatex'
latex_elements = {
    'papersize': 'a4paper',
    'pointsize': '10pt',
    'preamble': r'\def\thempfootnote{\arabic{mpfootnote}}', # workaround sphinx issue #2530
}
# A single manual-class PDF built from the root document.
latex_documents = [
    (
        'index',                                    # source start file
        '%s.tex' % _setup.__project__,              # target filename
        '%s %s Documentation' % (project, version), # title
        _setup.__author__,                          # author
        'manual',                                   # documentclass
        True,                                       # documents ref'd from toctree only
    ),
]
#latex_logo = None
#latex_use_parts = False
latex_show_pagerefs = True
latex_show_urls = 'footnote'
#latex_appendices = []
#latex_domain_indices = True
# -- Options for epub output ----------------------------------------------
epub_basename = _setup.__project__
#epub_theme = 'epub'
#epub_title = html_title
epub_author = _setup.__author__
epub_identifier = 'https://picamera.readthedocs.io/'
#epub_tocdepth = 3
epub_show_urls = 'no'
#epub_use_index = True
# -- Options for manual page output ---------------------------------------
# No man pages or Texinfo documents are generated for this project.
man_pages = []
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = []
#texinfo_appendices = []
#texinfo_domain_indices = True
#texinfo_show_urls = 'footnote'
#texinfo_no_detailmenu = False
# -- Options for linkcheck builder ----------------------------------------
linkcheck_retries = 3
linkcheck_workers = 20
linkcheck_anchors = True
| |
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
import datetime
from django.core.management.color import no_style
from django.db import backend, connection, connections, DEFAULT_DB_ALIAS, IntegrityError
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.test import TestCase, skipUnlessDBFeature, TransactionTestCase
from django.utils import unittest
from regressiontests.backends import models
class OracleChecks(unittest.TestCase):
    """Oracle-only sanity checks for the cursor/connection wrappers.

    Every test is skipped unless the active connection's vendor is Oracle.
    NOTE: legacy Python 2 code (uses ``unicode`` and ``xrange``).
    """

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_dbms_session(self):
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        convert_unicode = backend.convert_unicode
        cursor = connection.cursor()
        cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                        [convert_unicode('_django_testing!'),])

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_cursor_var(self):
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        cursor = connection.cursor()
        var = cursor.var(backend.Database.STRING)
        cursor.execute("BEGIN %s := 'X'; END; ", [var])
        self.assertEqual(var.getvalue(), 'X')

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_long_string(self):
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly. Uses a throwaway NCLOB table.
        c = connection.cursor()
        c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
        long_str = ''.join([unicode(x) for x in xrange(4000)])
        c.execute('INSERT INTO ltext VALUES (%s)',[long_str])
        c.execute('SELECT text FROM ltext')
        row = c.fetchone()
        # NCLOB values come back as LOB handles, hence the .read().
        self.assertEqual(long_str, row[0].read())
        c.execute('DROP TABLE ltext')

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle connection semantics")
    def test_client_encoding(self):
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        c = connection.cursor()  # Ensure the connection is initialized.
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")
class DateQuotingTest(TestCase):
    """Date helpers must not confuse a column named like one of their own
    lookup strings ('year', 'day', ...) with the string itself (refs #12818,
    http://code.djangoproject.com/ticket/12818)."""

    def test_django_date_trunc(self):
        """``django_date_trunc`` against a model with a ``year`` field."""
        last_updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=last_updated)
        truncated = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(truncated), [datetime.datetime(2010, 1, 1, 0, 0)])

    def test_django_extract(self):
        """``django_extract`` against a model with a ``year`` field."""
        last_updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=last_updated)
        matches = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(matches), 1)
class ParameterHandlingTest(TestCase):
    """Parameter arity checks for executemany (refs #12612)."""

    def test_bad_parameter_count(self):
        """executemany must raise when a parameter tuple has the wrong arity."""
        cursor = connection.cursor()
        table = connection.introspection.table_name_converter('backends_square')
        quote = connection.ops.quote_name
        query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
            table,
            quote('root'),
            quote('square'),
        )
        # Too many values per row, then too few.
        self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
        self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following test would be a good one to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_create(self):
        """Test creation of model with long name and long pk name doesn't error. Ref #8901"""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_m2m(self):
        """Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
        obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_flush(self):
        """Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually
        # Some convenience aliases
        VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        # Both the model table and the m2m through table must be flushed.
        tables = [
            VLM._meta.db_table,
            VLM_m2m._meta.db_table,
        ]
        # sql_flush() expects a list of {'column': ..., 'table': ...} dicts
        # describing the sequences to reset.
        sequences = [
            {
                'column': VLM._meta.pk.column,
                'table': VLM._meta.db_table
            },
        ]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
class SequenceResetTest(TestCase):
    """Sequence names are correct when resetting generic relations (refs #13941)."""

    def test_generic_relation(self):
        """After a sequence reset, auto PKs continue past a manual PK."""
        # Insert a row with an explicit, high primary key.
        models.Post.objects.create(id=10, name='1st post', text='hello world')
        # Reset the sequences for the database.
        cursor = connection.cursor()
        reset_statements = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(
            no_style(), [models.Post])
        for statement in reset_statements:
            cursor.execute(statement)
        # A subsequently auto-assigned PK must exceed the manual one.
        post = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertTrue(post.pk > 10)
class PostgresVersionTest(TestCase):
    """Parsing of PostgreSQL server version strings."""

    def assert_parses(self, version_string, version):
        self.assertEqual(pg_version._parse_version(version_string), version)

    def test_parsing(self):
        # (input banner, expected (major, minor, micro-or-None)) pairs.
        cases = (
            ("PostgreSQL 8.3 beta4", (8, 3, None)),
            ("PostgreSQL 8.3", (8, 3, None)),
            ("EnterpriseDB 8.3", (8, 3, None)),
            ("PostgreSQL 8.3.6", (8, 3, 6)),
            ("PostgreSQL 8.4beta1", (8, 4, None)),
            ("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", (8, 3, 1)),
        )
        for banner, expected in cases:
            self.assert_parses(banner, expected)
# Unfortunately with sqlite3 the in-memory test database cannot be
# closed, and so it cannot be re-opened during testing, and so we
# sadly disable this test for now.
class ConnectionCreatedSignalTest(TestCase):
    """connection_created must fire when a connection is (re)opened and stay
    silent once the receiver has been disconnected."""

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        data = {}
        def receiver(sender, connection, **kwargs):
            # Record which connection the signal reported.
            data["connection"] = connection
        connection_created.connect(receiver)
        # Close first so that cursor() has to open a fresh connection,
        # which is what fires the signal.
        connection.close()
        cursor = connection.cursor()
        self.assertTrue(data["connection"] is connection)
        connection_created.disconnect(receiver)
        data.clear()
        cursor = connection.cursor()
        # Receiver disconnected: no payload expected this time.
        self.assertTrue(data == {})
class BackendTestCase(TestCase):
    """Backend-agnostic checks of the cursor wrapper and the ops helper."""

    def test_cursor_executemany(self):
        #4896: Test cursor.executemany
        cursor = connection.cursor()
        qn = connection.ops.quote_name
        opts = models.Square._meta
        f1, f2 = opts.get_field('root'), opts.get_field('square')
        query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)'
                 % (connection.introspection.table_name_converter(opts.db_table), qn(f1.column), qn(f2.column)))
        # Insert 11 rows (roots -5..5) in one executemany call.
        cursor.executemany(query, [(i, i**2) for i in range(-5, 6)])
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i**2)
        #4765: executemany with params=[] does nothing
        cursor.executemany(query, [])
        self.assertEqual(models.Square.objects.count(), 11)

    def test_unicode_fetches(self):
        #6254: fetchone, fetchmany, fetchall return strings as unicode objects
        qn = connection.ops.quote_name
        models.Person(first_name="John", last_name="Doe").save()
        models.Person(first_name="Jane", last_name="Doe").save()
        models.Person(first_name="Mary", last_name="Agnelline").save()
        models.Person(first_name="Peter", last_name="Parker").save()
        models.Person(first_name="Clark", last_name="Kent").save()
        opts2 = models.Person._meta
        f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
        query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
                  % (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
                     qn(f3.column)))
        cursor = connection.cursor()
        cursor.execute(query2)
        # Rows come back ordered by first name; values must be unicode.
        self.assertEqual(cursor.fetchone(), (u'Clark', u'Kent'))
        self.assertEqual(list(cursor.fetchmany(2)), [(u'Jane', u'Doe'), (u'John', u'Doe')])
        self.assertEqual(list(cursor.fetchall()), [(u'Mary', u'Agnelline'), (u'Peter', u'Parker')])

    def test_database_operations_helper_class(self):
        # Ticket #13630: ops must hold a backreference to its connection.
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# In each case we verify that, if an exception is raised at all, it is
# django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
    """FK-constraint enforcement checks.

    The try/except blocks below deliberately swallow IntegrityError: on
    backends that do not enforce foreign keys (e.g. MySQL/MyISAM) no
    exception is raised at all, while enforcing backends must raise
    IntegrityError specifically -- any other exception type propagates
    and fails the test.
    """

    def setUp(self):
        # Create a Reporter.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        # reporter_id=30 points at a Reporter that does not exist.
        a = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
        try:
            a.save()
        except IntegrityError:
            pass

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article.
        models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
        # Retrieve it from the DB.
        a = models.Article.objects.get(headline="Test article")
        # Point the FK at a nonexistent Reporter before saving.
        a.reporter_id = 30
        try:
            a.save()
        except IntegrityError:
            pass
| |
#
# Base classes for GPIO hardware
#
import RPi.GPIO as gpio
import time, os
from collections import defaultdict
from collections.abc import Iterable
from threading import Thread
from traceback import print_exc, format_exc
def delay(microseconds):
    """Block the calling thread for the given number of microseconds."""
    seconds = microseconds / 1e6
    time.sleep(seconds)
def waitms(milliseconds):
    """Block the calling thread for the given number of milliseconds."""
    seconds = milliseconds / 1000.0
    time.sleep(seconds)
BASE_DIR = os.environ.get("IO_BASE_DIR","/var/run/")
class FIFOFile:
    """A named FIFO (pipe) under BASE_DIR opened for non-blocking read/write.

    The FIFO is (re)created on construction and removed again by close().
    All other attribute access is delegated to the underlying binary file
    object, so a FIFOFile can be used like a regular unbuffered file.
    """

    def __init__(self, fn, num=0):
        # One FIFO per channel: "<BASE_DIR>/<fn><num>".
        self.__filename = os.path.join(BASE_DIR, fn + str(num))
        # Remove any stale FIFO left over from a previous run.
        try:
            os.unlink(self.__filename)
        except FileNotFoundError:
            pass
        # Clear the umask so the FIFO is created world read/writable (other
        # processes talk to us through it); restore the umask afterwards.
        umask = os.umask(0)
        os.mkfifo(self.__filename)
        # O_RDWR keeps the pipe open even with no external peer attached and
        # O_NONBLOCK makes reads return immediately; buffering=0 flushes
        # every write straight into the pipe.
        self.__fd = open(os.open(self.__filename, os.O_RDWR|os.O_NONBLOCK), "wb+", 0)
        os.umask(umask)

    def close(self):
        # Close the descriptor, then remove the filesystem entry; tolerate
        # the entry having been unlinked by someone else already.
        self.__fd.close()
        try:
            os.unlink(self.__filename)
        except FileNotFoundError:
            pass

    def __getattr__(self, name):
        # Delegate everything else (read, write, fileno, ...) to the file.
        return getattr(self.__fd, name)
class Component:
    """Base class for a hardware component exposed over named FIFOs.

    Each component owns one FIFO per channel (named ``fn`` plus the channel
    number) used to exchange newline-terminated, comma-separated records
    with other processes. Subclasses may provide a default FIFO name via a
    ``_FN`` class attribute.
    """

    def __init__(self, fn=None, numchannels=1, offset=0):
        super().__init__()
        # Fall back to the subclass-provided default FIFO name.
        self.__fn = fn if fn else self._FN
        self.__channels = range(offset, numchannels + offset)
        # Per-channel buffer of bytes received but not yet newline-terminated.
        self.__readdata = defaultdict(bytes)
        self.__initialized = False

    def writedata(self, data, channel=0):
        """Write one newline-terminated record to ``channel``.

        ``data`` may be a non-string iterable (values joined with commas),
        bytes (sent as-is), or any other value (str()-converted).
        """
        # BUG FIX: the previous hasattr(data, "__iter__") check also matched
        # str and bytes, mangling e.g. "on" into "o,n". Exclude them so only
        # genuine sequences are comma-joined (Iterable comes from the
        # module's existing collections.abc import).
        if isinstance(data, Iterable) and not isinstance(data, (str, bytes)):
            data = ",".join([str(i) for i in data])
        if not isinstance(data, bytes):
            data = str(data).encode()
        self.__fifos[channel].write(data + b"\n")

    def readdata(self, channel=0):
        """Return the next complete record from ``channel``, or None.

        A record is one newline-terminated line; comma-separated values are
        returned as a tuple of floats, a single value as a float.
        """
        data = self.__fifos[channel].read()
        if data:
            self.__readdata[channel] += data
        if b"\n" in self.__readdata[channel]:
            # Keep any bytes after the newline buffered for the next call.
            data, newline, self.__readdata[channel] = self.__readdata[channel].partition(b"\n")
            text = data.decode()
            if "," in text:
                return tuple([float(i) for i in text.split(",")])
            else:
                return float(text)
        else:
            return None

    def _checkInit(self, quiet=False):
        """Return the init state (quiet=True) or raise if uninitialized."""
        if quiet:
            return self.__initialized
        if not self.__initialized:
            raise RuntimeError("This {} has not been initialized yet!".format(
                self.__class__.__name__))

    def _set_init(self):
        # Subclasses call this at the end of their init() override.
        self.__initialized = True

    def init(self):
        """Create the FIFO(s); single-channel components get one FIFO."""
        if len(self.__channels) < 2:
            self.__fifos = [FIFOFile(self.__fn)]
        else:
            self.__fifos = []
            for i in self.__channels:
                self.__fifos.append(FIFOFile(self.__fn, i))

    def cleanup(self):
        """Close/remove all FIFOs and mark the component uninitialized."""
        for i in self.__fifos:
            i.close()
        self.__initialized = False

    def __enter__(self):
        self.init()
        return self

    def __exit__(self, type, value, tb):
        self.cleanup()
class GPIOComponent(Component):
    """Component backed by Raspberry Pi GPIO pins (BCM numbering)."""

    def __init__(self, outpins=(), inpins=(), *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Pins configured as outputs (driven LOW initially) and as inputs.
        self.__out_pins = outpins
        self.__in_pins = inpins

    def init(self):
        super().init()
        # If already initialized, tear down first so pins can be
        # reconfigured cleanly.
        if self._checkInit(True):
            try:
                self.cleanup()
            finally:
                pass
        gpio.setmode(gpio.BCM)
        for ch in self.__out_pins:
            try:
                gpio.setup(ch, gpio.OUT, initial=gpio.LOW)
            except ValueError as err:
                # Invalid pin numbers are reported but do not abort setup.
                print("Error setting up pin {}. ({})".format(ch, repr(err)))
        for ch in self.__in_pins:
            try:
                gpio.setup(ch, gpio.IN)
            except ValueError as err:
                print("Error setting up pin {}. ({})".format(ch, repr(err)))
        self._set_init()

    def cleanup(self):
        """Release FIFOs and all pins; returns False if never initialized."""
        if not self._checkInit(True):
            return False
        super().cleanup()
        for ch in self.__out_pins + self.__in_pins:
            gpio.cleanup(ch)
        return True
class I2CComponent(Component):
    """Component attached via I2C; records the bus address for subclasses."""

    def __init__(self, addr=None, *args, **kwargs):
        # I2C device address (None if supplied later by the subclass).
        self._address = addr
        super().__init__(*args, **kwargs)
class EventedInput:
    """Mixin providing per-pin and generic (all-pin) callback registration.

    Handlers are keyed by the integer id returned from add_handler() so
    they can later be removed with remove_handler().
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__next_id = -1
        # Maps pin number (or the literal key "generic") to {id: callback}.
        self.__handlers = defaultdict(dict)

    def _get_handlers(self, pin=None, generic=False):
        """Return the handler dict for one pin, or the generic dict."""
        if generic:
            return self.__handlers["generic"]
        if pin is None:
            raise TypeError("Must supply pin for non generic handler!")
        return self.__handlers[pin]

    def add_handler(self, callback, pin=None, generic=False):
        """Register ``callback`` for a pin (or all pins) and return its id."""
        self.__next_id += 1
        registry = self._get_handlers(pin, generic)
        registry[self.__next_id] = callback
        return self.__next_id

    def remove_handler(self, hid, pin=None, generic=False):
        """Unregister the handler whose id ``add_handler`` returned."""
        del self._get_handlers(pin, generic)[hid]

    def _handle_pin(self, pin):
        """Invoke generic handlers, then pin-specific ones, for one event."""
        generic_callbacks = list(self._get_handlers(generic=True).values())
        pin_callbacks = list(self._get_handlers(pin).values())
        for callback in generic_callbacks + pin_callbacks:
            try:
                callback(pin)
            except Exception:
                # One failing handler must not prevent the others running.
                print_exc()
class LoopedComponent:
    """Subclasses must implement a tick method and define _mswait"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The worker thread is created and started immediately, but tick()
        # only runs once start() flips __started; until then the loop just
        # sleeps at the polling interval.
        self.thread = Thread(target=self.runloop)
        # Daemon thread: do not keep the process alive on exit.
        self.thread.daemon = True
        self.__started = False
        self.thread.start()

    def init(self, autostart=False):
        super().init()
        self._set_init()
        if autostart:
            self.start()

    def cleanup(self):
        # Stop ticking before the FIFOs are torn down.
        if self.__started:
            self.stop()
        super().cleanup()

    def start(self):
        # Refuse to start ticking before init() has created the FIFOs.
        self._checkInit()
        self.__started = True

    def stop(self):
        self.__started = False

    def runloop(self):
        # Runs for the lifetime of the process (daemon thread); tick() only
        # fires while started, but the sleep happens unconditionally so a
        # stopped component idles at the polling interval.
        while True:
            if self.__started:
                self.tick()
            # NOTE(review): self._mswait is read even while stopped -- this
            # assumes the subclass defines it no later than construction.
            waitms(self._mswait)
| |
from sklearn import preprocessing
from sklearn import metrics, cross_validation
from sklearn.grid_search import GridSearchCV
from sklearn import naive_bayes, svm, linear_model, neighbors, tree, ensemble
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectPercentile, f_classif
import numpy as np
import pickle
import copy
import os.path
# begin constants
# Classifier identifiers used to select a branch in runGridSearch().
CLF_KNN = "KNN"
CLF_NBMULTINOMIAL = "NB Multinomial"
CLF_SVM_L = "SVM-L"
CLF_SVM_R = "SVM-R"
CLF_DTREE = "Decision Tree"
CLF_B_DTREE = "B. Decision Tree"
PARAM_VERBOSE = 0  # GridSearchCV verbosity level
PARAM_JOBS = -1  # parallel jobs for most grid searches (-1 = all cores)
SVM_PARAM_JOBS = 1  # the RBF-SVM grid search runs single-process
BEST_REPRESENTATION = 2  # index of the representation kept for the final run
# Hyper-parameter search ranges.
KNN_RANGE = list(range(1,7))
DEGREE_RANGE = list(range(2,5))
BIN_RANGE = list((10.0**i) for i in range(-3,3))
RANGE7 = list((10.0**i) for i in range(-7,7))  # wide log-scale range (1e-7..1e6)
RANGE3 = list((10.0**i) for i in range(-3,3))  # narrow log-scale range (1e-3..1e2)
EST_RANGE = [50,100,150,200,250]
K_FOLD = 10  # outer cross-validation folds
CV_K_FOLD = 5  # inner cross-validation folds for model selection
SVC_MAX_ITER = 100000
## end constants
def run_featureSelection(X,Y, use_norm = False):
    """Pick the ANOVA-F feature percentile (5..100, step 5) that maximizes
    KNN mean F1 under 5-fold CV, then return X reduced to that percentile.

    When use_norm is set, the percentile search runs on a standardized copy
    of X (scaler fitted on a random ~20% row sample); the returned matrix
    is always selected from the ORIGINAL, unnormalized X.
    """
    i = 5
    melhorQtde = 0  # best percentile found so far
    maxFmedida = 0.00  # best mean F1 found so far
    clf = neighbors.KNeighborsClassifier()
    X_norm = copy.copy(X)
    if use_norm:
        # representative normalization
        # NOTE(review): np.shape(X_norm)[0]/5 must be an integer for
        # random_integers -- this relies on Python 2 integer division.
        indices = np.random.random_integers(0,np.shape(X_norm)[0]-1,np.shape(X_norm)[0]/5)
        normalization = preprocessing.StandardScaler().fit(X_norm[indices])
        X_norm = normalization.transform(X_norm)
    while (i <= 100):
        aux_X = SelectPercentile(f_classif, percentile=i).fit_transform(X_norm, Y)
        scores = cross_validation.cross_val_score(clf, aux_X, Y, cv=5, scoring='f1',n_jobs=-1)
        if (maxFmedida < scores.mean()):
            maxFmedida = scores.mean()
            melhorQtde = i
        i = i + 5
    # NOTE(review): if every percentile scored 0.0, melhorQtde stays 0 and
    # SelectPercentile(percentile=0) keeps no features -- confirm this case
    # cannot occur with the project data.
    return SelectPercentile(f_classif, percentile=melhorQtde).fit_transform(X, Y)
def runGridSearch(clf_nome, X, Y):
    """Grid-search hyper-parameters for the named classifier over (X, Y).

    Returns (configured_classifier, best_cv_f1): a fresh, unfitted estimator
    rebuilt with the best parameters, plus the best cross-validated F1.
    NOTE(review): an unknown clf_nome falls through every branch and raises
    UnboundLocalError on clf_configurado.
    """
    parametros = {}
    if(clf_nome==CLF_KNN):
        # k-NN: tune neighbour count and vote weighting.
        tuned_params = [{'n_neighbors': KNN_RANGE, 'weights': ['uniform', 'distance']}]
        clf = GridSearchCV(neighbors.KNeighborsClassifier(), tuned_params, cv=CV_K_FOLD, scoring="f1", verbose=PARAM_VERBOSE)
        clf.fit(X,Y)
        parametros["k"] = clf.best_estimator_.n_neighbors
        parametros["weights"] = clf.best_estimator_.weights
        parametros["resultado"] = "K: %f - Weight: %s" % (clf.best_estimator_.n_neighbors, clf.best_estimator_.weights)
        clf_configurado = neighbors.KNeighborsClassifier(n_neighbors=parametros["k"], weights=parametros["weights"])
    elif(clf_nome==CLF_NBMULTINOMIAL):
        # Multinomial naive Bayes: tune the smoothing parameter only.
        tuned_params = [{'alpha': RANGE7}]
        clf = GridSearchCV(naive_bayes.MultinomialNB(), tuned_params, cv=CV_K_FOLD, scoring="f1", verbose=PARAM_VERBOSE, n_jobs=PARAM_JOBS)
        clf.fit(X, Y)
        parametros["alpha"] = clf.best_estimator_.alpha
        parametros["resultado"] = "Alpha: %f" % (clf.best_estimator_.alpha)
        clf_configurado = naive_bayes.MultinomialNB(alpha=parametros["alpha"])
    elif(clf_nome==CLF_SVM_L):
        # Linear-kernel SVM: tune the C penalty.
        tuned_params = [{'kernel': ['linear'], 'C': RANGE3}]
        clf = GridSearchCV(svm.SVC(max_iter=SVC_MAX_ITER), tuned_params, cv=CV_K_FOLD, scoring="f1", verbose=PARAM_VERBOSE, n_jobs=PARAM_JOBS)
        clf.fit(X, Y)
        parametros["c"] = clf.best_estimator_.C
        parametros["kernel"] = clf.best_estimator_.kernel
        parametros["resultado"] = "Kernel: %s - C: %f" % (clf.best_estimator_.kernel,clf.best_estimator_.C)
        clf_configurado = svm.SVC(max_iter=SVC_MAX_ITER,kernel=parametros["kernel"],C=parametros["c"])
    elif(clf_nome==CLF_SVM_R):
        # RBF-kernel SVM: tune C and gamma (single-process, see SVM_PARAM_JOBS).
        tuned_params = [{'kernel': ['rbf'], 'C': RANGE3, 'gamma': RANGE3}]
        clf = GridSearchCV(svm.SVC(max_iter=SVC_MAX_ITER), tuned_params, cv=CV_K_FOLD, scoring="f1", verbose=PARAM_VERBOSE, n_jobs=SVM_PARAM_JOBS)
        clf.fit(X, Y)
        parametros["c"] = clf.best_estimator_.C
        parametros["gamma"] = clf.best_estimator_.gamma
        parametros["kernel"] = clf.best_estimator_.kernel
        parametros["resultado"] = "Kernel: %s - C: %f - Gamma: %f " % (clf.best_estimator_.kernel, clf.best_estimator_.C, clf.best_estimator_.gamma)
        clf_configurado = svm.SVC(max_iter=SVC_MAX_ITER,kernel=parametros["kernel"],C=parametros["c"], gamma=parametros["gamma"])
    elif(clf_nome==CLF_DTREE):
        # Decision tree: tune split criterion and feature sampling.
        tuned_params = [{'criterion': ['gini','entropy'], 'max_features': ['sqrt','log2']}]
        clf = GridSearchCV(tree.DecisionTreeClassifier(random_state=0), tuned_params, cv=10, scoring="f1", verbose=PARAM_VERBOSE, n_jobs=PARAM_JOBS)
        clf.fit(X, Y)
        parametros["criterion"] = clf.best_estimator_.criterion
        parametros["max_features"] = clf.best_estimator_.max_features
        parametros["resultado"] = "Criterion: %s - Max_Features: %s " % (parametros["criterion"], parametros["max_features"])
        clf_configurado = tree.DecisionTreeClassifier(random_state=0,criterion=parametros["criterion"],max_features=parametros["max_features"])
    elif(clf_nome==CLF_B_DTREE):
        # Boosted tree: first tune the base tree, then the AdaBoost ensemble
        # size around it (two consecutive grid searches).
        tuned_params = [{'criterion': ['gini','entropy'], 'max_features': ['sqrt','log2']}]
        clf = GridSearchCV(tree.DecisionTreeClassifier(random_state=0), tuned_params, cv=10, scoring="f1", verbose=PARAM_VERBOSE, n_jobs=PARAM_JOBS)
        clf.fit(X, Y)
        parametros["criterion"] = clf.best_estimator_.criterion
        parametros["max_features"] = clf.best_estimator_.max_features
        parametros["resultado"] = "Criterion: %s - Max_Features: %s " % (parametros["criterion"], parametros["max_features"])
        clf_configurado = tree.DecisionTreeClassifier(random_state=0,criterion=parametros["criterion"],max_features=parametros["max_features"])
        tuned_params = [{'n_estimators': [50,100,250,500]}]
        clf = GridSearchCV(ensemble.AdaBoostClassifier(clf_configurado), tuned_params, cv=10, scoring="f1", verbose=PARAM_VERBOSE, n_jobs=PARAM_JOBS)
        clf.fit(X, Y)
        parametros["n_estimator"] = clf.best_estimator_.n_estimators
        clf_configurado = ensemble.AdaBoostClassifier(clf_configurado,n_estimators=parametros["n_estimator"])
    # best_score_ comes from whichever grid search ran last.
    return clf_configurado, clf.best_score_
def runExperiment(clf, train_cv, test_cv):
    """Run one outer CV fold: normalize, grid-search, fit, and score.

    ``clf`` is one of the CLF_* identifiers; ``train_cv``/``test_cv`` are
    the row indexes for this fold. Returns (accuracy, per-class F1 array).
    Relies on the module-level ``representacoes`` (list of feature matrices)
    and ``classes`` (label array).
    """
    partitionSamples = []
    trainSamples = []
    testSamples = []
    modelSelectionSamples = []
    # define holdout partition, but it changes for each "fold"
    # BUG FIX: the module imports numpy as ``np``; the bare name ``numpy``
    # was undefined here and raised NameError.
    holdoutTrainIdx = np.sort(train_cv)
    holdoutTestIdx = np.sort(test_cv)
    for j in range(len(representacoes)):
        # Fit the scaler on the training rows only, to avoid test leakage.
        normalization = preprocessing.StandardScaler().fit(representacoes[j][holdoutTrainIdx])
        auxPartitionSamples = normalization.transform(representacoes[j][holdoutTrainIdx])
        auxTestSamples = normalization.transform(representacoes[j][holdoutTestIdx])
        partitionSamples.append(auxPartitionSamples)
        testSamples.append(auxTestSamples)
    partitionSamples = copy.copy(partitionSamples)
    testSamples = copy.copy(testSamples)
    partitionClasses = copy.copy(classes[holdoutTrainIdx])
    testClasses = copy.copy(classes[holdoutTestIdx])
    # Inner 75/25 holdout used for model selection; only the first (and
    # only, n_iter=1) split is consumed.
    modelSelectionCV = cross_validation.ShuffleSplit(len(partitionClasses), n_iter=1, test_size=0.25, random_state=0)
    auxTrainIndexes = []
    auxModelSelectionIndexes = []
    for auxTrainIndexes, auxModelSelectionIndexes in modelSelectionCV:
        break
    trainClasses = partitionClasses[auxTrainIndexes]
    modelSelectionClasses = partitionClasses[auxModelSelectionIndexes]
    for k in range(len(representacoes)):
        trainSamples.append(partitionSamples[k][auxTrainIndexes])
        modelSelectionSamples.append(partitionSamples[k][auxModelSelectionIndexes])
    classifier, _ = runGridSearch(clf, representacoes[0], classes)
    # fit data
    classifier = classifier.fit(trainSamples[0], trainClasses)
    # evaluate the final result on the untouched test split
    predicted_labels = classifier.predict(testSamples[0])
    acc = metrics.accuracy_score(testClasses, predicted_labels)
    precision, recall, f1, support = metrics.precision_recall_fscore_support(testClasses, predicted_labels)
    return acc, f1
representacoes = []
representacoes_file = 'representacoes.dat'
f = open(representacoes_file, 'r')
representacoes = pickle.load(f)
classes = pickle.load(f)
classes = np.asarray(classes)
clfs = [ CLF_KNN, CLF_SVM_L, CLF_SVM_R, CLF_DTREE, CLF_B_DTREE ]
# add feature selection without norm
qtde = len(representacoes)
for idx in range(qtde):
representacoes.append(run_featureSelection(representacoes[idx],classes))
# add feature selection with norm
for idx in range(qtde):
representacoes.append(run_featureSelection(representacoes[idx],classes,use_norm=True))
representacoes = [ representacoes[BEST_REPRESENTATION] ]
acc = []
f1 = []
cv = cross_validation.StratifiedKFold(classes,n_folds=10,random_state=0)
for clf in clfs:
for train_index, test_index in cv:
auxAcc, auxF1 = runExperiment(clf,train_index,test_index)
acc.append(auxAcc)
f1.append(auxF1)
print clf
print 'ACC:', numpy.mean(acc), '+/-', numpy.std(acc)
print 'F1:', numpy.mean(f1), '+/-', numpy.std(f1)
print ''
| |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""WSGI support for the Tornado web framework.
WSGI is the Python standard for web servers, and allows for interoperability
between Tornado and other Python web frameworks and servers. This module
provides WSGI support in two ways:
* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
interface. This is useful for running a Tornado app on another
HTTP server, such as Google App Engine. See the `WSGIAdapter` class
documentation for limitations that apply.
* `WSGIContainer` lets you run other WSGI applications and frameworks on the
Tornado HTTP server. For example, with this class you can mix Django
and Tornado handlers in a single server.
"""
from __future__ import absolute_import, division, print_function
import sys
from io import BytesIO
import tornado
from tornado.concurrent import Future
from tornado import escape
from tornado import httputil
from tornado.log import access_log
from tornado import web
from tornado.escape import native_str
from tornado.util import unicode_type, PY3
if PY3:
import urllib.parse as urllib_parse # py3
else:
import urllib as urllib_parse
# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
# that are smuggled inside objects of type unicode (via the latin1 encoding).
# These functions are like those in the tornado.escape module, but defined
# here to minimize the temptation to use them in non-wsgi contexts.
if str is unicode_type:
    def to_wsgi_str(data):
        """Decode ``bytes`` into the WSGI 'native string' (py3: latin1 str)."""
        assert isinstance(data, bytes)
        return data.decode('latin1')

    def from_wsgi_str(data):
        """Encode a WSGI native string back into ``bytes`` (py3: latin1)."""
        assert isinstance(data, str)
        return data.encode('latin1')
else:
    def to_wsgi_str(data):
        """On py2 the native string already is ``bytes``; pass it through."""
        assert isinstance(data, bytes)
        return data

    def from_wsgi_str(data):
        """On py2 the native string already is ``bytes``; pass it through."""
        assert isinstance(data, str)
        return data
class WSGIApplication(web.Application):
    """A WSGI equivalent of `tornado.web.Application`.
    .. deprecated:: 4.0
       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    """
    def __call__(self, environ, start_response):
        # Wrap ourselves in an adapter per call and delegate to it.
        adapter = WSGIAdapter(self)
        return adapter(environ, start_response)
# WSGI has no facilities for flow control, so just return an already-done
# Future when the interface requires it. One shared, already-resolved
# instance is sufficient since callers never mutate it.
_dummy_future = Future()
_dummy_future.set_result(None)
class _WSGIConnection(httputil.HTTPConnection):
    """Adapts Tornado's ``HTTPConnection`` interface onto a WSGI
    ``start_response`` callable, buffering the response body in memory.
    """
    def __init__(self, method, start_response, context):
        self.method = method
        self.start_response = start_response
        self.context = context
        # Response body chunks collected until the WSGI caller iterates them.
        self._write_buffer = []
        self._finished = False
        # Bytes still owed per the declared Content-Length (None = unchecked).
        self._expected_content_remaining = None
        self._error = None
    def set_close_callback(self, callback):
        # WSGI has no facility for detecting a closed connection mid-request,
        # so we can simply ignore the callback.
        pass
    def write_headers(self, start_line, headers, chunk=None, callback=None):
        # Track how many body bytes we expect so write()/finish() can detect
        # Content-Length mismatches. HEAD responses must have no body.
        if self.method == 'HEAD':
            self._expected_content_remaining = 0
        elif 'Content-Length' in headers:
            self._expected_content_remaining = int(headers['Content-Length'])
        else:
            self._expected_content_remaining = None
        self.start_response(
            '%s %s' % (start_line.code, start_line.reason),
            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
        if chunk is not None:
            self.write(chunk, callback)
        elif callback is not None:
            callback()
        # No flow control in WSGI: report completion immediately.
        return _dummy_future
    def write(self, chunk, callback=None):
        """Buffer a body chunk, enforcing the declared Content-Length."""
        if self._expected_content_remaining is not None:
            self._expected_content_remaining -= len(chunk)
            if self._expected_content_remaining < 0:
                # Remember the error so __call__ can re-raise it even if the
                # application swallows this exception.
                self._error = httputil.HTTPOutputError(
                    "Tried to write more data than Content-Length")
                raise self._error
        self._write_buffer.append(chunk)
        if callback is not None:
            callback()
        return _dummy_future
    def finish(self):
        """Mark the response complete; raise if fewer bytes were written
        than the declared Content-Length."""
        if (self._expected_content_remaining is not None and
                self._expected_content_remaining != 0):
            self._error = httputil.HTTPOutputError(
                "Tried to write %d bytes less than Content-Length" %
                self._expected_content_remaining)
            raise self._error
        self._finished = True
class _WSGIRequestContext(object):
def __init__(self, remote_ip, protocol):
self.remote_ip = remote_ip
self.protocol = protocol
def __str__(self):
return self.remote_ip
class WSGIAdapter(object):
    """Converts a `tornado.web.Application` instance into a WSGI application.
    Example usage::
        import tornado.web
        import tornado.wsgi
        import wsgiref.simple_server
        class MainHandler(tornado.web.RequestHandler):
            def get(self):
                self.write("Hello, world")
        if __name__ == "__main__":
            application = tornado.web.Application([
                (r"/", MainHandler),
            ])
            wsgi_app = tornado.wsgi.WSGIAdapter(application)
            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
            server.serve_forever()
    See the `appengine demo
    <https://github.com/tornadoweb/tornado/tree/stable/demos/appengine>`_
    for an example of using this module to run a Tornado app on Google
    App Engine.
    In WSGI mode asynchronous methods are not supported. This means
    that it is not possible to use `.AsyncHTTPClient`, or the
    `tornado.auth` or `tornado.websocket` modules.
    .. versionadded:: 4.0
    """
    def __init__(self, application):
        # For the deprecated WSGIApplication subclass, call the plain
        # Application request path directly; going through its __call__
        # would wrap it in another WSGIAdapter.
        if isinstance(application, WSGIApplication):
            self.application = lambda request: web.Application.__call__(
                application, request)
        else:
            self.application = application
    def __call__(self, environ, start_response):
        # Reconstruct the request URI from the PEP 3333 environ keys.
        method = environ["REQUEST_METHOD"]
        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
        if environ.get("QUERY_STRING"):
            uri += "?" + environ["QUERY_STRING"]
        headers = httputil.HTTPHeaders()
        if environ.get("CONTENT_TYPE"):
            headers["Content-Type"] = environ["CONTENT_TYPE"]
        if environ.get("CONTENT_LENGTH"):
            headers["Content-Length"] = environ["CONTENT_LENGTH"]
        # All other HTTP headers arrive as HTTP_* environ entries.
        for key in environ:
            if key.startswith("HTTP_"):
                headers[key[5:].replace("_", "-")] = environ[key]
        if headers.get("Content-Length"):
            body = environ["wsgi.input"].read(
                int(headers["Content-Length"]))
        else:
            body = b""
        protocol = environ["wsgi.url_scheme"]
        remote_ip = environ.get("REMOTE_ADDR", "")
        if environ.get("HTTP_HOST"):
            host = environ["HTTP_HOST"]
        else:
            host = environ["SERVER_NAME"]
        connection = _WSGIConnection(method, start_response,
                                     _WSGIRequestContext(remote_ip, protocol))
        request = httputil.HTTPServerRequest(
            method, uri, "HTTP/1.1", headers=headers, body=body,
            host=host, connection=connection)
        request._parse_body()
        # The application must finish the request synchronously; WSGI
        # offers no way to suspend and resume later.
        self.application(request)
        if connection._error:
            raise connection._error
        if not connection._finished:
            raise Exception("request did not finish synchronously")
        return connection._write_buffer
class WSGIContainer(object):
    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    .. warning::
       WSGI is a *synchronous* interface, while Tornado's concurrency model
       is based on single-threaded asynchronous execution.  This means that
       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
       than running the same app in a multi-threaded WSGI server like
       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
       benefits to combining Tornado and WSGI in the same process that
       outweigh the reduced scalability.
    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    run it. For example::
        def simple_app(environ, start_response):
            status = "200 OK"
            response_headers = [("Content-type", "text/plain")]
            start_response(status, response_headers)
            return ["Hello world!\n"]
        container = tornado.wsgi.WSGIContainer(simple_app)
        http_server = tornado.httpserver.HTTPServer(container)
        http_server.listen(8888)
        tornado.ioloop.IOLoop.current().start()
    This class is intended to let other frameworks (Django, web.py, etc)
    run on the Tornado HTTP server and I/O loop.
    The `tornado.web.FallbackHandler` class is often useful for mixing
    Tornado and WSGI apps in the same server.  See
    https://github.com/bdarnell/django-tornado-demo for a complete example.
    """
    def __init__(self, wsgi_application):
        self.wsgi_application = wsgi_application
    def __call__(self, request):
        """Run the wrapped WSGI app for one Tornado request and write the
        full response back through ``request.connection``."""
        data = {}
        response = []
        def start_response(status, response_headers, exc_info=None):
            data["status"] = status
            data["headers"] = response_headers
            # Per PEP 3333, start_response returns a write(body_data)
            # callable; appending to the chunk list serves that role here.
            return response.append
        app_response = self.wsgi_application(
            WSGIContainer.environ(request), start_response)
        try:
            response.extend(app_response)
            body = b"".join(response)
        finally:
            # Close the app iterable (e.g. file wrappers) even on error.
            if hasattr(app_response, "close"):
                app_response.close()
        if not data:
            raise Exception("WSGI app did not call start_response")
        status_code, reason = data["status"].split(' ', 1)
        status_code = int(status_code)
        headers = data["headers"]
        header_set = set(k.lower() for (k, v) in headers)
        body = escape.utf8(body)
        # Fill in standard headers the app did not provide (304 responses
        # must not carry entity headers).
        if status_code != 304:
            if "content-length" not in header_set:
                headers.append(("Content-Length", str(len(body))))
            if "content-type" not in header_set:
                headers.append(("Content-Type", "text/html; charset=UTF-8"))
        if "server" not in header_set:
            headers.append(("Server", "TornadoServer/%s" % tornado.version))
        start_line = httputil.ResponseStartLine("HTTP/1.1", status_code, reason)
        header_obj = httputil.HTTPHeaders()
        for key, value in headers:
            header_obj.add(key, value)
        request.connection.write_headers(start_line, header_obj, chunk=body)
        request.connection.finish()
        self._log(status_code, request)
    @staticmethod
    def environ(request):
        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
        """
        # Split an explicit host:port; otherwise fall back to the scheme's
        # default port.
        hostport = request.host.split(":")
        if len(hostport) == 2:
            host = hostport[0]
            port = int(hostport[1])
        else:
            host = request.host
            port = 443 if request.protocol == "https" else 80
        environ = {
            "REQUEST_METHOD": request.method,
            "SCRIPT_NAME": "",
            "PATH_INFO": to_wsgi_str(escape.url_unescape(
                request.path, encoding=None, plus=False)),
            "QUERY_STRING": request.query,
            "REMOTE_ADDR": request.remote_ip,
            "SERVER_NAME": host,
            "SERVER_PORT": str(port),
            "SERVER_PROTOCOL": request.version,
            "wsgi.version": (1, 0),
            "wsgi.url_scheme": request.protocol,
            "wsgi.input": BytesIO(escape.utf8(request.body)),
            "wsgi.errors": sys.stderr,
            "wsgi.multithread": False,
            "wsgi.multiprocess": True,
            "wsgi.run_once": False,
        }
        # Content-Type/Length get dedicated environ keys; everything else
        # becomes an HTTP_* entry.
        if "Content-Type" in request.headers:
            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
        if "Content-Length" in request.headers:
            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
        for key, value in request.headers.items():
            environ["HTTP_" + key.replace("-", "_").upper()] = value
        return environ
    def _log(self, status_code, request):
        # Log level scales with response class: 2xx/3xx info, 4xx warning,
        # 5xx error.
        if status_code < 400:
            log_method = access_log.info
        elif status_code < 500:
            log_method = access_log.warning
        else:
            log_method = access_log.error
        request_time = 1000.0 * request.request_time()
        summary = request.method + " " + request.uri + " (" + \
            request.remote_ip + ")"
        log_method("%d %s %.2fms", status_code, summary, request_time)
# Alias of httputil.HTTPServerRequest — presumably kept for backward
# compatibility with code importing HTTPRequest from this module.
HTTPRequest = httputil.HTTPServerRequest
| |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import mock
import operator
import optparse
import unittest
from blinkpy.common.path_finder import RELATIVE_WEB_TESTS
from blinkpy.common.host_mock import MockHost
from blinkpy.common.system.executive_mock import MockExecutive
from blinkpy.common.system.log_testing import LoggingTestCase
from blinkpy.common.system.output_capture import OutputCapture
from blinkpy.common.system.platform_info_mock import MockPlatformInfo
from blinkpy.common.system.system_host import SystemHost
from blinkpy.common.system.system_host_mock import MockSystemHost
from blinkpy.web_tests.port.base import Port, VirtualTestSuite
from blinkpy.web_tests.port.factory import PortFactory
from blinkpy.web_tests.port.test import (add_unit_tests_to_mock_filesystem,
add_manifest_to_mock_filesystem,
WEB_TEST_DIR, TestPort)
# Absolute path (with trailing slash) of the web tests directory inside the
# mock checkout used by these tests.
MOCK_WEB_TESTS = '/mock-checkout/' + RELATIVE_WEB_TESTS
class PortTest(LoggingTestCase):
def make_port(self,
executive=None,
with_tests=False,
port_name=None,
**kwargs):
host = MockHost()
if executive:
host.executive = executive
if with_tests:
add_unit_tests_to_mock_filesystem(host.filesystem)
return TestPort(host, **kwargs)
return Port(host, port_name or 'baseport', **kwargs)
def test_validate_wpt_dirs(self):
# Keys should not have trailing slashes.
for wpt_path in Port.WPT_DIRS.keys():
self.assertFalse(wpt_path.endswith('/'))
# Values should not be empty (except the last one).
for url_prefix in list(Port.WPT_DIRS.values())[:-1]:
self.assertNotEqual(url_prefix, '/')
self.assertEqual(list(Port.WPT_DIRS.values())[-1], '/')
def test_validate_wpt_regex(self):
self.assertEquals(
Port.WPT_REGEX.match('external/wpt/foo/bar.html').groups(),
('external/wpt', 'foo/bar.html'))
self.assertEquals(
Port.WPT_REGEX.match('virtual/test/external/wpt/foo/bar.html').
groups(), ('external/wpt', 'foo/bar.html'))
self.assertEquals(
Port.WPT_REGEX.match('wpt_internal/foo/bar.html').groups(),
('wpt_internal', 'foo/bar.html'))
self.assertEquals(
Port.WPT_REGEX.match('virtual/test/wpt_internal/foo/bar.html').
groups(), ('wpt_internal', 'foo/bar.html'))
def test_setup_test_run(self):
port = self.make_port()
# This routine is a no-op. We just test it for coverage.
port.setup_test_run()
def test_test_dirs(self):
port = self.make_port()
port.host.filesystem.write_text_file(
port.web_tests_dir() + '/canvas/test', '')
port.host.filesystem.write_text_file(
port.web_tests_dir() + '/css2.1/test', '')
dirs = port.test_dirs()
self.assertIn('canvas', dirs)
self.assertIn('css2.1', dirs)
def test_get_option__set(self):
options, _ = optparse.OptionParser().parse_args([])
options.foo = 'bar'
port = self.make_port(options=options)
self.assertEqual(port.get_option('foo'), 'bar')
def test_get_option__unset(self):
port = self.make_port()
self.assertIsNone(port.get_option('foo'))
def test_get_option__default(self):
port = self.make_port()
self.assertEqual(port.get_option('foo', 'bar'), 'bar')
def test_output_filename(self):
port = self.make_port()
# Normal test filename
test_file = 'fast/test.html'
self.assertEqual(
port.output_filename(test_file, '-expected', '.txt'),
'fast/test-expected.txt')
self.assertEqual(
port.output_filename(test_file, '-expected-mismatch', '.png'),
'fast/test-expected-mismatch.png')
# Test filename with query string
test_file = 'fast/test.html?wss&run_type=1'
self.assertEqual(
port.output_filename(test_file, '-expected', '.txt'),
'fast/test_wss_run_type=1-expected.txt')
self.assertEqual(
port.output_filename(test_file, '-actual', '.png'),
'fast/test_wss_run_type=1-actual.png')
# Test filename with query string containing a dot
test_file = 'fast/test.html?include=HTML.*'
self.assertEqual(
port.output_filename(test_file, '-expected', '.txt'),
'fast/test_include=HTML._-expected.txt')
self.assertEqual(
port.output_filename(test_file, '-actual', '.png'),
'fast/test_include=HTML._-actual.png')
    def test_expected_baselines_basic(self):
        """Baseline lookup for a test with no platform/flag overrides."""
        port = self.make_port(port_name='foo')
        port.FALLBACK_PATHS = {'': ['foo']}
        test_file = 'fast/test.html'
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'VirtualTestSuites', '[]')
        # The default baseline doesn't exist.
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [(None, 'fast/test-expected.txt')])
        self.assertIsNone(
            port.expected_filename(test_file, '.txt', return_default=False))
        self.assertEqual(
            port.expected_filename(test_file, '.txt'),
            MOCK_WEB_TESTS + 'fast/test-expected.txt')
        self.assertIsNone(port.fallback_expected_filename(test_file, '.txt'))
        # The default baseline exists.
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'fast/test-expected.txt', 'foo')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [(MOCK_WEB_TESTS[:-1], 'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt', return_default=False),
            MOCK_WEB_TESTS + 'fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(test_file, '.txt'),
            MOCK_WEB_TESTS + 'fast/test-expected.txt')
        self.assertIsNone(port.fallback_expected_filename(test_file, '.txt'))
        port.host.filesystem.remove(MOCK_WEB_TESTS + 'fast/test-expected.txt')
def test_expected_baselines_mismatch(self):
port = self.make_port(port_name='foo')
port.FALLBACK_PATHS = {'': ['foo']}
test_file = 'fast/test.html'
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'VirtualTestSuites', '[]')
self.assertEqual(
port.expected_baselines(test_file, '.txt', match=False),
[(None, 'fast/test-expected-mismatch.txt')])
self.assertEqual(
port.expected_filename(test_file, '.txt', match=False),
MOCK_WEB_TESTS + 'fast/test-expected-mismatch.txt')
def test_expected_baselines_platform_specific(self):
port = self.make_port(port_name='foo')
port.FALLBACK_PATHS = {'': ['foo']}
test_file = 'fast/test.html'
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'VirtualTestSuites', '[]')
self.assertEqual(port.baseline_version_dir(),
MOCK_WEB_TESTS + 'platform/foo')
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt', 'foo')
# The default baseline doesn't exist.
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[(MOCK_WEB_TESTS + 'platform/foo', 'fast/test-expected.txt')])
self.assertEqual(
port.expected_filename(test_file, '.txt'),
MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
self.assertEqual(
port.expected_filename(test_file, '.txt', return_default=False),
MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
self.assertIsNone(port.fallback_expected_filename(test_file, '.txt'))
# The default baseline exists.
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'fast/test-expected.txt', 'foo')
self.assertEqual(
port.expected_baselines(test_file, '.txt'),
[(MOCK_WEB_TESTS + 'platform/foo', 'fast/test-expected.txt')])
self.assertEqual(
port.expected_filename(test_file, '.txt'),
MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
self.assertEqual(
port.expected_filename(test_file, '.txt', return_default=False),
MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
self.assertEquals(
port.fallback_expected_filename(test_file, '.txt'),
MOCK_WEB_TESTS + 'fast/test-expected.txt')
port.host.filesystem.remove(MOCK_WEB_TESTS + 'fast/test-expected.txt')
    def test_expected_baselines_flag_specific(self):
        """Flag-specific baselines outrank platform ones; the flag-specific
        platform directory outranks both."""
        port = self.make_port(port_name='foo')
        port.FALLBACK_PATHS = {'': ['foo']}
        test_file = 'fast/test.html'
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'VirtualTestSuites', '[]')
        # pylint: disable=protected-access
        port._options.additional_platform_directory = []
        port._options.additional_driver_flag = ['--special-flag']
        self.assertEqual(port.baseline_search_path(), [
            MOCK_WEB_TESTS + 'flag-specific/special-flag/platform/foo',
            MOCK_WEB_TESTS + 'flag-specific/special-flag',
            MOCK_WEB_TESTS + 'platform/foo'
        ])
        self.assertEqual(
            port.baseline_version_dir(),
            MOCK_WEB_TESTS + 'flag-specific/special-flag/platform/foo')
        # Flag-specific baseline
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt', 'foo')
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS +
            'flag-specific/special-flag/fast/test-expected.txt', 'foo')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [(MOCK_WEB_TESTS + 'flag-specific/special-flag',
              'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt'), MOCK_WEB_TESTS +
            'flag-specific/special-flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(test_file, '.txt',
                                   return_default=False), MOCK_WEB_TESTS +
            'flag-specific/special-flag/fast/test-expected.txt')
        self.assertEqual(
            port.fallback_expected_filename(test_file, '.txt'),
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
        # Flag-specific platform-specific baseline
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS +
            'flag-specific/special-flag/platform/foo/fast/test-expected.txt',
            'foo')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [(MOCK_WEB_TESTS + 'flag-specific/special-flag/platform/foo',
              'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt'), MOCK_WEB_TESTS +
            'flag-specific/special-flag/platform/foo/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(test_file, '.txt',
                                   return_default=False), MOCK_WEB_TESTS +
            'flag-specific/special-flag/platform/foo/fast/test-expected.txt')
        self.assertEqual(
            port.fallback_expected_filename(test_file, '.txt'), MOCK_WEB_TESTS
            + 'flag-specific/special-flag/fast/test-expected.txt')
    def test_expected_baselines_virtual(self):
        """Virtual tests fall back to their base test's baselines unless
        fallback_base_for_virtual=False, and virtual-specific baselines win
        once present."""
        port = self.make_port(port_name='foo')
        port.FALLBACK_PATHS = {'': ['foo']}
        virtual_test = 'virtual/flag/fast/test.html'
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'VirtualTestSuites',
            '[{ "prefix": "flag", "bases": ["fast"], "args": ["--flag"]}]')
        # The default baseline for base test
        self.assertEqual(
            port.expected_baselines(virtual_test, '.txt'),
            [(None, 'virtual/flag/fast/test-expected.txt')])
        self.assertIsNone(
            port.expected_filename(virtual_test, '.txt', return_default=False))
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt'),
            MOCK_WEB_TESTS + 'fast/test-expected.txt')
        self.assertIsNone(
            port.expected_filename(
                virtual_test,
                '.txt',
                return_default=False,
                fallback_base_for_virtual=False))
        self.assertEqual(
            port.expected_filename(
                virtual_test, '.txt', fallback_base_for_virtual=False),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
        self.assertIsNone(
            port.fallback_expected_filename(virtual_test, '.txt'))
        # Platform-specific baseline for base test
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt', 'foo')
        self.assertEqual(
            port.expected_baselines(virtual_test, '.txt'),
            [(None, 'virtual/flag/fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt', return_default=False),
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt'),
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
        self.assertIsNone(
            port.expected_filename(
                virtual_test,
                '.txt',
                return_default=False,
                fallback_base_for_virtual=False))
        self.assertEqual(
            port.expected_filename(
                virtual_test, '.txt', fallback_base_for_virtual=False),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.fallback_expected_filename(virtual_test, '.txt'),
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
        # The default baseline for virtual test
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt', 'foo')
        self.assertEqual(
            port.expected_baselines(virtual_test, '.txt'),
            [(MOCK_WEB_TESTS[:-1], 'virtual/flag/fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt', return_default=False),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt'),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(
                virtual_test,
                '.txt',
                return_default=False,
                fallback_base_for_virtual=False),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(
                virtual_test, '.txt', fallback_base_for_virtual=False),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.fallback_expected_filename(virtual_test, '.txt'),
            MOCK_WEB_TESTS + 'platform/foo/fast/test-expected.txt')
        # Platform-specific baseline for virtual test
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS +
            'platform/foo/virtual/flag/fast/test-expected.txt', 'foo')
        self.assertEqual(
            port.expected_baselines(virtual_test, '.txt'),
            [(MOCK_WEB_TESTS + 'platform/foo',
              'virtual/flag/fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt',
                                   return_default=False), MOCK_WEB_TESTS +
            'platform/foo/virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(virtual_test, '.txt'), MOCK_WEB_TESTS +
            'platform/foo/virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(
                virtual_test,
                '.txt',
                return_default=False,
                fallback_base_for_virtual=False), MOCK_WEB_TESTS +
            'platform/foo/virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.expected_filename(
                virtual_test, '.txt',
                fallback_base_for_virtual=False), MOCK_WEB_TESTS +
            'platform/foo/virtual/flag/fast/test-expected.txt')
        self.assertEqual(
            port.fallback_expected_filename(virtual_test, '.txt'),
            MOCK_WEB_TESTS + 'virtual/flag/fast/test-expected.txt')
    def test_additional_platform_directory(self):
        """Additional platform directories take precedence, in the order
        they are listed."""
        port = self.make_port(port_name='foo')
        port.FALLBACK_PATHS = {'': ['foo']}
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'VirtualTestSuites', '[]')
        test_file = 'fast/test.html'
        # Simple additional platform directory
        port._options.additional_platform_directory = ['/tmp/local-baselines']  # pylint: disable=protected-access
        self.assertEqual(port.baseline_version_dir(), '/tmp/local-baselines')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [(None, 'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt', return_default=False),
            None)
        self.assertEqual(
            port.expected_filename(test_file, '.txt'),
            MOCK_WEB_TESTS + 'fast/test-expected.txt')
        port.host.filesystem.write_text_file(
            '/tmp/local-baselines/fast/test-expected.txt', 'foo')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [('/tmp/local-baselines', 'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt'),
            '/tmp/local-baselines/fast/test-expected.txt')
        # Multiple additional platform directories
        port._options.additional_platform_directory = [  # pylint: disable=protected-access
            '/foo', '/tmp/local-baselines'
        ]
        self.assertEqual(port.baseline_version_dir(), '/foo')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [('/tmp/local-baselines', 'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt'),
            '/tmp/local-baselines/fast/test-expected.txt')
        port.host.filesystem.write_text_file('/foo/fast/test-expected.txt',
                                             'foo')
        self.assertEqual(
            port.expected_baselines(test_file, '.txt'),
            [('/foo', 'fast/test-expected.txt')])
        self.assertEqual(
            port.expected_filename(test_file, '.txt'),
            '/foo/fast/test-expected.txt')
    def test_nonexistant_expectations(self):
        """Expectations files that don't exist are silently skipped."""
        # NOTE(review): 'nonexistant' is a typo for 'nonexistent'; the method
        # name is kept as-is since external tooling may reference it.
        port = self.make_port(port_name='foo')
        port.default_expectations_files = lambda: [
            MOCK_WEB_TESTS + 'platform/exists/TestExpectations', MOCK_WEB_TESTS
            + 'platform/nonexistant/TestExpectations'
        ]
        port.host.filesystem.write_text_file(
            MOCK_WEB_TESTS + 'platform/exists/TestExpectations', '')
        self.assertEqual('\n'.join(port.expectations_dict().keys()),
                         MOCK_WEB_TESTS + 'platform/exists/TestExpectations')
def _make_port_for_test_additional_expectations(self, options_dict={}):
port = self.make_port(
port_name='foo', options=optparse.Values(options_dict))
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'platform/foo/TestExpectations', '')
port.host.filesystem.write_text_file(
'/tmp/additional-expectations-1.txt', 'content1\n')
port.host.filesystem.write_text_file(
'/tmp/additional-expectations-2.txt', 'content2\n')
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'FlagExpectations/special-flag', 'content3')
return port
def test_additional_expectations_empty(self):
port = self._make_port_for_test_additional_expectations()
self.assertEqual(list(port.expectations_dict().values()), [])
def test_additional_expectations_1(self):
port = self._make_port_for_test_additional_expectations({
'additional_expectations': ['/tmp/additional-expectations-1.txt']
})
self.assertEqual(list(port.expectations_dict().values()),
['content1\n'])
def test_additional_expectations_2(self):
port = self._make_port_for_test_additional_expectations({
'additional_expectations': [
'/tmp/additional-expectations-1.txt',
'/tmp/additional-expectations-2.txt'
]
})
self.assertEqual(list(port.expectations_dict().values()),
['content1\n', 'content2\n'])
def test_additional_expectations_additional_flag(self):
port = self._make_port_for_test_additional_expectations({
'additional_expectations': [
'/tmp/additional-expectations-1.txt',
'/tmp/additional-expectations-2.txt'
],
'additional_driver_flag': ['--special-flag']
})
self.assertEqual(list(port.expectations_dict().values()),
['content3', 'content1\n', 'content2\n'])
def test_flag_specific_expectations(self):
port = self.make_port(port_name='foo')
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'FlagExpectations/special-flag-a', 'aa')
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'FlagExpectations/special-flag-b', 'bb')
port.host.filesystem.write_text_file(
MOCK_WEB_TESTS + 'FlagExpectations/README.txt', 'cc')
self.assertEqual(list(port.expectations_dict().values()), [])
# all_expectations_dict() is an OrderedDict, but its order depends on
# file system walking order.
self.assertEqual(
sorted(port.all_expectations_dict().values()), ['aa', 'bb'])
    def test_flag_specific_expectations_identify_unreadable_file(self):
        """A non-UTF-8 expectations file raises and is named in the log."""
        port = self.make_port(port_name='foo')
        non_utf8_file = MOCK_WEB_TESTS + 'FlagExpectations/non-utf8-file'
        # 0xC0 is never valid in UTF-8.
        invalid_utf8 = b'\xC0'
        port.host.filesystem.write_binary_file(non_utf8_file, invalid_utf8)
        with self.assertRaises(UnicodeDecodeError):
            port.all_expectations_dict()
        # The UnicodeDecodeError does not indicate which file we failed to read,
        # so ensure that the file is identified in a log message.
        self.assertLog([
            'ERROR: Failed to read expectations file: \'' + non_utf8_file +
            '\'\n'
        ])
    def test_flag_specific_config_name_from_options(self):
        """With no config file, the name comes from the first driver flag
        (with the leading dashes stripped)."""
        port_a = self.make_port(options=optparse.Values({}))
        # pylint: disable=protected-access
        self.assertEqual(port_a._specified_additional_driver_flags(), [])
        self.assertIsNone(port_a.flag_specific_config_name())
        port_b = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb']
            }))
        self.assertEqual(port_b._specified_additional_driver_flags(), ['--bb'])
        self.assertEqual(port_b.flag_specific_config_name(), 'bb')
        port_c = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--cc', '--dd']
            }))
        self.assertEqual(port_c._specified_additional_driver_flags(),
                         ['--cc', '--dd'])
        self.assertEqual(port_c.flag_specific_config_name(), 'cc')
    def test_flag_specific_config_name_from_options_and_file(self):
        """Flags read from additional-driver-flag.setting come before the
        command-line flags, and the first flag names the config."""
        flag_file = MOCK_WEB_TESTS + 'additional-driver-flag.setting'
        port_a = self.make_port(options=optparse.Values({}))
        port_a.host.filesystem.write_text_file(flag_file, '--aa')
        # pylint: disable=protected-access
        self.assertEqual(port_a._specified_additional_driver_flags(), ['--aa'])
        self.assertEqual(port_a.flag_specific_config_name(), 'aa')
        port_b = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb']
            }))
        port_b.host.filesystem.write_text_file(flag_file, '--aa')
        self.assertEqual(port_b._specified_additional_driver_flags(),
                         ['--aa', '--bb'])
        self.assertEqual(port_b.flag_specific_config_name(), 'aa')
        port_c = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb', '--cc']
            }))
        port_c.host.filesystem.write_text_file(flag_file, '--bb --dd')
        # We don't remove duplicated flags at this time.
        self.assertEqual(port_c._specified_additional_driver_flags(),
                         ['--bb', '--dd', '--bb', '--cc'])
        self.assertEqual(port_c.flag_specific_config_name(), 'bb')
def _write_flag_specific_config(self, port):
port.host.filesystem.write_text_file(
port.host.filesystem.join(port.web_tests_dir(),
'FlagSpecificConfig'), '['
' {"name": "a", "args": ["--aa"]},'
' {"name": "b", "args": ["--bb", "--aa"]},'
' {"name": "c", "args": ["--bb", "--cc"]}'
']')
    def test_flag_specific_config_name_from_options_and_config(self):
        """Specified flags are matched against FlagSpecificConfig entries.

        A config matches when its args are a leading subsequence of the
        specified flags; extra trailing flags do not prevent the match.
        """
        port_a1 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--aa']
            }))
        self._write_flag_specific_config(port_a1)
        # pylint: disable=protected-access
        self.assertEqual(port_a1.flag_specific_config_name(), 'a')
        port_a2 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--aa', '--dd']
            }))
        self._write_flag_specific_config(port_a2)
        self.assertEqual(port_a2.flag_specific_config_name(), 'a')
        port_a3 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--aa', '--bb']
            }))
        self._write_flag_specific_config(port_a3)
        self.assertEqual(port_a3.flag_specific_config_name(), 'a')
        port_b1 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb', '--aa']
            }))
        self._write_flag_specific_config(port_b1)
        self.assertEqual(port_b1.flag_specific_config_name(), 'b')
        port_b2 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb', '--aa', '--cc']
            }))
        self._write_flag_specific_config(port_b2)
        self.assertEqual(port_b2.flag_specific_config_name(), 'b')
        port_b3 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb', '--aa', '--dd']
            }))
        self._write_flag_specific_config(port_b3)
        self.assertEqual(port_b3.flag_specific_config_name(), 'b')
        port_c1 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb', '--cc']
            }))
        self._write_flag_specific_config(port_c1)
        self.assertEqual(port_c1.flag_specific_config_name(), 'c')
        port_c2 = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb', '--cc', '--aa']
            }))
        self._write_flag_specific_config(port_c2)
        self.assertEqual(port_c2.flag_specific_config_name(), 'c')
    def test_flag_specific_fallback(self):
        """Without a config match, the name falls back to the first flag."""
        port_b = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--bb']
            }))
        self._write_flag_specific_config(port_b)
        # No match. Fallback to first specified flag.
        self.assertEqual(port_b.flag_specific_config_name(), 'bb')
        port_d = self.make_port(
            options=optparse.Values({
                'additional_driver_flag': ['--dd', '--ee']
            }))
        self._write_flag_specific_config(port_d)
        # pylint: disable=protected-access
        self.assertEqual(port_d.flag_specific_config_name(), 'dd')
    def test_flag_specific_option(self):
        """--flag-specific selects a config by name, overriding driver flags.

        An unknown name raises AssertionError.
        """
        port_a = self.make_port(
            options=optparse.Values({
                'flag_specific': 'a'
            }))
        self._write_flag_specific_config(port_a)
        # pylint: disable=protected-access
        self.assertEqual(port_a.flag_specific_config_name(), 'a')
        port_b = self.make_port(
            options=optparse.Values({
                'flag_specific': 'a',
                'additional_driver_flag': ['--bb']
            }))
        self._write_flag_specific_config(port_b)
        # The explicit flag_specific name wins over --bb.
        self.assertEqual(port_b.flag_specific_config_name(), 'a')
        port_d = self.make_port(
            options=optparse.Values({
                'flag_specific': 'd'
            }))
        self._write_flag_specific_config(port_d)
        # 'd' is not defined in the config file.
        self.assertRaises(AssertionError, port_d.flag_specific_config_name)
    def test_duplicate_flag_specific_name(self):
        """Two configs with the same name raise ValueError."""
        port = self.make_port()
        port.host.filesystem.write_text_file(
            port.host.filesystem.join(port.web_tests_dir(),
                                      'FlagSpecificConfig'),
            '[{"name": "a", "args": ["--aa"]}, {"name": "a", "args": ["--aa", "--bb"]}]'
        )
        # pylint: disable=protected-access
        self.assertRaises(ValueError, port._flag_specific_configs)
    def test_duplicate_flag_specific_args(self):
        """Two configs with identical args raise ValueError."""
        port = self.make_port()
        port.host.filesystem.write_text_file(
            port.host.filesystem.join(port.web_tests_dir(),
                                      'FlagSpecificConfig'),
            '[{"name": "a", "args": ["--aa"]}, {"name": "b", "args": ["--aa"]}]'
        )
        # pylint: disable=protected-access
        self.assertRaises(ValueError, port._flag_specific_configs)
    def test_invalid_flag_specific_name(self):
        """A config name containing '/' is rejected with ValueError."""
        port = self.make_port()
        port.host.filesystem.write_text_file(
            port.host.filesystem.join(port.web_tests_dir(),
                                      'FlagSpecificConfig'),
            '[{"name": "a/", "args": ["--aa"]}]')
        # pylint: disable=protected-access
        self.assertRaises(ValueError, port._flag_specific_configs)
def test_additional_env_var(self):
port = self.make_port(
options=optparse.Values({
'additional_env_var': ['FOO=BAR', 'BAR=FOO']
}))
self.assertEqual(
port.get_option('additional_env_var'), ['FOO=BAR', 'BAR=FOO'])
environment = port.setup_environ_for_server()
self.assertTrue(('FOO' in environment) & ('BAR' in environment))
self.assertEqual(environment['FOO'], 'BAR')
self.assertEqual(environment['BAR'], 'FOO')
def test_find_no_paths_specified(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
self.assertNotEqual(len(tests), 0)
def test_find_one_test(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/image.html'])
self.assertEqual(len(tests), 1)
def test_find_glob(self):
port = self.make_port(with_tests=True)
tests = port.tests(['failures/expected/im*'])
self.assertEqual(len(tests), 2)
def test_find_with_skipped_directories(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts'])
self.assertNotIn('userscripts/resources/iframe.html', tests)
def test_find_with_skipped_directories_2(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts/resources'])
self.assertEqual(tests, [])
    def test_update_manifest_once_by_default(self):
        """Repeated wpt_manifest() calls only regenerate the manifest once."""
        # pylint: disable=protected-access
        port = self.make_port(with_tests=True)
        port.wpt_manifest('external/wpt')
        port.wpt_manifest('external/wpt')
        # One file written and one subprocess call despite two requests.
        self.assertEqual(len(port.host.filesystem.written_files), 1)
        self.assertEqual(len(port.host.executive.calls), 1)
    def test_no_manifest_update_with_existing_manifest(self):
        """With --no-manifest-update and an existing MANIFEST.json, nothing runs."""
        # pylint: disable=protected-access
        port = self.make_port(with_tests=True)
        port.set_option_default('manifest_update', False)
        filesystem = port.host.filesystem
        filesystem.write_text_file(
            WEB_TEST_DIR + '/external/wpt/MANIFEST.json', '{}')
        filesystem.clear_written_files()
        port.wpt_manifest('external/wpt')
        # No writes and no subprocess calls: the existing manifest is reused.
        self.assertEqual(len(port.host.filesystem.written_files), 0)
        self.assertEqual(len(port.host.executive.calls), 0)
    def test_no_manifest_update_without_existing_manifest(self):
        """Even with --no-manifest-update, a missing manifest is generated."""
        # pylint: disable=protected-access
        port = self.make_port(with_tests=True)
        port.set_option_default('manifest_update', False)
        port.wpt_manifest('external/wpt')
        self.assertEqual(len(port.host.filesystem.written_files), 1)
        self.assertEqual(len(port.host.executive.calls), 1)
def test_find_none_if_not_in_manifest(self):
port = self.make_port(with_tests=True)
add_manifest_to_mock_filesystem(port)
self.assertNotIn('external/wpt/common/blank.html', port.tests([]))
self.assertNotIn('external/wpt/console/console-is-a-namespace.any.js',
port.tests([]))
def test_find_one_if_in_manifest(self):
port = self.make_port(with_tests=True)
add_manifest_to_mock_filesystem(port)
self.assertIn('external/wpt/dom/ranges/Range-attributes.html',
port.tests([]))
self.assertIn('external/wpt/console/console-is-a-namespace.any.html',
port.tests([]))
    def test_wpt_tests_paths(self):
        """WPT paths expand per the manifest: dirs, files, and .any.js variants."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        all_wpt = [
            'external/wpt/console/console-is-a-namespace.any.html',
            'external/wpt/console/console-is-a-namespace.any.worker.html',
            'external/wpt/dom/ranges/Range-attributes-slow.html',
            'external/wpt/dom/ranges/Range-attributes.html',
            'external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html',
            'external/wpt/html/parse.html?run_type=uri',
            'external/wpt/html/parse.html?run_type=write',
            'external/wpt/portals/portals-no-frame-crash.html',
        ]
        # test.any.js shows up on the filesystem as one file but it effectively becomes two test files:
        # test.any.html and test.any.worker.html. We should support running test.any.js by name and
        # indirectly by specifying a parent directory.
        self.assertEqual(sorted(port.tests(['external'])), all_wpt)
        self.assertEqual(sorted(port.tests(['external/'])), all_wpt)
        self.assertEqual(port.tests(['external/csswg-test']), [])
        self.assertEqual(sorted(port.tests(['external/wpt'])), all_wpt)
        self.assertEqual(sorted(port.tests(['external/wpt/'])), all_wpt)
        self.assertEqual(
            sorted(port.tests(['external/wpt/console'])), [
                'external/wpt/console/console-is-a-namespace.any.html',
                'external/wpt/console/console-is-a-namespace.any.worker.html'
            ])
        self.assertEqual(
            sorted(port.tests(['external/wpt/console/'])), [
                'external/wpt/console/console-is-a-namespace.any.html',
                'external/wpt/console/console-is-a-namespace.any.worker.html'
            ])
        # Running the .any.js file by name yields both generated variants.
        self.assertEqual(
            sorted(
                port.tests(
                    ['external/wpt/console/console-is-a-namespace.any.js'])),
            [
                'external/wpt/console/console-is-a-namespace.any.html',
                'external/wpt/console/console-is-a-namespace.any.worker.html'
            ])
        self.assertEqual(
            port.tests(
                ['external/wpt/console/console-is-a-namespace.any.html']),
            ['external/wpt/console/console-is-a-namespace.any.html'])
        self.assertEqual(
            sorted(port.tests(['external/wpt/dom'])), [
                'external/wpt/dom/ranges/Range-attributes-slow.html',
                'external/wpt/dom/ranges/Range-attributes.html'
            ])
        self.assertEqual(
            sorted(port.tests(['external/wpt/dom/'])), [
                'external/wpt/dom/ranges/Range-attributes-slow.html',
                'external/wpt/dom/ranges/Range-attributes.html'
            ])
        self.assertEqual(
            port.tests(['external/wpt/dom/ranges/Range-attributes.html']),
            ['external/wpt/dom/ranges/Range-attributes.html'])
        # wpt_internal should work the same.
        self.assertEqual(
            port.tests(['wpt_internal']), ['wpt_internal/dom/bar.html'])
    def test_virtual_wpt_tests_paths(self):
        """virtual/<suite>/ prefixes expand WPT paths like their bases do."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        all_wpt = [
            'virtual/virtual_wpt/external/wpt/console/console-is-a-namespace.any.html',
            'virtual/virtual_wpt/external/wpt/console/console-is-a-namespace.any.worker.html',
            'virtual/virtual_wpt/external/wpt/dom/ranges/Range-attributes-slow.html',
            'virtual/virtual_wpt/external/wpt/dom/ranges/Range-attributes.html',
            'virtual/virtual_wpt/external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html',
            'virtual/virtual_wpt/external/wpt/html/parse.html?run_type=uri',
            'virtual/virtual_wpt/external/wpt/html/parse.html?run_type=write',
            'virtual/virtual_wpt/external/wpt/portals/portals-no-frame-crash.html',
        ]
        dom_wpt = [
            'virtual/virtual_wpt_dom/external/wpt/dom/ranges/Range-attributes-slow.html',
            'virtual/virtual_wpt_dom/external/wpt/dom/ranges/Range-attributes.html',
        ]
        self.assertEqual(
            sorted(port.tests(['virtual/virtual_wpt/external/'])), all_wpt)
        self.assertEqual(
            sorted(port.tests(['virtual/virtual_wpt/external/wpt/'])), all_wpt)
        self.assertEqual(
            sorted(port.tests(['virtual/virtual_wpt/external/wpt/console'])), [
                'virtual/virtual_wpt/external/wpt/console/console-is-a-namespace.any.html',
                'virtual/virtual_wpt/external/wpt/console/console-is-a-namespace.any.worker.html'
            ])
        self.assertEqual(
            sorted(port.tests(['virtual/virtual_wpt_dom/external/wpt/dom/'])),
            dom_wpt)
        self.assertEqual(
            sorted(
                port.tests(
                    ['virtual/virtual_wpt_dom/external/wpt/dom/ranges/'])),
            dom_wpt)
        self.assertEqual(
            port.tests([
                'virtual/virtual_wpt_dom/external/wpt/dom/ranges/Range-attributes.html'
            ]), [
                'virtual/virtual_wpt_dom/external/wpt/dom/ranges/Range-attributes.html'
            ])
        # wpt_internal should work the same.
        self.assertEqual(
            port.tests(['virtual/virtual_wpt_dom/wpt_internal']),
            ['virtual/virtual_wpt_dom/wpt_internal/dom/bar.html'])
        self.assertEqual(
            sorted(port.tests(['virtual/virtual_wpt_dom/'])),
            dom_wpt + ['virtual/virtual_wpt_dom/wpt_internal/dom/bar.html'])
    def test_virtual_test_paths(self):
        """A virtual suite with mixed bases expands each base independently."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        ssl_tests = [
            'virtual/mixed_wpt/http/tests/ssl/text.html',
        ]
        http_passes_tests = [
            'virtual/mixed_wpt/http/tests/passes/image.html',
            'virtual/mixed_wpt/http/tests/passes/text.html',
        ]
        dom_tests = [
            'virtual/mixed_wpt/external/wpt/dom/ranges/Range-attributes-slow.html',
            'virtual/mixed_wpt/external/wpt/dom/ranges/Range-attributes.html',
        ]
        # The full set of tests must be returned when running the entire suite.
        self.assertEqual(sorted(port.tests(['virtual/mixed_wpt/'])),
                         dom_tests + http_passes_tests + ssl_tests)
        self.assertEqual(sorted(port.tests(['virtual/mixed_wpt/external'])),
                         dom_tests)
        self.assertEqual(sorted(port.tests(['virtual/mixed_wpt/http'])),
                         http_passes_tests + ssl_tests)
        self.assertEqual(
            sorted(
                port.tests([
                    'virtual/mixed_wpt/http/tests/ssl',
                    'virtual/mixed_wpt/external/wpt/dom'
                ])), dom_tests + ssl_tests)
        # Make sure we don't run a non-existent test.
        self.assertEqual(sorted(port.tests(['virtual/mixed_wpt/passes'])), [])
    def test_is_non_wpt_test_file(self):
        """is_non_wpt_test_file() excludes refs, expectations, and WPT dirs."""
        port = self.make_port(with_tests=True)
        self.assertTrue(port.is_non_wpt_test_file('', 'foo.html'))
        self.assertTrue(port.is_non_wpt_test_file('', 'foo.svg'))
        self.assertTrue(port.is_non_wpt_test_file('', 'test-ref-test.html'))
        self.assertTrue(port.is_non_wpt_test_file('devtools', 'a.js'))
        # Expectation and reference files are not tests themselves.
        self.assertFalse(port.is_non_wpt_test_file('', 'foo.png'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-expected.html'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-expected.svg'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-expected.xht'))
        self.assertFalse(
            port.is_non_wpt_test_file('', 'foo-expected-mismatch.html'))
        self.assertFalse(
            port.is_non_wpt_test_file('', 'foo-expected-mismatch.svg'))
        self.assertFalse(
            port.is_non_wpt_test_file('', 'foo-expected-mismatch.xhtml'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-ref.html'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-notref.html'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-notref.xht'))
        self.assertFalse(port.is_non_wpt_test_file('', 'foo-ref.xhtml'))
        self.assertFalse(port.is_non_wpt_test_file('', 'ref-foo.html'))
        self.assertFalse(port.is_non_wpt_test_file('', 'notref-foo.xhr'))
        # Anything under external/wpt or wpt_internal is WPT, not "non-WPT".
        self.assertFalse(
            port.is_non_wpt_test_file(WEB_TEST_DIR + '/external/wpt/common',
                                      'blank.html'))
        self.assertFalse(
            port.is_non_wpt_test_file(WEB_TEST_DIR + '/external/wpt/console',
                                      'console-is-a-namespace.any.js'))
        self.assertFalse(
            port.is_non_wpt_test_file(WEB_TEST_DIR + '/external/wpt',
                                      'testharness_runner.html'))
        # wpt_automation is a sibling of wpt, not part of it.
        self.assertTrue(
            port.is_non_wpt_test_file(
                WEB_TEST_DIR + '/external/wpt_automation', 'foo.html'))
        self.assertFalse(
            port.is_non_wpt_test_file(WEB_TEST_DIR + '/wpt_internal/console',
                                      'console-is-a-namespace.any.js'))
    def test_is_wpt_test(self):
        """Static Port.is_wpt_test() matches external/wpt paths, incl. virtual."""
        self.assertTrue(
            Port.is_wpt_test('external/wpt/dom/ranges/Range-attributes.html'))
        self.assertTrue(
            Port.is_wpt_test(
                'external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'
            ))
        self.assertFalse(Port.is_wpt_test('dom/domparsing/namespaces-1.html'))
        self.assertFalse(Port.is_wpt_test('rutabaga'))
        # A virtual prefix requires both the 'virtual/' segment and a name.
        self.assertTrue(
            Port.is_wpt_test('virtual/a-name/external/wpt/baz/qux.htm'))
        self.assertFalse(Port.is_wpt_test('virtual/external/wpt/baz/qux.htm'))
        self.assertFalse(
            Port.is_wpt_test('not-virtual/a-name/external/wpt/baz/qux.htm'))
    def test_is_wpt_idlharness_test(self):
        """Only files whose basename starts with 'idlharness' qualify."""
        self.assertTrue(
            Port.is_wpt_idlharness_test(
                'external/wpt/css/css-pseudo/idlharness.html'))
        self.assertTrue(
            Port.is_wpt_idlharness_test(
                'external/wpt/payment-handler/idlharness.https.any.html'))
        self.assertTrue(
            Port.is_wpt_idlharness_test(
                'external/wpt/payment-handler/idlharness.https.any.serviceworker.html'
            ))
        # Neither a different basename nor a matching directory name counts.
        self.assertFalse(
            Port.is_wpt_idlharness_test(
                'external/wpt/css/foo/interfaces.html'))
        self.assertFalse(
            Port.is_wpt_idlharness_test(
                'external/wpt/css/idlharness/bar.html'))
    def test_should_use_wptserve(self):
        """wptserve is used for external/wpt tests (direct or virtual) only."""
        self.assertTrue(
            Port.should_use_wptserve('external/wpt/dom/interfaces.html'))
        self.assertTrue(
            Port.should_use_wptserve(
                'virtual/a-name/external/wpt/dom/interfaces.html'))
        self.assertFalse(
            Port.should_use_wptserve('harness-tests/wpt/console_logging.html'))
        self.assertFalse(
            Port.should_use_wptserve('dom/domparsing/namespaces-1.html'))
    def test_is_wpt_crash_test(self):
        """Crash tests are identified via the WPT manifest, not the filename."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        self.assertTrue(
            port.is_wpt_crash_test(
                'external/wpt/portals/portals-no-frame-crash.html'))
        # A '-crash.html' name alone is not enough if the manifest lacks it.
        self.assertFalse(
            port.is_wpt_crash_test(
                'external/wpt/nonexistent/i-dont-exist-crash.html'))
        self.assertFalse(
            port.is_wpt_crash_test(
                'external/wpt/dom/ranges/Range-attributes.html'))
        # Non-WPT paths are never WPT crash tests.
        self.assertFalse(
            port.is_wpt_crash_test('portals/portals-no-frame-crash.html'))
    def test_is_slow_wpt_test(self):
        """Slowness comes from the manifest's 'timeout: long' annotation."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        self.assertFalse(
            port.is_slow_wpt_test(
                'external/wpt/dom/ranges/Range-attributes.html'))
        self.assertTrue(
            port.is_slow_wpt_test(
                'external/wpt/dom/ranges/Range-attributes-slow.html'))
        self.assertTrue(
            port.is_slow_wpt_test(
                'external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'
            ))
        self.assertFalse(
            port.is_slow_wpt_test(
                'external/wpt/css/css-pseudo/idlharness.html'))
    def test_is_slow_wpt_test_idlharness_with_dcheck(self):
        """idlharness tests count as slow in dcheck builds regardless of manifest."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        # Simulate a build with DCHECKs enabled via args.gn.
        port.host.filesystem.write_text_file(port._build_path('args.gn'),
                                             'dcheck_always_on=true\n')
        # We always consider idlharness tests slow, even if they aren't marked
        # such in the manifest. See https://crbug.com/1047818
        self.assertTrue(
            port.is_slow_wpt_test(
                'external/wpt/css/css-pseudo/idlharness.html'))
    def test_is_slow_wpt_test_with_variations(self):
        """Slowness is tracked per generated variant (.any.*, ?query), not per file."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        self.assertFalse(
            port.is_slow_wpt_test(
                'external/wpt/console/console-is-a-namespace.any.html'))
        self.assertTrue(
            port.is_slow_wpt_test(
                'external/wpt/console/console-is-a-namespace.any.worker.html'))
        self.assertFalse(
            port.is_slow_wpt_test('external/wpt/html/parse.html?run_type=uri'))
        self.assertTrue(
            port.is_slow_wpt_test(
                'external/wpt/html/parse.html?run_type=write'))
    def test_is_slow_wpt_test_takes_virtual_tests(self):
        """Virtual WPT test names map to their base test's slowness."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        self.assertFalse(
            port.is_slow_wpt_test(
                'virtual/virtual_wpt/external/wpt/dom/ranges/Range-attributes.html'
            ))
        self.assertTrue(
            port.is_slow_wpt_test(
                'virtual/virtual_wpt/external/wpt/dom/ranges/Range-attributes-slow.html'
            ))
    def test_is_slow_wpt_test_returns_false_for_illegal_paths(self):
        """Paths outside external/wpt are never reported as slow WPT tests."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        self.assertFalse(
            port.is_slow_wpt_test('dom/ranges/Range-attributes.html'))
        self.assertFalse(
            port.is_slow_wpt_test('dom/ranges/Range-attributes-slow.html'))
        self.assertFalse(
            port.is_slow_wpt_test('/dom/ranges/Range-attributes.html'))
        self.assertFalse(
            port.is_slow_wpt_test('/dom/ranges/Range-attributes-slow.html'))
    def test_get_file_path_for_wpt_test(self):
        """Maps a (possibly virtual or generated) WPT test name to its source file.

        Returns None for non-WPT paths or tests not in the manifest.
        """
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        # Virtual prefix is stripped.
        self.assertEqual(
            port.get_file_path_for_wpt_test(
                'virtual/virtual_wpt/external/wpt/dom/ranges/Range-attributes.html'
            ),
            'external/wpt/dom/ranges/Range-attributes.html',
        )
        # Generated .any.* variants map back to the .any.js source.
        self.assertEqual(
            port.get_file_path_for_wpt_test(
                'external/wpt/console/console-is-a-namespace.any.worker.html'),
            'external/wpt/console/console-is-a-namespace.any.js',
        )
        # Query strings are dropped.
        self.assertEqual(
            port.get_file_path_for_wpt_test(
                'external/wpt/html/parse.html?run_type=uri'),
            'external/wpt/html/parse.html',
        )
        self.assertIsNone(port.get_file_path_for_wpt_test('non-wpt/test.html'))
        self.assertIsNone(
            port.get_file_path_for_wpt_test('external/wpt/non-existent.html'))
    def test_reference_files(self):
        """reference_files() finds -expected(.html/.svg) and mismatch refs."""
        port = self.make_port(with_tests=True)
        self.assertEqual(
            port.reference_files('passes/svgreftest.svg'),
            [('==', port.web_tests_dir() + '/passes/svgreftest-expected.svg')])
        self.assertEqual(
            port.reference_files('passes/xhtreftest.svg'),
            [('==', port.web_tests_dir() + '/passes/xhtreftest-expected.html')
             ])
        # '-expected-mismatch' refs are reported with the '!=' relation.
        self.assertEqual(
            port.reference_files('passes/phpreftest.php'),
            [('!=', port.web_tests_dir() +
              '/passes/phpreftest-expected-mismatch.svg')])
    def test_reference_files_from_manifest(self):
        """WPT reference files come from the manifest, also for virtual names."""
        port = self.make_port(with_tests=True)
        add_manifest_to_mock_filesystem(port)
        self.assertEqual(
            port.reference_files(
                'external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'
            ),
            [('==', port.web_tests_dir() +
              '/external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L-ref.html'
              )])
        # The virtual prefix does not change the resolved reference path.
        self.assertEqual(
            port.reference_files(
                'virtual/layout_ng/' +
                'external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L.html'
            ),
            [('==', port.web_tests_dir() +
              '/external/wpt/html/dom/elements/global-attributes/dir_auto-EN-L-ref.html'
              )])
def test_http_server_supports_ipv6(self):
port = self.make_port()
self.assertTrue(port.http_server_supports_ipv6())
port.host.platform.os_name = 'win'
self.assertFalse(port.http_server_supports_ipv6())
    def test_http_server_requires_http_protocol_options_unsafe(self):
        """Old Apache (no HttpProtocolOptions directive) requires the Unsafe flag.

        Simulated by Apache's stderr complaining about an unrelated directive,
        which means HttpProtocolOptions itself was accepted.
        """
        port = self.make_port(
            executive=MockExecutive(
                stderr=
                ("Invalid command 'INTENTIONAL_SYNTAX_ERROR', perhaps misspelled or"
                 " defined by a module not included in the server configuration\n"
                 )))
        port.path_to_apache = lambda: '/usr/sbin/httpd'
        self.assertTrue(
            port.http_server_requires_http_protocol_options_unsafe())
    def test_http_server_doesnt_require_http_protocol_options_unsafe(self):
        """Apache rejecting 'HttpProtocolOptions' itself means no Unsafe flag."""
        port = self.make_port(
            executive=MockExecutive(
                stderr=
                ("Invalid command 'HttpProtocolOptions', perhaps misspelled or"
                 " defined by a module not included in the server configuration\n"
                 )))
        port.path_to_apache = lambda: '/usr/sbin/httpd'
        self.assertFalse(
            port.http_server_requires_http_protocol_options_unsafe())
    def test_check_httpd_success(self):
        """check_httpd() succeeds quietly when the httpd binary runs cleanly."""
        port = self.make_port(executive=MockExecutive())
        port.path_to_apache = lambda: '/usr/sbin/httpd'
        capture = OutputCapture()
        capture.capture_output()
        self.assertTrue(port.check_httpd())
        _, _, logs = capture.restore_output()
        # No warnings should have been logged.
        self.assertEqual('', logs)
    def test_httpd_returns_error_code(self):
        """check_httpd() fails and logs a warning when httpd exits non-zero."""
        port = self.make_port(executive=MockExecutive(exit_code=1))
        port.path_to_apache = lambda: '/usr/sbin/httpd'
        capture = OutputCapture()
        capture.capture_output()
        self.assertFalse(port.check_httpd())
        _, _, logs = capture.restore_output()
        self.assertEqual('httpd seems broken. Cannot run http tests.\n', logs)
    def test_test_exists(self):
        """test_exists() works for files, dirs, and virtual test names."""
        port = self.make_port(with_tests=True)
        self.assertTrue(port.test_exists('passes'))
        self.assertTrue(port.test_exists('passes/text.html'))
        self.assertFalse(port.test_exists('passes/does_not_exist.html'))
        self.assertTrue(port.test_exists('virtual'))
        self.assertFalse(port.test_exists('virtual/does_not_exist.html'))
        self.assertTrue(
            port.test_exists('virtual/virtual_passes/passes/text.html'))
        # Virtual suites with empty bases hold physical files directly.
        self.assertTrue(
            port.test_exists('virtual/virtual_empty_bases/physical1.html'))
        self.assertTrue(
            port.test_exists('virtual/virtual_empty_bases/dir/physical2.html'))
        self.assertFalse(
            port.test_exists(
                'virtual/virtual_empty_bases/does_not_exist.html'))
def test_test_isfile(self):
port = self.make_port(with_tests=True)
self.assertFalse(port.test_isfile('passes'))
self.assertTrue(port.test_isfile('passes/text.html'))
self.assertFalse(port.test_isfile('passes/does_not_exist.html'))
self.assertFalse(port.test_isfile('virtual'))
self.assertTrue(
port.test_isfile('virtual/virtual_passes/passes/text.html'))
self.assertFalse(port.test_isfile('virtual/does_not_exist.html'))
self.assertTrue(
port.test_isfile('virtual/virtual_empty_bases/physical1.html'))
self.assertTrue(
port.test_isfile('virtual/virtual_empty_bases/dir/physical2.html'))
self.assertFalse(
port.test_exists(
'virtual/virtual_empty_bases/does_not_exist.html'))
    def test_test_isdir(self):
        """test_isdir() is true only for existing test directories."""
        port = self.make_port(with_tests=True)
        self.assertTrue(port.test_isdir('passes'))
        self.assertFalse(port.test_isdir('passes/text.html'))
        self.assertFalse(port.test_isdir('passes/does_not_exist.html'))
        self.assertFalse(port.test_isdir('passes/does_not_exist/'))
        self.assertTrue(port.test_isdir('virtual'))
        self.assertFalse(port.test_isdir('virtual/does_not_exist.html'))
        self.assertFalse(port.test_isdir('virtual/does_not_exist/'))
        self.assertFalse(
            port.test_isdir('virtual/virtual_passes/passes/text.html'))
        # Trailing slashes are tolerated for directories.
        self.assertTrue(port.test_isdir('virtual/virtual_empty_bases/'))
        self.assertTrue(port.test_isdir('virtual/virtual_empty_bases/dir'))
        self.assertFalse(
            port.test_isdir('virtual/virtual_empty_bases/dir/physical2.html'))
        self.assertFalse(
            port.test_isdir('virtual/virtual_empty_bases/does_not_exist/'))
def test_tests(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
self.assertIn('passes/text.html', tests)
self.assertIn('virtual/virtual_passes/passes/text.html', tests)
self.assertIn('virtual/virtual_empty_bases/physical1.html', tests)
self.assertIn('virtual/virtual_empty_bases/dir/physical2.html', tests)
tests = port.tests(['passes'])
self.assertIn('passes/text.html', tests)
self.assertIn('passes/virtual_passes/test-virtual-passes.html', tests)
self.assertNotIn('virtual/virtual_passes/passes/text.html', tests)
# crbug.com/880609: test trailing slashes
tests = port.tests(['virtual/virtual_passes'])
self.assertIn('virtual/virtual_passes/passes/test-virtual-passes.html',
tests)
self.assertIn(
'virtual/virtual_passes/passes_two/test-virtual-passes.html',
tests)
tests = port.tests(['virtual/virtual_passes/'])
self.assertIn('virtual/virtual_passes/passes/test-virtual-passes.html',
tests)
self.assertIn(
'virtual/virtual_passes/passes_two/test-virtual-passes.html',
tests)
tests = port.tests(['virtual/virtual_passes/passes'])
self.assertNotIn('passes/text.html', tests)
self.assertIn('virtual/virtual_passes/passes/test-virtual-passes.html',
tests)
self.assertNotIn(
'virtual/virtual_passes/passes_two/test-virtual-passes.html',
tests)
self.assertNotIn('passes/test-virtual-passes.html', tests)
self.assertNotIn(
'virtual/virtual_passes/passes/test-virtual-virtual/passes.html',
tests)
self.assertNotIn(
'virtual/virtual_passes/passes/virtual_passes/passes/test-virtual-passes.html',
tests)
tests = port.tests(
['virtual/virtual_passes/passes/test-virtual-passes.html'])
self.assertEquals(
['virtual/virtual_passes/passes/test-virtual-passes.html'], tests)
tests = port.tests(['virtual/virtual_empty_bases'])
self.assertEquals([
'virtual/virtual_empty_bases/physical1.html',
'virtual/virtual_empty_bases/dir/physical2.html'
], tests)
tests = port.tests(['virtual/virtual_empty_bases/dir'])
self.assertEquals(['virtual/virtual_empty_bases/dir/physical2.html'],
tests)
tests = port.tests(['virtual/virtual_empty_bases/dir/physical2.html'])
self.assertEquals(['virtual/virtual_empty_bases/dir/physical2.html'],
tests)
    def test_build_path(self):
        """_build_path() honors --build-directory and defaults to 'out'."""
        # Test for a protected method - pylint: disable=protected-access
        # Test that optional paths are used regardless of whether they exist.
        options = optparse.Values({
            'configuration': 'Release',
            'build_directory': 'xcodebuild'
        })
        self.assertEqual(
            self.make_port(options=options)._build_path(),
            '/mock-checkout/xcodebuild/Release')
        # Test that "out" is used as the default.
        options = optparse.Values({
            'configuration': 'Release',
            'build_directory': None
        })
        self.assertEqual(
            self.make_port(options=options)._build_path(),
            '/mock-checkout/out/Release')
def test_dont_require_http_server(self):
port = self.make_port()
self.assertEqual(port.requires_http_server(), False)
def test_can_load_actual_virtual_test_suite_file(self):
port = Port(SystemHost(), 'baseport')
# If this call returns successfully, we found and loaded the web_tests/VirtualTestSuites.
_ = port.virtual_test_suites()
    def test_good_virtual_test_suite_file(self):
        """A well-formed VirtualTestSuites file loads without raising."""
        port = self.make_port()
        port.host.filesystem.write_text_file(
            port.host.filesystem.join(port.web_tests_dir(),
                                      'VirtualTestSuites'),
            '[{"prefix": "bar", "bases": ["fast/bar"], "args": ["--bar"]}]')
        # If this call returns successfully, we found and loaded the web_tests/VirtualTestSuites.
        _ = port.virtual_test_suites()
    def test_duplicate_virtual_prefix_in_file(self):
        """Two suites sharing a prefix raise ValueError."""
        port = self.make_port()
        port.host.filesystem.write_text_file(
            port.host.filesystem.join(port.web_tests_dir(),
                                      'VirtualTestSuites'), '['
            '{"prefix": "bar", "bases": ["fast/bar"], "args": ["--bar"]},'
            '{"prefix": "bar", "bases": ["fast/foo"], "args": ["--bar"]}'
            ']')
        self.assertRaises(ValueError, port.virtual_test_suites)
def test_virtual_test_suite_file_is_not_json(self):
port = self.make_port()
port.host.filesystem.write_text_file(
port.host.filesystem.join(port.web_tests_dir(),
'VirtualTestSuites'), '{[{[')
self.assertRaises(ValueError, port.virtual_test_suites)
    def test_lookup_virtual_test_base(self):
        """lookup_virtual_test_base() maps virtual names to their base paths.

        Only the virtual prefix and declared bases are validated; the
        underlying test file need not exist.
        """
        port = self.make_port(with_tests=True)
        self.assertIsNone(port.lookup_virtual_test_base('non/virtual'))
        self.assertIsNone(port.lookup_virtual_test_base('passes/text.html'))
        self.assertIsNone(
            port.lookup_virtual_test_base('virtual/non-existing/test.html'))
        # lookup_virtual_test_base() checks virtual prefix and bases, but doesn't
        # check existence of test.
        self.assertEqual(
            'passes/text.html',
            port.lookup_virtual_test_base(
                'virtual/virtual_passes/passes/text.html'))
        self.assertEqual(
            'passes/any.html',
            port.lookup_virtual_test_base(
                'virtual/virtual_passes/passes/any.html'))
        self.assertEqual(
            'passes_two/any.html',
            port.lookup_virtual_test_base(
                'virtual/virtual_passes/passes_two/any.html'))
        self.assertEqual(
            'passes/',
            port.lookup_virtual_test_base('virtual/virtual_passes/passes/'))
        self.assertEqual(
            'passes/',
            port.lookup_virtual_test_base('virtual/virtual_passes/passes'))
        # The suite root alone has no single base to map to.
        self.assertIsNone(
            port.lookup_virtual_test_base('virtual/virtual_passes/'))
        self.assertIsNone(
            port.lookup_virtual_test_base('virtual/virtual_passes'))
        # 'failures' is not a specified base of virtual/virtual_passes
        self.assertIsNone(
            port.lookup_virtual_test_base(
                'virtual/virtual_passes/failures/unexpected/text.html'))
        self.assertEqual(
            'failures/unexpected/text.html',
            port.lookup_virtual_test_base(
                'virtual/virtual_failures/failures/unexpected/text.html'))
        # 'passes' is not a specified base of virtual/virtual_failures
        self.assertIsNone(
            port.lookup_virtual_test_base(
                'virtual/virtual_failures/passes/text.html'))
        # Partial match of base with multiple levels.
        self.assertEqual(
            'failures/',
            port.lookup_virtual_test_base(
                'virtual/virtual_failures/failures/'))
        self.assertEqual(
            'failures/',
            port.lookup_virtual_test_base('virtual/virtual_failures/failures'))
        self.assertIsNone(
            port.lookup_virtual_test_base('virtual/virtual_failures/'))
        self.assertIsNone(
            port.lookup_virtual_test_base('virtual/virtual_failures'))
        # Empty bases.
        self.assertIsNone(
            port.lookup_virtual_test_base(
                'virtual/virtual_empty_bases/physical1.html'))
        self.assertIsNone(
            port.lookup_virtual_test_base(
                'virtual/virtual_empty_bases/passes/text.html'))
        self.assertIsNone(
            port.lookup_virtual_test_base('virtual/virtual_empty_bases'))
    def test_args_for_test(self):
        """args_for_test() returns the suite's args for any virtual path form."""
        port = self.make_port(with_tests=True)
        # Non-virtual and unknown-virtual tests get no extra args.
        self.assertEqual([], port.args_for_test('non/virtual'))
        self.assertEqual([], port.args_for_test('passes/text.html'))
        self.assertEqual([],
                         port.args_for_test('virtual/non-existing/test.html'))
        self.assertEqual(
            ['--virtual-arg'],
            port.args_for_test('virtual/virtual_passes/passes/text.html'))
        self.assertEqual(
            ['--virtual-arg'],
            port.args_for_test('virtual/virtual_passes/passes/any.html'))
        # Directory forms, with or without trailing slash, also resolve.
        self.assertEqual(['--virtual-arg'],
                         port.args_for_test('virtual/virtual_passes/passes/'))
        self.assertEqual(['--virtual-arg'],
                         port.args_for_test('virtual/virtual_passes/passes'))
        self.assertEqual(['--virtual-arg'],
                         port.args_for_test('virtual/virtual_passes/'))
        self.assertEqual(['--virtual-arg'],
                         port.args_for_test('virtual/virtual_passes'))
def test_missing_virtual_test_suite_file(self):
port = self.make_port()
self.assertRaises(AssertionError, port.virtual_test_suites)
    def test_default_results_directory(self):
        """The default results directory is the build directory out/<target>."""
        port = self.make_port(
            options=optparse.Values({
                'target': 'Default',
                'configuration': 'Release'
            }))
        # By default the results directory is in the build directory: out/<target>.
        self.assertEqual(port.default_results_directory(),
                         '/mock-checkout/out/Default')
    def test_results_directory(self):
        """--results-directory is resolved relative to the current directory."""
        port = self.make_port(
            options=optparse.Values({
                'results_directory':
                'some-directory/results'
            }))
        # A results directory can be given as an option, and it is relative to current working directory.
        self.assertEqual(port.host.filesystem.cwd, '/')
        self.assertEqual(port.results_directory(), '/some-directory/results')
    def _assert_config_file_for_platform(self, port, platform, config_file):
        """Asserts the Apache config file chosen for the given OS name."""
        port.host.platform = MockPlatformInfo(os_name=platform)
        self.assertEqual(
            port._apache_config_file_name_for_platform(),  # pylint: disable=protected-access
            config_file)
def _assert_config_file_for_linux_distribution(self, port, distribution,
                                               config_file):
    """Helper: check the Apache config chosen for a Linux distribution."""
    port.host.platform = MockPlatformInfo(
        os_name='linux', linux_distribution=distribution)
    self.assertEqual(
        port._apache_config_file_name_for_platform(),  # pylint: disable=protected-access
        config_file)
def test_apache_config_file_name_for_platform(self):
    """Each OS / Linux distribution maps to its expected Apache 2.2 config."""
    port = self.make_port()
    port._apache_version = lambda: '2.2'  # pylint: disable=protected-access
    # Generic Linux falls back to the stock apache2 config.
    self._assert_config_file_for_platform(port, 'linux',
                                          'apache2-httpd-2.2.conf')
    # Known distributions get their own config; unknown ones use the stock
    # file (slackware below).
    distro_expectations = [
        ('arch', 'arch-httpd-2.2.conf'),
        ('debian', 'debian-httpd-2.2.conf'),
        ('fedora', 'fedora-httpd-2.2.conf'),
        ('slackware', 'apache2-httpd-2.2.conf'),
        ('redhat', 'redhat-httpd-2.2.conf'),
    ]
    for distribution, expected in distro_expectations:
        self._assert_config_file_for_linux_distribution(
            port, distribution, expected)
    # Non-Linux platforms (and unrecognized names) use the stock config.
    for platform in ('mac', 'win32', 'barf'):
        self._assert_config_file_for_platform(port, platform,
                                              'apache2-httpd-2.2.conf')
def test_skips_test_in_smoke_tests(self):
    """A test absent from the smoke-test list is skipped in smoke-only mode."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: True
    port.host.filesystem.write_text_file(port.path_to_smoke_tests_file(),
                                         'passes/text.html\n')
    self.assertTrue(port.skips_test('failures/expected/image.html'))
def test_skips_test_no_skip_smoke_tests_file(self):
    """Without a smoke-tests file, smoke-only mode skips nothing."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: True
    self.assertFalse(port.skips_test('failures/expected/image.html'))
def test_skips_test_port_doesnt_skip_smoke_tests(self):
    """Ports not in smoke-only mode do not skip tests."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: False
    self.assertFalse(port.skips_test('failures/expected/image.html'))
def test_skips_test_in_test_expectations(self):
    """A [ Skip ] line in TestExpectations does NOT make skips_test() true."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: False
    port.host.filesystem.write_text_file(
        port.path_to_generic_test_expectations_file(),
        'Bug(test) failures/expected/image.html [ Skip ]\n')
    # skips_test() only consults smoke tests and NeverFixTests, not the
    # generic expectations file.
    self.assertFalse(port.skips_test('failures/expected/image.html'))
def test_skips_test_in_never_fix_tests(self):
    """A [ Skip ] entry in NeverFixTests makes skips_test() true."""
    port = self.make_port(with_tests=True)
    port.default_smoke_test_only = lambda: False
    port.host.filesystem.write_text_file(
        port.path_to_never_fix_tests_file(),
        '# results: [ Skip ]\nfailures/expected/image.html [ Skip ]\n')
    self.assertTrue(port.skips_test('failures/expected/image.html'))
def test_split_webdriver_test_name(self):
    """'file>>subtest' splits into (file, subtest); no '>>' gives (file, None)."""
    self.assertEqual(
        Port.split_webdriver_test_name(
            "tests/accept_alert/accept.py>>foo"),
        ("tests/accept_alert/accept.py", "foo"))
    self.assertEqual(
        Port.split_webdriver_test_name("tests/accept_alert/accept.py"),
        ("tests/accept_alert/accept.py", None))
def test_split_webdriver_subtest_pytest_name(self):
    """'file::subtest' splits into (file, subtest); no '::' gives (file, None)."""
    self.assertEqual(
        Port.split_webdriver_subtest_pytest_name(
            "tests/accept_alert/accept.py::foo"),
        ("tests/accept_alert/accept.py", "foo"))
    self.assertEqual(
        Port.split_webdriver_subtest_pytest_name(
            "tests/accept_alert/accept.py"),
        ("tests/accept_alert/accept.py", None))
def test_add_webdriver_subtest_suffix(self):
    """Joins test and subtest with '>>'; a None subtest leaves the name alone."""
    joined = Port.add_webdriver_subtest_suffix("abd", "bar")
    self.assertEqual(joined, "abd>>bar")
    unchanged = Port.add_webdriver_subtest_suffix("abd", None)
    self.assertEqual(unchanged, "abd")
def test_add_webdriver_subtest_pytest_suffix(self):
    """Joins test and subtest with the pytest-style '::' separator."""
    self.assertEqual(
        Port.add_webdriver_subtest_pytest_suffix("abd", "bar"),
        "abd::bar")
def test_disable_system_font_check_and_nocheck_sys_deps(self):
    """--disable-system-font-check is added only with nocheck_sys_deps."""
    port = self.make_port()
    self.assertNotIn('--disable-system-font-check',
                     port.additional_driver_flags())
    port = self.make_port(
        options=optparse.Values({'nocheck_sys_deps': True}))
    self.assertIn('--disable-system-font-check',
                  port.additional_driver_flags())
def test_enable_tracing(self):
    """enable_tracing adds the trace-startup flags with a timestamped file."""
    options, _ = optparse.OptionParser().parse_args([])
    options.enable_tracing = '*,-blink'
    port = self.make_port(with_tests=True, options=options)
    # Freeze the timestamp so the generated trace file name is predictable.
    with mock.patch('time.strftime', return_value='TIME'):
        self.assertEqual([
            '--trace-startup=*,-blink',
            '--trace-startup-duration=0',
            '--trace-startup-file=trace_layout_test_non_virtual_TIME.json',
        ], port.args_for_test('non/virtual'))
def test_all_systems(self):
    """Port.ALL_SYSTEMS must agree with CONFIGURATION_SPECIFIER_MACROS."""
    # Every entry is a (version, architecture) pair.
    for system in Port.ALL_SYSTEMS:
        self.assertEqual(len(system), 2)
    all_systems = sorted(system[0] for system in Port.ALL_SYSTEMS)
    configuration_specifier_macros = sorted(
        version
        for macros in Port.CONFIGURATION_SPECIFIER_MACROS.values()
        for version in macros)
    self.assertListEqual(all_systems, configuration_specifier_macros)
def test_configuration_specifier_macros(self):
    # CONFIGURATION_SPECIFIER_MACROS should contain all SUPPORTED_VERSIONS
    # of each port. Must use real Port classes in this test.
    for port_name, versions in Port.CONFIGURATION_SPECIFIER_MACROS.items():
        port_class, _ = PortFactory.get_port_class(port_name)
        self.assertIsNotNone(port_class, port_name)
        self.assertListEqual(versions, list(port_class.SUPPORTED_VERSIONS))
class NaturalCompareTest(unittest.TestCase):
    """Tests for Port._natural_sort_key (numeric-aware string ordering)."""

    def setUp(self):
        self._port = TestPort(MockSystemHost())

    def assert_order(self, x, y, predicate):
        """Assert predicate holds between the natural sort keys of x and y."""
        self.assertTrue(
            predicate(self._port._natural_sort_key(x),
                      self._port._natural_sort_key(y)))

    def test_natural_compare(self):
        # Plain lexicographic cases.
        self.assert_order('a', 'a', operator.eq)
        self.assert_order('ab', 'a', operator.gt)
        self.assert_order('a', 'ab', operator.lt)
        self.assert_order('', '', operator.eq)
        self.assert_order('', 'ab', operator.lt)
        # Embedded numbers compare numerically ('2' < '10').
        self.assert_order('1', '2', operator.lt)
        self.assert_order('2', '1', operator.gt)
        self.assert_order('1', '10', operator.lt)
        self.assert_order('2', '10', operator.lt)
        self.assert_order('foo_1.html', 'foo_2.html', operator.lt)
        self.assert_order('foo_1.1.html', 'foo_2.html', operator.lt)
        self.assert_order('foo_1.html', 'foo_10.html', operator.lt)
        self.assert_order('foo_2.html', 'foo_10.html', operator.lt)
        self.assert_order('foo_23.html', 'foo_10.html', operator.gt)
        self.assert_order('foo_23.html', 'foo_100.html', operator.lt)
class KeyCompareTest(unittest.TestCase):
    """Tests for Port.test_key ordering of test paths."""

    def setUp(self):
        self._port = TestPort(MockSystemHost())

    def assert_cmp(self, x, y, result):
        # NOTE(review): relies on a `cmp` callable (the Python 2 builtin);
        # on Python 3 it must be defined elsewhere in this file -- confirm.
        self.assertEqual(
            cmp(self._port.test_key(x), self._port.test_key(y)), result)

    def assert_order(self, x, y, predicate):
        """Assert predicate holds between the test keys of x and y."""
        self.assertTrue(
            predicate(self._port.test_key(x), self._port.test_key(y)))

    def test_test_key(self):
        self.assert_order('/a', '/a', operator.eq)
        self.assert_order('/a', '/b', operator.lt)
        # Numeric path components compare numerically.
        self.assert_order('/a2', '/a10', operator.lt)
        self.assert_order('/a2/foo', '/a10/foo', operator.lt)
        self.assert_order('/a/foo11', '/a/foo2', operator.gt)
        self.assert_order('/ab', '/a/a/b', operator.lt)
        self.assert_order('/a/a/b', '/ab', operator.gt)
        self.assert_order('/foo-bar/baz', '/foo/baz', operator.lt)
class VirtualTestSuiteTest(unittest.TestCase):
    """Tests for VirtualTestSuite construction and validation."""

    def test_basic(self):
        suite = VirtualTestSuite(
            prefix='suite', bases=['base/foo', 'base/bar'], args=['--args'])
        self.assertEqual(suite.full_prefix, 'virtual/suite/')
        self.assertEqual(suite.bases, ['base/foo', 'base/bar'])
        self.assertEqual(suite.args, ['--args'])

    def test_empty_bases(self):
        # A suite with no bases is legal and keeps its prefix/args.
        suite = VirtualTestSuite(prefix='suite', bases=[], args=['--args'])
        self.assertEqual(suite.full_prefix, 'virtual/suite/')
        self.assertEqual(suite.bases, [])
        self.assertEqual(suite.args, ['--args'])

    def test_no_slash(self):
        # Prefixes must be a single path component; embedded '/' asserts.
        self.assertRaises(
            AssertionError,
            VirtualTestSuite,
            prefix='suite/bar',
            bases=['base/foo'],
            args=['--args'])
| |
import camera
import cv2
import gradients
import numpy as np
import pylab
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import line
from moviepy.editor import VideoFileClip
import lpf
import os.path
import cardetect
import image_tools as it
from scipy.ndimage.measurements import label
import os
# Hide GPUs from downstream libraries (the Keras/TF car detector).
# FIX: the previous `os.system('set CUDA_VISIBLE_DEVICES=""')` ran `set` in a
# child shell, which can never modify this process's environment; assign the
# variable directly instead.
os.environ['CUDA_VISIBLE_DEVICES'] = ''
font = cv2.FONT_HERSHEY_SIMPLEX  # font for the on-frame HUD text
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30.0 / 720.0  # meters per pixel in y dimension
xm_per_pix = 3.7 / 700.0  # meters per pixel in x dimension
class ImageProcessing:
    """Video-frame pipeline combining lane finding and car detection."""

    def __init__(self, img_size, calibration_set_pattern):
        """Load (or compute and cache) camera calibration and build helpers.

        img_size: (width, height) of the frames that will be processed.
        calibration_set_pattern: glob pattern of chessboard calibration
            images; only used when the cached calibration file is missing.
        """
        self.img_size = img_size
        packfile = './calibration.pk'
        if os.path.isfile(packfile):
            # Reuse the cached calibration instead of recomputing it.
            print('loading calibration')
            self.cam = camera.Camera()
            self.cam.LoadCalibration(packfile)
        else:
            calibration_set = camera.CameraCalibrationSet(calibration_set_pattern)
            self.cam = camera.Camera()
            self.cam.LoadCalibrationSet(calibration_set)
            self.cam.CalibrateFor(self.img_size)
            # NOTE(review): "SaveCalibraton" looks misspelled but must match
            # the camera module's API -- confirm before renaming.
            self.cam.SaveCalibraton(packfile)
        # Bird's-eye perspective transform: fractions of image height/width
        # describing the warped trapezoid.
        builder = camera.ViewPointBuilder.New()
        builder.SetHorizonLine(0.65)
        builder.SetBottomLine(0.96)
        builder.SetNearView(0.8)
        builder.SetFarView(0.15)
        self.view = builder.BuildView(img_size)
        self.locator = line.LaneLocator(img_size)
        self.last_lane = None  # previous frame's lane, reused by SmartLocate
        self.ring = lpf.Smoother(0.4)  # low-pass filter over successive lane fits
        self.detector = cardetect.Detector()
        self.detector.Load('model.h5')

    def Filter(self, image):
        """Return a uint8 binary mask of likely lane-line pixels.

        Combines a yellow/white HSV color mask with an x-direction Sobel
        gradient threshold on the V (brightness) channel.
        """
        hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        yellow_low = np.array([10, 80, 100])
        yellow_high = np.array([40, 255, 255])
        yellow_mask = cv2.inRange(hsv, yellow_low, yellow_high)
        white_low = np.array([0, 0, 220], dtype=np.uint8)
        white_high = np.array([180, 255, 255], dtype=np.uint8)
        white_mask = cv2.inRange(hsv, white_low, white_high)
        # Horizontal gradient on the brightness channel responds to the
        # near-vertical lane edges.
        sobel_kernel = 3
        gray = hsv[:, :, 2]
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
        abs_sobel = np.absolute(sobel)
        scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))
        binary = np.zeros_like(image[:, :, 0], dtype=np.uint8)
        threshold_min = 5
        threshold_max = 255
        binary[(scaled_sobel >= threshold_min)
               & (scaled_sobel <= threshold_max)
               & ((white_mask != 0) | (yellow_mask != 0))] = 1
        return binary

    def ApplyLane(self, original, lane):
        """Blend *lane* onto *original* and write the curvature/offset HUD."""
        out = np.zeros_like(original[:, :, :], dtype=np.uint8)
        lane.Draw(out)
        # The lane was drawn in bird's-eye space; warp it back before blending.
        reverted = self.view.RevertBirdView(out)
        result = cv2.addWeighted(original, 1, reverted, 0.3, 0)
        img_size = (original.shape[1], original.shape[0])
        lr, rr, offset = lane.CalaculateRadiuses(img_size, xm_per_pix, ym_per_pix)
        text = 'left: {:6d}m, right: {:6d}m, offset: {:4.2f}m'.format(int(lr), int(rr), offset)
        cv2.putText(result, text, (10, 100), font, 1, (255, 255, 255), 2)
        return result

    def UseSmartLocate(self, original):
        """Process one frame: detect cars and the lane; return annotated frame."""
        img = self.cam.Undistort(original)
        heatmap, labels = self.DetectCars(img)
        binary = self.Filter(img)
        binary_bv = self.view.MakeBirdView(binary)
        lane = self.locator.SmartLocate(binary_bv, self.last_lane)  # previous fit or sliding window
        lane = self.ring.ApplyLPF(lane)
        result = self.ApplyLane(img, lane)
        result = it.draw_labeled_bboxes(result, labels)
        # Picture-in-picture heatmap in the top-right corner of the frame.
        heatmap = it.binary_to_color(cv2.resize(heatmap, dsize=(240, 140)))
        y_offset = 60
        x_offset = 1000
        result[y_offset:y_offset + heatmap.shape[0],
               x_offset:x_offset + heatmap.shape[1], :] = heatmap
        self.last_lane = lane
        return result

    #
    # step by step process to capture images
    #
    def Demo(self, original):
        """Run the lane pipeline step by step, saving an image per stage."""
        it.save_image(original, '01_first_image_from_clip.png')
        img = self.cam.Undistort(original)
        it.save_image(img, '02_undistorted.png')
        img_bv = self.view.MakeBirdView(img)
        it.save_image(img_bv, '03_bird_view.png')
        binary = self.Filter(img)
        it.save_image(it.binary_to_color(binary), '04_filtered_by_sobel_and_color.png')
        binary_bv = self.view.MakeBirdView(binary)
        it.save_image(it.binary_to_color(binary_bv), '05_filtered_bird_view.png')
        lane = self.locator.Locate(binary_bv)  # search using sliding windows
        out_img = lane.DrawSearch(binary_bv)
        it.save_image(out_img, '06_sliding_windows_and_fitted_polynom.png')
        result = self.ApplyLane(original, lane)
        out_img = lane.DrawSearch(binary_bv)
        it.save_image(result, '07_lane_applied_to_original.png')
        lane = self.locator.Adjust(binary_bv, lane)  # search using previous fit
        # NOTE(review): '08' saves the pre-adjust drawing (out_img is not
        # regenerated after Adjust) -- confirm that is intentional.
        it.save_image(out_img, '08_fitting_adjusted.png')
        self.last_lane = lane
        return result

    #
    # step by step process to run car detection
    #
    def DetectCarsDemo(self, original):
        """Run car detection step by step, saving an image per stage."""
        img = self.cam.Undistort(original)
        it.save_image(img, '10_undistorted.png')
        self.PrepareDetection(img)
        heat = np.zeros_like(img[:, :, 0], dtype=np.float32)
        detector_expect = (self.detector.size, self.detector.size)
        z = 1
        for boxes in self.slides:
            it.save_image(it.draw_boxes(img, boxes), "12_" + str(z) + "_boxes.png")
            windows = it.split_image(img, boxes, resize_to=detector_expect)
            predictions = self.detector.Detect(windows)
            heat = it.add_heat_value(heat, boxes, predictions)
            it.save_image(heat, "13_" + str(z) + "_heat.png")
            z += 1
        heat = it.apply_threshold(heat, self.detection_threshold)
        it.save_image(heat, "14_total_heat.png")
        # Find final boxes from heatmap using label function
        labels = label(heat)
        draw_img = it.draw_labeled_bboxes(img, labels)
        it.save_image(draw_img, "15_detected.png")
        return draw_img

    def PrepareDetection(self, img):
        """Precompute sliding-window grids and smoothing state for detection."""
        self.detection_threshold = 6.5
        self.heatmap_lpf = lpf.HeatmapSmoother(0.8)
        self.heatmap_ave = lpf.HeatmapAverege()
        sizes = [64, 128]  # search window edge lengths in pixels
        self.slides = []
        for box_size in sizes:
            boxes = it.slide_window(img, y_start_stop=[330, 650],
                                    xy_window=(box_size, box_size),
                                    xy_overlap=(0.75, 0.75))
            print(len(boxes))
            self.slides.append(boxes)

    def DetectCars(self, img):
        """Return (heatmap, labels) of detected cars in *img*.

        FIX: np.float was a deprecated alias of the builtin float and was
        removed in NumPy 1.20; np.float64 is the explicit equivalent.
        """
        heat = np.zeros_like(img[:, :, 0], dtype=np.float64)
        detector_expect = (self.detector.size, self.detector.size)
        for boxes in self.slides:
            windows = np.asarray(it.split_image(img, boxes, resize_to=detector_expect))
            predictions = self.detector.Detect(windows)
            heat = it.add_heat_value(heat, boxes, predictions)
        # Binarize the raw heat, then low-pass filter across frames so that
        # single-frame false positives fade out before being labeled.
        dig = np.zeros_like(img[:, :, 0], dtype=np.float64)
        dig[heat > self.detection_threshold] = 1.0
        dig = self.heatmap_lpf.ApplyLPF(dig)
        dig[dig < 0.7] = 0.0
        heat = dig
        return heat, label(heat)
def DemoCalibration(calibration_set_pattern):
    """Calibrate from the chessboard set and save an undistorted sample image.

    calibration_set_pattern: glob pattern of chessboard calibration images.
    """
    calibration_set = camera.CameraCalibrationSet(calibration_set_pattern)
    cam = camera.Camera()
    cam.LoadCalibrationSet(calibration_set)
    # FIX: was `original=original = mpimg.imread(...)` -- an accidental
    # chained double assignment.
    original = mpimg.imread(calibration_set.ImageAt(0))
    img_size = (original.shape[1], original.shape[0])
    cam.CalibrateFor(img_size)
    img = cam.Undistort(original)
    it.save_image(img, '00_undistorted.png')
def ProcessTestImage(calibration_set_pattern):
    """Run the step-by-step lane demo on a single test image and display it."""
    # `dataf` is the module-level data directory root.
    test = dataf + 'test_images/test1.jpg'
    original = mpimg.imread(test)
    img_size = (original.shape[1], original.shape[0])
    processing = ImageProcessing(img_size, calibration_set_pattern)
    result = processing.Demo(original)
    it.show_images(original, result)
def ProcessDetectionTestImage(calibration_set_pattern):
    """Run the step-by-step car-detection demo on a test image and display it."""
    test = dataf + 'test_images/test1.jpg'
    original = mpimg.imread(test)
    img_size = (original.shape[1], original.shape[0])
    processing = ImageProcessing(img_size, calibration_set_pattern)
    result = processing.DetectCarsDemo(original)
    it.show_images(original, result)
def ProcessVideoClip(calibration_set_pattern):
    """Annotate the whole project video and write it to out/lane_detected.mp4."""
    videoin = dataf + 'project_video.mp4'
    clip = VideoFileClip(videoin, audio=False)  # .subclip(37,43)
    # Use the first frame to size the pipeline.
    original = clip.make_frame(0)
    img_size = (original.shape[1], original.shape[0])
    print(img_size)
    processing = ImageProcessing(img_size, calibration_set_pattern)
    processing.PrepareDetection(original)

    def process_clip_frame(image):
        return processing.UseSmartLocate(image)

    # Presumably warms the smoothing state before rendering; the return
    # value is unused -- TODO confirm this call is needed.
    result = processing.UseSmartLocate(original)
    lane_found_clip = clip.fl_image(process_clip_frame)
    lane_found_clip.write_videofile('out/lane_detected.mp4', audio=False)
def TroubleshootVideoClip(calibration_set_pattern):
    """Run the step-by-step demo on one problematic frame (t=41.4s)."""
    videoin = dataf + 'project_video.mp4'
    clip = VideoFileClip(videoin, audio=False)
    original = clip.make_frame(41.4)
    img_size = (original.shape[1], original.shape[0])
    processing = ImageProcessing(img_size, calibration_set_pattern)
    processing.Demo(original)
#### source of data
# Root of the CarND-Advanced-Lane-Lines repository checkout.
dataf = '../../CarND-Advanced-Lane-Lines/'
calibration_set_pattern = dataf + 'camera_cal/c*.jpg'
# Entry point: uncomment/swap to choose which pipeline to run.
ProcessDetectionTestImage(calibration_set_pattern)
# ProcessVideoClip(calibration_set_pattern)
| |
from OpenGL.GL import *
from OpenGL.GL import shaders
import re
## For centralizing and managing vertex/fragment shader programs.
def initShaders():
    """Build the global library of named ShaderProgram objects.

    Called once at import time.  Each ShaderProgram registers itself in
    ShaderProgram.names; compilation is deferred until a program is first
    used (see ShaderProgram.program).
    """
    global Shaders
    Shaders = [
        ShaderProgram(None, []),  # no-op program: fixed-function pipeline
        ## increases fragment alpha as the normal turns orthogonal to the view
        ## this is useful for viewing shells that enclose a volume (such as isosurfaces)
        ShaderProgram('balloon', [
            VertexShader("""
                varying vec3 normal;
                void main() {
                    // compute here for use in fragment shader
                    normal = normalize(gl_NormalMatrix * gl_Normal);
                    gl_FrontColor = gl_Color;
                    gl_BackColor = gl_Color;
                    gl_Position = ftransform();
                }
            """),
            FragmentShader("""
                varying vec3 normal;
                void main() {
                    vec4 color = gl_Color;
                    color.w = min(color.w + 2.0 * color.w * pow(normal.x*normal.x + normal.y*normal.y, 5.0), 1.0);
                    gl_FragColor = color;
                }
            """)
        ]),
        ## colors fragments based on face normals relative to view
        ## This means that the colors will change depending on how the view is rotated
        ShaderProgram('viewNormalColor', [
            VertexShader("""
                varying vec3 normal;
                void main() {
                    // compute here for use in fragment shader
                    normal = normalize(gl_NormalMatrix * gl_Normal);
                    gl_FrontColor = gl_Color;
                    gl_BackColor = gl_Color;
                    gl_Position = ftransform();
                }
            """),
            FragmentShader("""
                varying vec3 normal;
                void main() {
                    vec4 color = gl_Color;
                    color.x = (normal.x + 1.0) * 0.5;
                    color.y = (normal.y + 1.0) * 0.5;
                    color.z = (normal.z + 1.0) * 0.5;
                    gl_FragColor = color;
                }
            """)
        ]),
        ## colors fragments based on absolute face normals.
        ShaderProgram('normalColor', [
            VertexShader("""
                varying vec3 normal;
                void main() {
                    // compute here for use in fragment shader
                    normal = normalize(gl_Normal);
                    gl_FrontColor = gl_Color;
                    gl_BackColor = gl_Color;
                    gl_Position = ftransform();
                }
            """),
            FragmentShader("""
                varying vec3 normal;
                void main() {
                    vec4 color = gl_Color;
                    color.x = (normal.x + 1.0) * 0.5;
                    color.y = (normal.y + 1.0) * 0.5;
                    color.z = (normal.z + 1.0) * 0.5;
                    gl_FragColor = color;
                }
            """)
        ]),
        ## very simple simulation of lighting.
        ## The light source position is always relative to the camera.
        ShaderProgram('shaded', [
            VertexShader("""
                varying vec3 normal;
                void main() {
                    // compute here for use in fragment shader
                    normal = normalize(gl_NormalMatrix * gl_Normal);
                    gl_FrontColor = gl_Color;
                    gl_BackColor = gl_Color;
                    gl_Position = ftransform();
                }
            """),
            FragmentShader("""
                varying vec3 normal;
                void main() {
                    float p = dot(normal, normalize(vec3(1.0, -1.0, -1.0)));
                    p = p < 0. ? 0. : p * 0.8;
                    vec4 color = gl_Color;
                    color.x = color.x * (0.2 + p);
                    color.y = color.y * (0.2 + p);
                    color.z = color.z * (0.2 + p);
                    gl_FragColor = color;
                }
            """)
        ]),
        ## colors get brighter near edges of object
        ShaderProgram('edgeHilight', [
            VertexShader("""
                varying vec3 normal;
                void main() {
                    // compute here for use in fragment shader
                    normal = normalize(gl_NormalMatrix * gl_Normal);
                    gl_FrontColor = gl_Color;
                    gl_BackColor = gl_Color;
                    gl_Position = ftransform();
                }
            """),
            FragmentShader("""
                varying vec3 normal;
                void main() {
                    vec4 color = gl_Color;
                    float s = pow(normal.x*normal.x + normal.y*normal.y, 2.0);
                    color.x = color.x + s * (1.0-color.x);
                    color.y = color.y + s * (1.0-color.y);
                    color.z = color.z + s * (1.0-color.z);
                    gl_FragColor = color;
                }
            """)
        ]),
        ## colors fragments by z-value.
        ## This is useful for coloring surface plots by height.
        ## This shader uses a uniform called "colorMap" to determine how to map the colors:
        ##       red   = pow(z * colorMap[0] + colorMap[1], colorMap[2])
        ##       green = pow(z * colorMap[3] + colorMap[4], colorMap[5])
        ##       blue  = pow(z * colorMap[6] + colorMap[7], colorMap[8])
        ## (set the values like this: shader['uniformMap'] = array([...])
        ShaderProgram('heightColor', [
            VertexShader("""
                varying vec4 pos;
                void main() {
                    gl_FrontColor = gl_Color;
                    gl_BackColor = gl_Color;
                    pos = gl_Vertex;
                    gl_Position = ftransform();
                }
            """),
            FragmentShader("""
                uniform float colorMap[9];
                varying vec4 pos;
                //out vec4 gl_FragColor;   // only needed for later glsl versions
                //in vec4 gl_Color;
                void main() {
                    vec4 color = gl_Color;
                    color.x = colorMap[0] * (pos.z + colorMap[1]);
                    if (colorMap[2] != 1.0)
                        color.x = pow(color.x, colorMap[2]);
                    color.x = color.x < 0. ? 0. : (color.x > 1. ? 1. : color.x);
                    color.y = colorMap[3] * (pos.z + colorMap[4]);
                    if (colorMap[5] != 1.0)
                        color.y = pow(color.y, colorMap[5]);
                    color.y = color.y < 0. ? 0. : (color.y > 1. ? 1. : color.y);
                    color.z = colorMap[6] * (pos.z + colorMap[7]);
                    if (colorMap[8] != 1.0)
                        color.z = pow(color.z, colorMap[8]);
                    color.z = color.z < 0. ? 0. : (color.z > 1. ? 1. : color.z);
                    color.w = 1.0;
                    gl_FragColor = color;
                }
            """),
        ], uniforms={'colorMap': [1, 1, 1, 1, 0.5, 1, 1, 0, 1]}),
        ShaderProgram('pointSprite', [  ## allows specifying point size using normal.x
            ## See:
            ##
            ##  http://stackoverflow.com/questions/9609423/applying-part-of-a-texture-sprite-sheet-texture-map-to-a-point-sprite-in-ios
            ##  http://stackoverflow.com/questions/3497068/textured-points-in-opengl-es-2-0
            ##
            ##
            VertexShader("""
                void main() {
                    gl_FrontColor=gl_Color;
                    gl_PointSize = gl_Normal.x;
                    gl_Position = ftransform();
                }
            """),
            #FragmentShader("""
                ##version 120
                #uniform sampler2D texture;
                #void main ( )
                #{
                #gl_FragColor = texture2D(texture, gl_PointCoord) * gl_Color;
                #}
            #""")
        ]),
    ]
# Cache for compiled shader programs (not populated in this module).
CompiledShaderPrograms = {}

def getShaderProgram(name):
    """Return the registered ShaderProgram named *name* (KeyError if absent)."""
    return ShaderProgram.names[name]
class Shader(object):
    """A single GLSL shader stage: holds source code and compiles lazily."""

    def __init__(self, shaderType, code):
        self.shaderType = shaderType  # GL_VERTEX_SHADER or GL_FRAGMENT_SHADER
        self.code = code
        self.compiled = None  # cached result of shaders.compileShader

    def shader(self):
        """Compile (once) and return the GL shader object.

        On compile failure, re-raises with the GLSL source annotated by the
        driver's error messages, keyed to their reported line numbers.
        """
        if self.compiled is None:
            try:
                self.compiled = shaders.compileShader(self.code, self.shaderType)
            except RuntimeError as exc:
                ## Format compile errors a bit more nicely
                if len(exc.args) == 3:
                    err, code, typ = exc.args
                    if not err.startswith('Shader compile failure'):
                        raise
                    code = code[0].split('\n')
                    err, c, msgs = err.partition(':')
                    err = err + '\n'
                    msgs = msgs.split('\n')
                    # errNums[i] accumulates the message indexes that refer
                    # to source line i+1.
                    errNums = [()] * len(code)
                    for i, msg in enumerate(msgs):
                        msg = msg.strip()
                        if msg == '':
                            continue
                        # Driver messages look like "0:12(3): ..." -- the
                        # second group is the source line number.
                        m = re.match(r'(\d+\:)?\d+\((\d+)\)', msg)
                        if m is not None:
                            line = int(m.groups()[1])
                            errNums[line-1] = errNums[line-1] + (str(i+1),)
                            #code[line-1] = '%d\t%s' % (i+1, code[line-1])
                        err = err + "%d %s\n" % (i+1, msg)
                    # Left-align a column of message indexes next to each
                    # source line.
                    errNums = [','.join(n) for n in errNums]
                    maxlen = max(map(len, errNums))
                    code = [errNums[i] + " "*(maxlen-len(errNums[i])) + line for i, line in enumerate(code)]
                    err = err + '\n'.join(code)
                    raise Exception(err)
                else:
                    raise
        return self.compiled
class VertexShader(Shader):
    """Convenience wrapper: a Shader compiled as GL_VERTEX_SHADER."""
    def __init__(self, code):
        super(VertexShader, self).__init__(GL_VERTEX_SHADER, code)
class FragmentShader(Shader):
    """Convenience wrapper: a Shader compiled as GL_FRAGMENT_SHADER."""
    def __init__(self, code):
        super(FragmentShader, self).__init__(GL_FRAGMENT_SHADER, code)
class ShaderProgram(object):
    """A named set of shader stages, linked lazily into one GL program.

    Use as a context manager: entering installs the program and uploads any
    stored uniform values; exiting restores the fixed-function pipeline.
    """
    names = {}  # global registry: program name -> ShaderProgram instance

    def __init__(self, name, shaders, uniforms=None):
        self.name = name
        ShaderProgram.names[name] = self
        self.shaders = shaders
        self.prog = None  # linked program id; -1 marks a failed link
        self.blockData = {}
        self.uniformData = {}

        ## parse extra options from the shader definition
        if uniforms is not None:
            for k, v in uniforms.items():
                self[k] = v

    def setBlockData(self, blockName, data):
        """Store (or, when data is None, delete) uniform-block buffer data."""
        if data is None:
            del self.blockData[blockName]
        else:
            self.blockData[blockName] = data

    def setUniformData(self, uniformName, data):
        """Store (or, when data is None, delete) a uniform variable's value."""
        if data is None:
            del self.uniformData[uniformName]
        else:
            self.uniformData[uniformName] = data

    def __setitem__(self, item, val):
        # shader['name'] = value is shorthand for setUniformData.
        self.setUniformData(item, val)

    def __delitem__(self, item):
        self.setUniformData(item, None)

    def program(self):
        """Compile and link once; return the GL program id (-1 after failure)."""
        if self.prog is None:
            try:
                compiled = [s.shader() for s in self.shaders]  ## compile all shaders
                self.prog = shaders.compileProgram(*compiled)  ## compile program
            except:
                # Remember the failure so we do not retry every frame.
                self.prog = -1
                raise
        return self.prog

    def __enter__(self):
        if len(self.shaders) > 0 and self.program() != -1:
            glUseProgram(self.program())

            try:
                ## load uniform values into program
                for uniformName, data in self.uniformData.items():
                    loc = self.uniform(uniformName)
                    if loc == -1:
                        raise Exception('Could not find uniform variable "%s"' % uniformName)
                    glUniform1fv(loc, len(data), data)

                ### bind buffer data to program blocks
                #if len(self.blockData) > 0:
                    #bindPoint = 1
                    #for blockName, data in self.blockData.items():
                        ### Program should have a uniform block declared:
                        ###
                        ### layout (std140) uniform blockName {
                        ###     vec4 diffuse;
                        ### };

                        ### pick any-old binding point. (there are a limited number of these per-program
                        #bindPoint = 1

                        ### get the block index for a uniform variable in the shader
                        #blockIndex = glGetUniformBlockIndex(self.program(), blockName)

                        ### give the shader block a binding point
                        #glUniformBlockBinding(self.program(), blockIndex, bindPoint)

                        ### create a buffer
                        #buf = glGenBuffers(1)
                        #glBindBuffer(GL_UNIFORM_BUFFER, buf)
                        #glBufferData(GL_UNIFORM_BUFFER, size, data, GL_DYNAMIC_DRAW)
                        ### also possible to use glBufferSubData to fill parts of the buffer

                        ### bind buffer to the same binding point
                        #glBindBufferBase(GL_UNIFORM_BUFFER, bindPoint, buf)
            except:
                # Uniform upload failed: restore fixed-function state before
                # propagating the error.
                glUseProgram(0)
                raise

    def __exit__(self, *args):
        # NOTE(review): this unbinds unconditionally even if __enter__ never
        # installed the program (failed link) -- harmless but asymmetric.
        if len(self.shaders) > 0:
            glUseProgram(0)

    def uniform(self, name):
        """Return the location integer for a uniform variable in this program"""
        return glGetUniformLocation(self.program(), name)

    #def uniformBlockInfo(self, blockName):
        #blockIndex = glGetUniformBlockIndex(self.program(), blockName)
        #count = glGetActiveUniformBlockiv(self.program(), blockIndex, GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS)
        #indices = []
        #for i in range(count):
            #indices.append(glGetActiveUniformBlockiv(self.program(), blockIndex, GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES))
class HeightColorShader(ShaderProgram):
    """Experimental subclass wiring a std140 uniform block for height coloring.

    NOTE(review): __enter__ references names `size` and `data` that are not
    defined in any visible scope (calling it would raise NameError), never
    installs the program, and does not chain to ShaderProgram.__enter__.
    This looks like unfinished scratch code -- confirm before using.
    """
    def __enter__(self):
        ## Program should have a uniform block declared:
        ##
        ## layout (std140) uniform blockName {
        ##     vec4 diffuse;
        ##     vec4 ambient;
        ## };

        ## pick any-old binding point. (there are a limited number of these per-program
        bindPoint = 1

        ## get the block index for a uniform variable in the shader
        blockIndex = glGetUniformBlockIndex(self.program(), "blockName")

        ## give the shader block a binding point
        glUniformBlockBinding(self.program(), blockIndex, bindPoint)

        ## create a buffer
        buf = glGenBuffers(1)
        glBindBuffer(GL_UNIFORM_BUFFER, buf)
        glBufferData(GL_UNIFORM_BUFFER, size, data, GL_DYNAMIC_DRAW)
        ## also possible to use glBufferSubData to fill parts of the buffer

        ## bind buffer to the same binding point
        glBindBufferBase(GL_UNIFORM_BUFFER, bindPoint, buf)
# Build the shader library as soon as this module is imported.
initShaders()
| |
# -*- coding: utf-8 -*-
"""
celery.app.base
~~~~~~~~~~~~~~~
Application Base Class.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import warnings
from collections import deque
from contextlib import contextmanager
from copy import deepcopy
from functools import wraps
from billiard.util import register_after_fork
from kombu.clocks import LamportClock
from kombu.utils import cached_property
from celery import platforms
from celery.exceptions import AlwaysEagerIgnored
from celery.loaders import get_loader_cls
from celery.local import PromiseProxy, maybe_evaluate
from celery.state import _task_stack, _tls, get_current_app
from celery.utils.functional import first
from celery.utils.imports import instantiate, symbol_by_name
from .annotations import prepare as prepare_annotations
from .builtins import shared_task, load_shared_tasks
from .defaults import DEFAULTS, find_deprecated_settings
from .registry import TaskRegistry
from .utils import AppPickler, Settings, bugreport, _unpickle_app
def _unpickle_appattr(reverse_name, args):
    """Look up *reverse_name* on the current app and call it with *args*.

    Used when unpickling app-bound attributes in another process.
    """
    attr = get_current_app()._rgetattr(reverse_name)
    return attr(*args)
class Celery(object):
    """Celery application: task registry, configuration and subsystems."""
    Pickler = AppPickler  # pickling helper used to send the app to children

    SYSTEM = platforms.SYSTEM
    IS_OSX, IS_WINDOWS = platforms.IS_OSX, platforms.IS_WINDOWS

    # Default implementation classes for the app's subsystems; each can be
    # overridden per-instance via the matching __init__ argument.
    amqp_cls = "celery.app.amqp:AMQP"
    backend_cls = None
    events_cls = "celery.events:Events"
    loader_cls = "celery.loaders.app:AppLoader"
    log_cls = "celery.app.log:Logging"
    control_cls = "celery.app.control:Control"
    registry_cls = TaskRegistry

    _pool = None
def __init__(self, main=None, loader=None, backend=None,
             amqp=None, events=None, log=None, control=None,
             set_as_current=True, accept_magic_kwargs=False,
             tasks=None, broker=None, include=None, **kwargs):
    """Create the application.

    main: name of the main module (prefix for automatic task names).
    loader/backend/amqp/events/log/control: override the corresponding
        *_cls class attributes.
    set_as_current: register this app as the process-wide current app.
    tasks: an existing TaskRegistry (or mapping) to reuse.
    broker/include: shortcuts stored as config (BROKER_URL/CELERY_IMPORTS).
    """
    self.clock = LamportClock()
    self.main = main
    self.amqp_cls = amqp or self.amqp_cls
    self.backend_cls = backend or self.backend_cls
    self.events_cls = events or self.events_cls
    self.loader_cls = loader or self.loader_cls
    self.log_cls = log or self.log_cls
    self.control_cls = control or self.control_cls
    self.set_as_current = set_as_current
    self.registry_cls = symbol_by_name(self.registry_cls)
    self.accept_magic_kwargs = accept_magic_kwargs

    self.finalized = False
    self._pending = deque()  # task proxies awaiting finalize()
    self._tasks = tasks
    if not isinstance(self._tasks, TaskRegistry):
        self._tasks = TaskRegistry(self._tasks or {})

    # these options are moved to the config to
    # simplify pickling of the app object.
    self._preconf = {}
    if broker:
        self._preconf["BROKER_URL"] = broker
    if include:
        self._preconf["CELERY_IMPORTS"] = include

    if self.set_as_current:
        self.set_current()
    self.on_init()
def set_current(self):
    """Make this the process-wide current app (thread-local)."""
    _tls.current_app = self
def on_init(self):
    """Optional callback called at init."""
    pass
def start(self, argv=None):
    """Run the `celery` umbrella command with *argv* (defaults to sys.argv)."""
    return instantiate("celery.bin.celery:CeleryCommand", app=self) \
                .execute_from_commandline(argv)
def worker_main(self, argv=None):
    """Run the `celeryd` worker command with *argv* (defaults to sys.argv)."""
    return instantiate("celery.bin.celeryd:WorkerCommand", app=self) \
                .execute_from_commandline(argv)
def task(self, *args, **opts):
    """Creates new task class from any callable.

    Supports both bare ``@task`` and parameterized ``@task(...)`` usage.
    Unless running in accept_magic_kwargs compat mode, returns a lazy
    PromiseProxy that is evaluated on first use (after finalize()).
    """
    def inner_create_task_cls(shared=True, filter=None, **opts):

        def _create_task_cls(fun):
            if shared:
                # Register a constructor so other apps can recreate this
                # task for themselves.
                cons = lambda app: app._task_from_fun(fun, **opts)
                cons.__name__ = fun.__name__
                shared_task(cons)
            if self.accept_magic_kwargs:  # compat mode
                task = self._task_from_fun(fun, **opts)
                if filter:
                    task = filter(task)
                return task

            # return a proxy object that is only evaluated when first used
            promise = PromiseProxy(self._task_from_fun, (fun, ), opts)
            self._pending.append(promise)
            if filter:
                return filter(promise)
            return promise

        return _create_task_cls

    if len(args) == 1 and callable(args[0]):
        # Bare decorator form: @app.task
        return inner_create_task_cls(**opts)(*args)
    return inner_create_task_cls(**opts)
def _task_from_fun(self, fun, **options):
    """Create a Task subclass wrapping *fun* and return its bound instance."""
    base = options.pop("base", None) or self.Task

    T = type(fun.__name__, (base, ), dict({
            "app": self,
            "accept_magic_kwargs": False,
            "run": staticmethod(fun),
            "__doc__": fun.__doc__,
            "__module__": fun.__module__}, **options))()
    # Instantiating the class registered it; return the global instance.
    task = self._tasks[T.name]  # return global instance.
    task.bind(self)
    return task
def finalize(self):
    """Evaluate all pending task proxies and bind every task to this app.

    Idempotent; only the first call does any work.
    """
    if not self.finalized:
        self.finalized = True
        load_shared_tasks(self)

        pending = self._pending
        while pending:
            maybe_evaluate(pending.pop())

        # Python 2 dict API (itervalues).
        for task in self._tasks.itervalues():
            task.bind(self)
def config_from_object(self, obj, silent=False):
    """Load configuration from an object/module, resetting the cached conf."""
    del(self.conf)  # invalidate the cached_property so it is rebuilt
    return self.loader.config_from_object(obj, silent=silent)
def config_from_envvar(self, variable_name, silent=False):
    """Load configuration from the module named by an environment variable."""
    del(self.conf)  # invalidate the cached_property so it is rebuilt
    return self.loader.config_from_envvar(variable_name, silent=silent)
def config_from_cmdline(self, argv, namespace="celery"):
    """Merge ``--namespace.key=value`` style command-line settings into conf."""
    self.conf.update(self.loader.cmdline_config_parser(argv, namespace))
def send_task(self, name, args=None, kwargs=None, countdown=None,
              eta=None, task_id=None, publisher=None, connection=None,
              result_cls=None, expires=None, queues=None, **options):
    """Send a task message by name and return a result handle.

    name: fully-qualified task name (the task code need not be local).
    result_cls: result class, defaulting to self.AsyncResult.
    Remaining options are routed through the amqp Router.
    """
    if self.conf.CELERY_ALWAYS_EAGER:  # pragma: no cover
        # Only the task *name* is known here, so eager (local) execution
        # is impossible; warn instead of silently ignoring the setting.
        warnings.warn(AlwaysEagerIgnored(
            "CELERY_ALWAYS_EAGER has no effect on send_task"))

    result_cls = result_cls or self.AsyncResult
    router = self.amqp.Router(queues)
    options.setdefault("compression",
                       self.conf.CELERY_MESSAGE_COMPRESSION)
    options = router.route(options, name, args, kwargs)
    with self.default_producer(publisher) as producer:
        return result_cls(producer.delay_task(name, args, kwargs,
                                              task_id=task_id,
                                              countdown=countdown, eta=eta,
                                              expires=expires, **options))
    def broker_connection(self, hostname=None, userid=None,
            password=None, virtual_host=None, port=None, ssl=None,
            insist=None, connect_timeout=None, transport=None,
            transport_options=None, **kwargs):
        """Create a new broker connection.

        Every argument falls back to the corresponding ``BROKER_*``
        configuration setting when not supplied (``either`` is used for
        flags whose value may legitimately be falsy).
        """
        conf = self.conf
        return self.amqp.BrokerConnection(
                    hostname or conf.BROKER_HOST,
                    userid or conf.BROKER_USER,
                    password or conf.BROKER_PASSWORD,
                    virtual_host or conf.BROKER_VHOST,
                    port or conf.BROKER_PORT,
                    transport=transport or conf.BROKER_TRANSPORT,
                    insist=self.either("BROKER_INSIST", insist),
                    ssl=self.either("BROKER_USE_SSL", ssl),
                    connect_timeout=self.either(
                        "BROKER_CONNECTION_TIMEOUT", connect_timeout),
                    # Explicit transport_options are layered over configured ones.
                    transport_options=dict(conf.BROKER_TRANSPORT_OPTIONS,
                                           **transport_options or {}))
    @contextmanager
    def default_connection(self, connection=None, *args, **kwargs):
        """Context manager yielding *connection* if given, otherwise a
        connection acquired from (and released back to) the pool."""
        if connection:
            yield connection
        else:
            with self.pool.acquire(block=True) as connection:
                yield connection
    @contextmanager
    def default_producer(self, producer=None):
        """Context manager yielding *producer* if given, otherwise a
        producer acquired from (and released back to) the producer pool."""
        if producer:
            yield producer
        else:
            with self.amqp.producer_pool.acquire(block=True) as producer:
                yield producer
    def with_default_connection(self, fun):
        """With any function accepting a `connection`
        keyword argument, establishes a default connection if one is
        not already passed to it.

        Any automatically established connection will be closed after
        the function returns.

        **Deprecated**

        Use ``with app.default_connection(connection)`` instead.
        """
        @wraps(fun)
        def _inner(*args, **kwargs):
            # Pop any caller-supplied connection, then re-inject the
            # (possibly pool-acquired) one under the same keyword.
            connection = kwargs.pop("connection", None)
            with self.default_connection(connection) as c:
                return fun(*args, **dict(kwargs, connection=c))
        return _inner
def prepare_config(self, c):
"""Prepare configuration before it is merged with the defaults."""
return find_deprecated_settings(c)
def now(self):
return self.loader.now(utc=self.conf.CELERY_ENABLE_UTC)
    def mail_admins(self, subject, body, fail_silently=False):
        """E-mail *subject*/*body* to the administrators listed in the
        ``ADMINS`` setting, using the ``EMAIL_*`` settings for transport.

        No-op when ``ADMINS`` is empty or unset.
        """
        if self.conf.ADMINS:
            # ADMINS entries are (name, email) pairs; only addresses are used.
            to = [admin_email for _, admin_email in self.conf.ADMINS]
            return self.loader.mail_admins(subject, body, fail_silently, to=to,
                                           sender=self.conf.SERVER_EMAIL,
                                           host=self.conf.EMAIL_HOST,
                                           port=self.conf.EMAIL_PORT,
                                           user=self.conf.EMAIL_HOST_USER,
                                           password=self.conf.EMAIL_HOST_PASSWORD,
                                           timeout=self.conf.EMAIL_TIMEOUT,
                                           use_ssl=self.conf.EMAIL_USE_SSL,
                                           use_tls=self.conf.EMAIL_USE_TLS)
def select_queues(self, queues=None):
return self.amqp.queues.select_subset(queues)
    def either(self, default_key, *values):
        """Fallback to the value of a configuration key if none of the
        `*values` are true.

        NOTE: truthiness-based — explicit ``0``/``False`` values also
        fall back to the configured default.
        """
        return first(None, values) or self.conf.get(default_key)
    def bugreport(self):
        """Return a string with environment information for bug reports
        (delegates to the module-level ``bugreport`` helper)."""
        return bugreport(self)
    def _get_backend(self):
        """Instantiate the result backend from ``backend_cls`` or the
        ``CELERY_RESULT_BACKEND`` setting (either may be given as a URL)."""
        from celery.backends import get_backend_by_url
        backend, url = get_backend_by_url(
            self.backend_cls or self.conf.CELERY_RESULT_BACKEND,
            self.loader)
        return backend(app=self, url=url)
    def _get_config(self):
        """Build the Settings object: loader configuration layered over
        the defaults, then any pre-configuration applied before ``conf``
        was first accessed."""
        s = Settings({}, [self.prepare_config(self.loader.conf),
                          deepcopy(DEFAULTS)])
        if self._preconf:
            # NOTE: iteritems() — Python 2.  setattr is used so Settings'
            # attribute semantics are honoured.
            for key, value in self._preconf.iteritems():
                setattr(s, key, value)
        return s
def _after_fork(self, obj_):
if self._pool:
self._pool.force_close_all()
self._pool = None
def create_task_cls(self):
"""Creates a base task class using default configuration
taken from this app."""
return self.subclass_with_self("celery.app.task:Task", name="Task",
attribute="_app", abstract=True)
    def subclass_with_self(self, Class, name=None, attribute="app",
                           reverse=None, **kw):
        """Subclass an app-compatible class by setting its app attribute
        to be this app instance.

        App-compatible means that the class has a class attribute that
        provides the default app it should use, e.g.
        ``class Foo: app = None``.

        :param Class: The app-compatible class to subclass (may also be
            given as a ``"module:Class"`` string, resolved with
            ``symbol_by_name``).
        :keyword name: Custom name for the target class.
        :keyword attribute: Name of the attribute holding the app,
            default is "app".
        :keyword reverse: attribute name used when pickling; defaults to
            the class name.
        """
        Class = symbol_by_name(Class)
        reverse = reverse if reverse else Class.__name__

        def __reduce__(self):
            # Pickle as (app attribute name, reduce args) so instances are
            # recreated from the unpickled app rather than by value.
            return _unpickle_appattr, (reverse, self.__reduce_args__())

        attrs = dict({attribute: self}, __module__=Class.__module__,
                     __doc__=Class.__doc__, __reduce__=__reduce__, **kw)

        return type(name or Class.__name__, (Class, ), attrs)
def _rgetattr(self, path):
return reduce(getattr, [self] + path.split('.'))
    def __repr__(self):
        # e.g. "<Celery tasks:0x7f...>"; falls back to "__main__" when the
        # app has no main module name set.
        return "<%s %s:0x%x>" % (self.__class__.__name__,
                                 self.main or "__main__", id(self), )
    def __reduce__(self):
        """Support pickling of the app instance."""
        # Reduce only pickles the configuration changes,
        # so the default configuration doesn't have to be passed
        # between processes.
        return (_unpickle_app, (self.__class__, self.Pickler)
                + self.__reduce_args__())
    def __reduce_args__(self):
        """Positional arguments used to reconstruct the app on unpickle
        (only ``conf.changes`` is carried, not the full configuration)."""
        return (self.main, self.conf.changes, self.loader_cls,
                self.backend_cls, self.amqp_cls, self.events_cls,
                self.log_cls, self.control_cls, self.accept_magic_kwargs)
    @cached_property
    def Worker(self):
        """Create new :class:`~celery.apps.worker.Worker` instance."""
        return self.subclass_with_self("celery.apps.worker:Worker")

    @cached_property
    def WorkController(self, **kwargs):
        # App-bound WorkController subclass.
        # NOTE(review): **kwargs on a cached_property getter is never
        # supplied and has no effect — candidate for removal.
        return self.subclass_with_self("celery.worker:WorkController")

    @cached_property
    def Beat(self, **kwargs):
        """Create new :class:`~celery.apps.beat.Beat` instance."""
        return self.subclass_with_self("celery.apps.beat:Beat")

    @cached_property
    def TaskSet(self):
        # App-bound TaskSet subclass.
        return self.subclass_with_self("celery.task.sets:TaskSet")

    @cached_property
    def Task(self):
        """Default Task base class for this application."""
        return self.create_task_cls()

    @cached_property
    def annotations(self):
        # Prepared task annotations from the CELERY_ANNOTATIONS setting.
        return prepare_annotations(self.conf.CELERY_ANNOTATIONS)

    @cached_property
    def AsyncResult(self):
        # App-bound AsyncResult subclass.
        return self.subclass_with_self("celery.result:AsyncResult")

    @cached_property
    def TaskSetResult(self):
        # App-bound TaskSetResult subclass.
        return self.subclass_with_self("celery.result:TaskSetResult")

    @property
    def pool(self):
        """Broker connection pool, created lazily and reset after fork
        (see :meth:`_after_fork`)."""
        if self._pool is None:
            register_after_fork(self, self._after_fork)
            self._pool = self.broker_connection().Pool(
                            limit=self.conf.BROKER_POOL_LIMIT)
        return self._pool

    @property
    def current_task(self):
        """The task currently being executed (top of the task stack),
        or None."""
        return _task_stack.top

    @cached_property
    def amqp(self):
        """Sending/receiving messages.  See :class:`~celery.app.amqp.AMQP`."""
        return instantiate(self.amqp_cls, app=self)

    @cached_property
    def backend(self):
        """Storing/retrieving task state.  See
        :class:`~celery.backend.base.BaseBackend`."""
        return self._get_backend()

    @cached_property
    def conf(self):
        """Current configuration (dict and attribute access)."""
        return self._get_config()

    @cached_property
    def control(self):
        """Controlling worker nodes.  See
        :class:`~celery.app.control.Control`."""
        return instantiate(self.control_cls, app=self)

    @cached_property
    def events(self):
        """Sending/receiving events.  See :class:`~celery.events.Events`. """
        return instantiate(self.events_cls, app=self)

    @cached_property
    def loader(self):
        """Current loader."""
        return get_loader_cls(self.loader_cls)(app=self)

    @cached_property
    def log(self):
        """Logging utilities.  See :class:`~celery.app.log.Logging`."""
        return instantiate(self.log_cls, app=self)

    @cached_property
    def tasks(self):
        """Registry of available tasks.

        Accessing this attribute will also finalize the app.
        """
        self.finalize()
        return self._tasks
App = Celery  # compat: historical alias for the application class.
| |
"""Configuration details for specific server types.
This module contains functions that help with initializing a Fabric environment
for standard server types.
"""
import os
import subprocess
from fabric.api import env
from cloudbio.fabutils import quiet
from cloudbio.fabutils import configure_runsudo
from cloudbio.custom import system
def _setup_distribution_environment(ignore_distcheck=False):
    """Setup distribution environment.

    In low-level terms, this method attempts to populate various values in
    the fabric env data structure for use other places in CloudBioLinux.

    :param ignore_distcheck: skip the sanity check that the configured
        distribution matches the target machine (forced on for macosx).
    """
    if "distribution" not in env:
        env.distribution = "__auto__"
    if "dist_name" not in env:
        env.dist_name = "__auto__"
    env.logger.info("Distribution %s" % env.distribution)
    # Special connection setup for vagrant/localhost targets.
    if env.hosts == ["vagrant"]:
        _setup_vagrant_environment()
    elif env.hosts == ["localhost"]:
        _setup_local_environment()
    configure_runsudo(env)
    if env.distribution == "__auto__":
        env.distribution = _determine_distribution(env)
    if env.distribution == "ubuntu":
        ## TODO: Determine if dist_name check works with debian.
        if env.dist_name == "__auto__":
            env.dist_name = _ubuntu_dist_name(env)
        _setup_ubuntu()
    elif env.distribution == "centos":
        _setup_centos()
    elif env.distribution == "scientificlinux":
        _setup_scientificlinux()
    elif env.distribution == "debian":
        if env.dist_name == "__auto__":
            env.dist_name = _debian_dist_name(env)
        _setup_debian()
    elif env.distribution == "arch":
        pass  # No package support for Arch yet
    elif env.distribution == "suse":
        pass  # No package support for SUSE yet
    elif env.distribution == "macosx":
        _setup_macosx(env)
        # No meaningful distcheck is possible on OS X.
        ignore_distcheck = True
    else:
        raise ValueError("Unexpected distribution %s" % env.distribution)
    if not ignore_distcheck:
        _validate_target_distribution(env.distribution, env.get('dist_name', None))
    _cloudman_compatibility(env)
    _setup_nixpkgs()
    _setup_fullpaths(env)
    # allow us to check for packages only available on 64bit machines
    machine = env.safe_run_output("uname -m")
    env.is_64bit = machine.find("_64") > 0
def _setup_fullpaths(env):
    """Expand a leading ``~`` in configured paths to the remote $HOME."""
    home_dir = env.safe_run_output("echo $HOME")
    for attr in ["data_files", "galaxy_home", "local_install"]:
        if not hasattr(env, attr):
            continue
        value = getattr(env, attr)
        if value.startswith("~"):
            value = value.replace("~", home_dir)
        setattr(env, attr, value)
def _cloudman_compatibility(env):
    """Environmental variable naming for compatibility with CloudMan.

    CloudMan expects ``env.install_dir``; mirror ``system_install`` there.
    """
    env.install_dir = env.system_install
def _validate_target_distribution(dist, dist_name=None):
    """Check target matches environment setting (for sanity)

    Throws exception on error.  Only debian/ubuntu targets are checked;
    other distributions are accepted as-is.
    """
    env.logger.debug("Checking target distribution " + env.distribution)
    if dist in ["debian", "ubuntu"]:
        tag = env.safe_run_output("cat /proc/version")
        if tag.lower().find(dist) == -1:
            # hmmm, test issue file
            tag2 = env.safe_run_output("cat /etc/issue")
            if tag2.lower().find(dist) == -1:
                raise ValueError("Distribution does not match machine; are you using correct fabconfig for " + dist)
        if not dist_name:
            raise ValueError("Must specify a dist_name property when working with distribution %s" % dist)
        # Does this new method work with CentOS, do we need this.
        if dist == 'debian':
            actual_dist_name = _debian_dist_name(env)
        else:
            actual_dist_name = _ubuntu_dist_name(env)
        if actual_dist_name != dist_name:
            raise ValueError("Distribution does not match machine; are you using correct fabconfig <"+actual_dist_name+"> for " + dist)
    else:
        env.logger.debug("Unknown target distro")
def _setup_ubuntu():
    """Ubuntu setup: build the apt source list for Ubuntu targets.

    NOTE(review): the Bio-Linux line hard-codes "trusty" and the HTCondor
    line hard-codes "squeeze" instead of using the %s codename placeholder
    — confirm these are intentional.
    """
    env.logger.info("Ubuntu setup")
    shared_sources = _setup_deb_general()
    # package information. This is ubuntu/debian based and could be generalized.
    sources = [
        "deb http://us.archive.ubuntu.com/ubuntu/ %s universe",  # unsupported repos
        "deb http://us.archive.ubuntu.com/ubuntu/ %s multiverse",
        "deb http://us.archive.ubuntu.com/ubuntu/ %s-updates universe",
        "deb http://us.archive.ubuntu.com/ubuntu/ %s-updates multiverse",
        "deb http://archive.canonical.com/ubuntu %s partner",  # partner repositories
        "deb http://cran.fhcrc.org/bin/linux/ubuntu %s/",  # latest R versions
        "deb http://archive.canonical.com/ubuntu %s partner",  # sun-java
        "deb http://ppa.launchpad.net/nebc/bio-linux/ubuntu trusty main",  # Bio-Linux
        "deb [arch=amd64 trusted=yes] http://research.cs.wisc.edu/htcondor/debian/stable/ squeeze contrib"  # HTCondor
    ] + shared_sources
    env.std_sources = _add_source_versions(env.dist_name, sources)
def _setup_debian():
    """Debian setup: build the apt source list for Debian targets."""
    env.logger.info("Debian setup")
    # 'sid' has no release-specific repositories; use squeeze's instead.
    unstable_remap = {"sid": "squeeze"}
    shared_sources = _setup_deb_general()
    sources = [
        "deb http://cran.fhcrc.org/bin/linux/debian %s-cran/",  # latest R versions
        "deb http://nebc.nerc.ac.uk/bio-linux/ unstable bio-linux",  # Bio-Linux
    ] + shared_sources
    # fill in %s
    dist_name = unstable_remap.get(env.dist_name, env.dist_name)
    env.std_sources = _add_source_versions(dist_name, sources)
def _setup_deb_general():
    """Shared settings for different debian based/derived distributions.

    Configures apt source file locations, interpreter version suffixes and
    ``env.java_home``, and returns the apt source lines shared by Debian
    and Ubuntu.
    """
    env.logger.debug("Debian-shared setup")
    env.sources_file = "/etc/apt/sources.list.d/cloudbiolinux.list"
    env.global_sources_file = "/etc/apt/sources.list"
    env.apt_preferences_file = "/etc/apt/preferences"
    if not hasattr(env, "python_version_ext"):
        env.python_version_ext = ""
    if not hasattr(env, "ruby_version_ext"):
        env.ruby_version_ext = "1.9.1"
    # dict.has_key() was removed in Python 3; the `in` operator is the
    # portable equivalent (fabric's env is a dict subclass).
    if "java_home" not in env:
        # Try to determine java location from update-alternatives
        java_home = "/usr/lib/jvm/java-7-openjdk-amd64"
        with quiet():
            java_info = env.safe_run_output("update-alternatives --display java")
        for line in java_info.split("\n"):
            if line.strip().startswith("link currently points to"):
                java_home = line.split()[-1].strip()
                java_home = java_home.replace("/jre/bin/java", "")
        env.java_home = java_home
    shared_sources = [
        "deb http://download.virtualbox.org/virtualbox/debian %s contrib",  # virtualbox
    ]
    return shared_sources
def _setup_centos():
    """CentOS-specific defaults: interpreter version suffixes and java home."""
    env.logger.info("CentOS setup")
    if not hasattr(env, "python_version_ext"):
        # use installed anaconda version instead of package 2.6
        env.python_version_ext = ""
    if not hasattr(env, "ruby_version_ext"):
        env.ruby_version_ext = ""
    # has_key() is Python 2-only; use the `in` operator instead.
    if "java_home" not in env:
        env.java_home = "/etc/alternatives/java_sdk"
def _setup_scientificlinux():
    """Scientific Linux defaults: interpreter suffixes, pip command, java."""
    env.logger.info("ScientificLinux setup")
    if not hasattr(env, "python_version_ext"):
        env.python_version_ext = ""
    env.pip_cmd = "pip-python"
    # has_key() is Python 2-only; use the `in` operator instead.
    if "java_home" not in env:
        env.java_home = "/etc/alternatives/java_sdk"
def _setup_macosx(env):
    """Mac OS X setup: install Homebrew as the package system."""
    # XXX Only framework in place; needs testing
    env.logger.info("MacOSX setup")
    # XXX Ensure XCode is installed and provide useful directions if not
    system.install_homebrew(env)
    # XXX find java correctly
    env.java_home = ""
def _setup_nixpkgs():
    """Decide whether Nix packages should be used on this target.

    Nix support is currently only enabled on Debian/Ubuntu, and only when
    the fabric config explicitly sets ``nixpkgs = "True"``.  The decision
    is stored back on ``env.nixpkgs`` as a boolean.
    """
    # for now, Nix packages are only supported in Debian - it can
    # easily be done for others - just get Nix installed from the .rpm
    nixpkgs = False
    # has_key() is Python 2-only; use the `in` operator instead.
    if "nixpkgs" in env:
        if env.distribution in ["debian", "ubuntu"]:
            # The config value is a string, not a bool.
            nixpkgs = env.nixpkgs == "True"
        else:
            env.logger.warn("NixPkgs are currently not supported for " + env.distribution)
    if nixpkgs:
        env.logger.info("NixPkgs: supported")
    else:
        env.logger.debug("NixPkgs: Ignored")
    env.nixpkgs = nixpkgs
def _setup_local_environment():
    """Setup a localhost environment based on system variables."""
    env.logger.info("Get local environment")
    # has_key() is Python 2-only; use the `in` operator instead.
    if "user" not in env:
        env.user = os.environ["USER"]
def _setup_vagrant_environment():
    """Use vagrant commands to get connection information.

    https://gist.github.com/1d4f7c3e98efdf860b7e
    """
    env.logger.info("Get vagrant environment")
    raw_ssh_config = subprocess.Popen(["vagrant", "ssh-config"],
                                      stdout=subprocess.PIPE).communicate()[0]
    env.logger.info(raw_ssh_config)
    # Each non-empty line is a "Key value" pair from ssh-config output.
    ssh_config = dict([l.strip().split() for l in raw_ssh_config.split("\n") if l])
    env.user = ssh_config["User"]
    env.hosts = [ssh_config["HostName"]]
    env.port = ssh_config["Port"]
    env.host_string = "%s@%s:%s" % (env.user, env.hosts[0], env.port)
    # IdentityFile may be quoted by vagrant; strip the quotes.
    env.key_filename = ssh_config["IdentityFile"].replace('"', '')
    env.logger.debug("ssh %s" % env.host_string)
def _add_source_versions(version, sources):
    """Patch package source strings for version, e.g. Debian 'stable'.

    Lines containing a ``%s`` placeholder are interpolated with *version*;
    all other lines pass through unchanged.
    """
    env.logger.debug("Source=%s" % version)
    final = []
    for s in sources:
        # Bug fix: the previous test `s.find("%s") > 0` silently skipped a
        # placeholder at position 0; a membership test covers all positions.
        if "%s" in s:
            s = s % version
        final.append(s)
    return final
def _ubuntu_dist_name(env):
    """
    Determine Ubuntu dist name (e.g. precise or quantal).

    Reads DISTRIB_CODENAME from the /etc/*release files on the target.
    """
    return env.safe_run_output("cat /etc/*release | grep DISTRIB_CODENAME | cut -f 2 -d =")
def _debian_dist_name(env):
    """
    Determine Debian dist name (e.g. squeeze).

    Parses the Codename field of ``lsb_release -a`` on the target.
    """
    return env.safe_run_output("lsb_release -a | grep Codename | cut -f 2")
def _determine_distribution(env):
    """
    Attempt to automatically determine the distribution of the target machine.

    Currently works for Ubuntu, CentOS, Debian, Scientific Linux and Mac OS X.

    NOTE: the order of checks matters — Red-Hat-family strings are matched
    before the generic /etc/debian_version fallback, and Ubuntu is caught
    first via DISTRIB_ID even though it also ships /etc/debian_version.
    """
    with quiet():
        output = env.safe_run_output("cat /etc/*release").lower()
    if output.find("distrib_id=ubuntu") >= 0:
        return "ubuntu"
    elif output.find("centos release") >= 0:
        return "centos"
    elif output.find("centos linux release") >= 0:
        return "centos"
    # RHEL, Fedora and Amazon Linux are treated as the CentOS family.
    elif output.find("red hat enterprise linux server release") >= 0:
        return "centos"
    elif output.find("fedora release") >= 0:
        return "centos"
    elif output.find("amzn") >= 0:  # Amazon AMIs are Red-Hat based
        return "centos"
    elif output.find("suse linux") >= 0:
        return "suse"
    elif output.find("opensuse") >= 0:
        return "suse"
    elif output.find("scientific linux") >= 0:
        return "scientificlinux"
    elif env.safe_exists("/etc/debian_version"):
        return "debian"
    elif output.find("id=arch") >= 0:
        return "arch"
    # check for file used by Python's platform.mac_ver
    elif env.safe_exists("/System/Library/CoreServices/SystemVersion.plist"):
        return "macosx"
    else:
        raise Exception("Attempt to automatically determine Linux distribution of target machine failed, please manually specify distribution in fabricrc.txt")
| |
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from jacket.compute import exception
from jacket.objects.compute import base
from jacket.objects.compute import fields
from jacket.compute.virt import hardware
def all_things_equal(obj_a, obj_b):
    """Field-wise equality for versioned objects.

    Two objects are equal when every field is either unset on both, or
    set on both with equal values.
    """
    for field_name in obj_a.fields:
        a_has = obj_a.obj_attr_is_set(field_name)
        if a_has != obj_b.obj_attr_is_set(field_name):
            return False
        if a_has and getattr(obj_a, field_name) != getattr(obj_b, field_name):
            return False
    return True
@base.NovaObjectRegistry.register
class NUMACell(base.NovaObject):
    """A single host NUMA cell: its CPUs, memory, usage and memory pages."""
    # Version 1.0: Initial version
    # Version 1.1: Added pinned_cpus and siblings fields
    # Version 1.2: Added mempages field
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'cpuset': fields.SetOfIntegersField(),
        'memory': fields.IntegerField(),
        'cpu_usage': fields.IntegerField(default=0),
        'memory_usage': fields.IntegerField(default=0),
        'pinned_cpus': fields.SetOfIntegersField(),
        'siblings': fields.ListOfSetsOfIntegersField(),
        'mempages': fields.ListOfObjectsField('NUMAPagesTopology'),
    }

    def __eq__(self, other):
        return all_things_equal(self, other)

    def __ne__(self, other):
        return not (self == other)

    @property
    def free_cpus(self):
        """Set of CPUs in this cell that are not pinned."""
        return self.cpuset - self.pinned_cpus or set()

    @property
    def free_siblings(self):
        """Sibling sets restricted to currently free CPUs."""
        return [sibling_set & self.free_cpus
                for sibling_set in self.siblings]

    @property
    def avail_cpus(self):
        """Number of unpinned CPUs."""
        return len(self.free_cpus)

    @property
    def avail_memory(self):
        """Unused memory in this cell."""
        return self.memory - self.memory_usage

    def pin_cpus(self, cpus):
        """Mark *cpus* as pinned.

        :raises CPUPinningUnknown: if any cpu is outside this cell's cpuset.
        :raises CPUPinningInvalid: if any cpu is already pinned.
        """
        if cpus - self.cpuset:
            # Bug fix: report the cell's known cpuset in the error, not the
            # currently pinned set.
            raise exception.CPUPinningUnknown(requested=list(cpus),
                                              cpuset=list(self.cpuset))
        if self.pinned_cpus & cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              pinned=list(self.pinned_cpus))
        self.pinned_cpus |= cpus

    def unpin_cpus(self, cpus):
        """Unpin *cpus*; every one of them must currently be pinned.

        :raises CPUPinningUnknown: if any cpu is outside this cell's cpuset.
        :raises CPUPinningInvalid: if any cpu is not currently pinned.
        """
        if cpus - self.cpuset:
            # Bug fix: report the cell's known cpuset in the error, not the
            # currently pinned set.
            raise exception.CPUPinningUnknown(requested=list(cpus),
                                              cpuset=list(self.cpuset))
        if (self.pinned_cpus & cpus) != cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              pinned=list(self.pinned_cpus))
        self.pinned_cpus -= cpus

    def pin_cpus_with_siblings(self, cpus):
        """Pin *cpus* together with all of their thread siblings."""
        pin_siblings = set()
        for sib in self.siblings:
            if cpus & sib:
                pin_siblings.update(sib)
        self.pin_cpus(pin_siblings)

    def unpin_cpus_with_siblings(self, cpus):
        """Unpin *cpus* together with all of their thread siblings."""
        pin_siblings = set()
        for sib in self.siblings:
            if cpus & sib:
                pin_siblings.update(sib)
        self.unpin_cpus(pin_siblings)

    def _to_dict(self):
        # Legacy (pre-versioned-object) dict format; see _from_dict.
        return {
            'id': self.id,
            'cpus': hardware.format_cpu_spec(
                self.cpuset, allow_ranges=False),
            'mem': {
                'total': self.memory,
                'used': self.memory_usage},
            'cpu_usage': self.cpu_usage}

    @classmethod
    def _from_dict(cls, data_dict):
        """Build a NUMACell from the legacy dict format (missing fields
        default to empty/zero)."""
        cpuset = hardware.parse_cpu_spec(
            data_dict.get('cpus', ''))
        cpu_usage = data_dict.get('cpu_usage', 0)
        memory = data_dict.get('mem', {}).get('total', 0)
        memory_usage = data_dict.get('mem', {}).get('used', 0)
        cell_id = data_dict.get('id')
        return cls(id=cell_id, cpuset=cpuset, memory=memory,
                   cpu_usage=cpu_usage, memory_usage=memory_usage,
                   mempages=[], pinned_cpus=set([]), siblings=[])

    def can_fit_hugepages(self, pagesize, memory):
        """Returns whether memory can fit into hugepages size

        :param pagesize: a page size in KibB
        :param memory: a memory size asked to fit in KiB

        :returns: whether memory can fit in hugepages
        :raises: MemoryPageSizeNotSupported if page size not supported
        """
        for pages in self.mempages:
            if pages.size_kb == pagesize:
                # Must fit in the free pages and be page-aligned.
                return (memory <= pages.free_kb and
                        (memory % pages.size_kb) == 0)
        raise exception.MemoryPageSizeNotSupported(pagesize=pagesize)
@base.NovaObjectRegistry.register
class NUMAPagesTopology(base.NovaObject):
    """Memory pages of one size (e.g. 4K, 2M, 1G) within a NUMA cell."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'size_kb': fields.IntegerField(),
        'total': fields.IntegerField(),
        'used': fields.IntegerField(default=0),
    }

    def __eq__(self, other):
        return all_things_equal(self, other)

    def __ne__(self, other):
        return not (self == other)

    @property
    def free(self):
        """Returns the number of avail pages."""
        return self.total - self.used

    @property
    def free_kb(self):
        """Returns the avail memory size in KiB."""
        return self.free * self.size_kb
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class NUMATopology(base.NovaObject,
                   base.NovaObjectDictCompat):
    """Host NUMA topology: a list of NUMACell objects."""
    # Version 1.0: Initial version
    # Version 1.1: Update NUMACell to 1.1
    # Version 1.2: Update NUMACell to 1.2
    VERSION = '1.2'

    fields = {
        'cells': fields.ListOfObjectsField('NUMACell'),
    }

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Deserialize, accepting both the versioned-object format and
        the legacy plain-dict format."""
        if 'jacket_object.name' in primitive:
            obj_topology = super(NUMATopology, cls).obj_from_primitive(
                primitive, context=context)
        else:
            # NOTE(sahid): This compatibility code needs to stay until we can
            # guarantee that there are no cases of the old format stored in
            # the database (or forever, if we can never guarantee that).
            obj_topology = NUMATopology._from_dict(primitive)
        return obj_topology

    def _to_json(self):
        # Serialize via the versioned-object primitive representation.
        return jsonutils.dumps(self.obj_to_primitive())

    @classmethod
    def obj_from_db_obj(cls, db_obj):
        """Load a topology from the JSON string stored in the database."""
        return cls.obj_from_primitive(
            jsonutils.loads(db_obj))

    def __len__(self):
        """Defined so that boolean testing works the same as for lists."""
        return len(self.cells)

    def _to_dict(self):
        # TODO(sahid): needs to be removed.
        return {'cells': [cell._to_dict() for cell in self.cells]}

    @classmethod
    def _from_dict(cls, data_dict):
        # Legacy-format loader; see obj_from_primitive above.
        return cls(cells=[
            NUMACell._from_dict(cell_dict)
            for cell_dict in data_dict.get('cells', [])])
@base.NovaObjectRegistry.register
class NUMATopologyLimits(base.NovaObject):
    """Oversubscription limits applied when fitting guests on host NUMA."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
    }

    def to_dict_legacy(self, host_topology):
        """Render per-cell limits in the legacy dict format, applying the
        allocation ratios to *host_topology*'s cells."""
        cells = []
        for cell in host_topology.cells:
            cells.append(
                {'cpus': hardware.format_cpu_spec(
                    cell.cpuset, allow_ranges=False),
                 'mem': {'total': cell.memory,
                         'limit': cell.memory * self.ram_allocation_ratio},
                 'cpu_limit': len(cell.cpuset) * self.cpu_allocation_ratio,
                 'id': cell.id})
        return {'cells': cells}

    @classmethod
    def obj_from_db_obj(cls, db_obj):
        """Load limits, accepting both the versioned-object format and the
        legacy per-cell dict format (ratios recovered from cell 0)."""
        if 'jacket_object.name' in db_obj:
            obj_topology = cls.obj_from_primitive(db_obj)
        else:
            # NOTE(sahid): This compatibility code needs to stay until we can
            # guarantee that all compute nodes are using RPC API => 3.40.
            cell = db_obj['cells'][0]
            ram_ratio = cell['mem']['limit'] / float(cell['mem']['total'])
            cpu_ratio = cell['cpu_limit'] / float(len(hardware.parse_cpu_spec(
                cell['cpus'])))
            obj_topology = NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
        return obj_topology
| |
#! /usr/bin/env python
"""
Time conversion utilities
"""
import os
from datetime import datetime, timedelta
import time
import numpy as np
import matplotlib.dates
# Seconds per (Julian, 365.25-day) year — used for decimal-year conversions
spy = 86400.*365.25

#lon,lat = geolib.get_center(ds, t_srs=geolib.wgs_srs)
def getTimeZone(lat, lon):
    """Get timezone for a given lat/lon

    Queries the askgeo.com TimeZone web service and returns the
    TimeZoneId string (e.g. "America/Los_Angeles"), or None when the
    service response code is nonzero.

    SECURITY/maintenance note: the request URL embeds a hard-coded
    account id/API key — consider moving it to configuration.
    """
    #Need to fix for Python 2.x and 3.X support
    import urllib.request, urllib.error, urllib.parse
    import xml.etree.ElementTree as ET
    #http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points=37.78%2C-122.42%3B40.71%2C-74.01&databases=Point%2CTimeZone%2CAstronomy%2CNaturalEarthCountry%2CUsState2010%2CUsCounty2010%2CUsCountySubdivision2010%2CUsTract2010%2CUsBlockGroup2010%2CUsPlace2010%2CUsZcta2010
    req = "http://api.askgeo.com/v1/918/aa8292ec06199d1207ccc15be3180213c984832707f0cbf3d3859db279b4b324/query.xml?points="+str(lat)+"%2C"+str(lon)+"&databases=TimeZone"
    opener = urllib.request.build_opener()
    f = opener.open(req)
    tree = ET.parse(f)
    root = tree.getroot()
    #Check response
    tzid = None
    if root.attrib['code'] == '0':
        tz = list(root.iter('TimeZone'))[0]
        #shortname = tz.attrib['ShortName']
        tzid = tz.attrib['TimeZoneId']
    return tzid
def getLocalTime(utc_dt, tz):
    """Convert a naive UTC datetime to the named (pytz) timezone *tz*."""
    import pytz
    zone = pytz.timezone(tz)
    return utc_dt.replace(tzinfo=pytz.utc).astimezone(zone)
def ul_time(utc_dt, lon):
    """Approximate local mean time at longitude *lon* (degrees east):
    24 hours per 360 degrees, no timezone/DST rules applied."""
    hours_east = lon * (24.0 / 360)
    return utc_dt + timedelta(hours=hours_east)
def solarTime(utc_dt, lat, lon):
    """Compute local solar time for given (lat, lon)

    Uses pyephem: the hour angle between the observer's sidereal time and
    the sun's right ascension, shifted so solar noon is 12:00.  Returns a
    datetime on the same date as *utc_dt*.
    """
    import ephem
    o = ephem.Observer()
    o.date = utc_dt
    # ephem expects coordinates as strings (degrees)
    o.lat = str(lat)
    o.lon = str(lon)
    sun = ephem.Sun()
    sun.compute(o)
    hour_angle = o.sidereal_time() - sun.ra
    # Normalize to [0, 24) hours, offset by 12 h so noon is 12:00
    rad = str(ephem.hours(hour_angle + ephem.hours('12:00')).norm)
    t = datetime.strptime(rad, '%H:%M:%S.%f')
    solar_dt = datetime.combine(utc_dt.date(), t.time())
    return solar_dt
def strptime_fuzzy(s):
    """Fuzzy date string parsing

    Note: this returns current date if not found. If only year is provided,
    will return current month, day (dateutil fills missing fields from the
    current date).
    """
    import dateutil.parser
    # fuzzy=True lets the parser skip unknown tokens around the date
    dt = dateutil.parser.parse(str(s), fuzzy=True)
    return dt
def fn_getdatetime(fn):
    """Return the first datetime embedded in filename *fn*, or None."""
    matches = fn_getdatetime_list(fn)
    return matches[0] if matches else None
#Return datetime object extracted from arbitrary filename
def fn_getdatetime_list(fn):
    """Extract all datetime strings from input filename

    Tries progressively looser patterns: YYYYMMDD_HHMM / YYYYMMDDTHHMM,
    then YYYYMMDDHHMM, then YYYYMMDD, then decimal year (YYYY.ddd), then
    bare YYYY, then USGS archive names (DDmonYY).  Returns a list of
    datetime objects (empty list/None when nothing matches).
    """
    #Want to split last component
    fn = os.path.split(os.path.splitext(fn)[0])[-1]
    import re
    #WV01_12JUN152223255-P1BS_R1C1-102001001B3B9800__WV01_12JUN152224050-P1BS_R1C1-102001001C555C00-DEM_4x.tif
    #Need to parse above with month name
    #Note: made this more restrictive to avoid false matches:
    #'20130304_1510_1030010020770600_1030010020CEAB00-DEM_4x'
    #This is a problem, b/c 2015/17/00:
    #WV02_20130315_10300100207D5600_1030010020151700
    #This code should be obsolete before 2019
    #Assume new filenames
    #fn = fn[0:13]
    #Use cascading re find to pull out timestamps
    #Note: Want to be less restrictive here - could have a mix of YYYYMMDD_HHMM, YYYYMMDD and YYYY in filename
    #Should probably search for all possibilities, then prune
    #NOTE: these don't include seconds in the time
    #NOTE: could have 20130304_1510__20130304__whatever in filename
    #The current approach will only catch the first datetime
    dstr = None
    out = None
    #20180101_1200 or 20180101T1200
    dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])[_T](?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]', fn)
    #201801011200
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])(?:0[0-9]|1[0-9]|2[0-3])[0-5][0-9]', fn)
    #20180101
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])(?:$|_|-)', fn)
    #This should pick up dates separated by a dash
    #dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:0[1-9]|1[012])(?:0[1-9]|[12][0-9]|3[01])', fn)
    #2018.609990 — decimal year; handled immediately (no fuzzy parse)
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9]\.[0-9][0-9][0-9]*(?:$|_|-)', fn)
        dstr = [d.lstrip('_').rstrip('_') for d in dstr]
        dstr = [d.lstrip('-').rstrip('-') for d in dstr]
        out = [decyear2dt(float(s)) for s in dstr]
        dstr = None
    #2018
    if not dstr:
        dstr = re.findall(r'(?:^|_|-)(?:19|20)[0-9][0-9](?:$|_|-)', fn)
    #This is for USGS archive filenames
    if not dstr:
        dstr = re.findall(r'[0-3][0-9][a-z][a-z][a-z][0-9][0-9]', fn)
        #This is USGS archive format (e.g. 15jun12) — parsed here directly
        if dstr:
            out = [datetime.strptime(s, '%d%b%y') for s in dstr]
            dstr = None
    if dstr:
        #This is a hack to remove peripheral underscores and dashes
        dstr = [d.lstrip('_').rstrip('_') for d in dstr]
        dstr = [d.lstrip('-').rstrip('-') for d in dstr]
        #This returns an empty list if nothing is found
        out = [strptime_fuzzy(s) for s in dstr]
    return out
def get_t_factor(t1, t2):
    """Return |t2 - t1| expressed in decimal (365.25-day) years, or None
    when either timestamp is missing or the two are equal."""
    if t1 is None or t2 is None or t1 == t2:
        return None
    one_year_s = timedelta(days=365.25).total_seconds()
    return abs((t2 - t1).total_seconds()) / one_year_s
def get_t_factor_fn(fn1, fn2, ds=None):
    """Time difference between two rasters, in decimal years.

    First tries the timestamps embedded in the filenames; if per-pixel
    timestamp rasters (``*_ts.tif``) exist alongside the inputs and *ds*
    is given, returns a per-pixel masked array instead of a scalar.
    """
    t_factor = None
    #Extract timestamps from input filenames
    t1 = fn_getdatetime(fn1)
    t2 = fn_getdatetime(fn2)
    t_factor = get_t_factor(t1, t2)
    #Attempt to load timestamp arrays (for mosaics with variable timestamps)
    t1_fn = os.path.splitext(fn1)[0]+'_ts.tif'
    t2_fn = os.path.splitext(fn2)[0]+'_ts.tif'
    if os.path.exists(t1_fn) and os.path.exists(t2_fn) and ds is not None:
        print("Preparing timestamp arrays")
        from pygeotools.lib import warplib
        # Warp both timestamp rasters to the extent/res of ds
        t1_ds, t2_ds = warplib.memwarp_multi_fn([t1_fn, t2_fn], extent=ds, res=ds)
        print("Loading timestamps into masked arrays")
        from pygeotools.lib import iolib
        t1 = iolib.ds_getma(t1_ds)
        t2 = iolib.ds_getma(t2_ds)
        #This is a new masked array
        # NOTE(review): assumes the *_ts.tif values are in days, so /365.25
        # yields decimal years — confirm against the mosaic generation code.
        t_factor = (t2 - t1) / 365.25
    return t_factor
def sort_fn_list(fn_list):
    """Return *fn_list* ordered by the datetime embedded in each name."""
    dt_list = get_dt_list(fn_list)
    paired = sorted(zip(dt_list, fn_list))
    return [name for _, name in paired]
def fix_repeat_dt(dt_list, offset_s=0.001):
    """Nudge duplicate consecutive times forward by *offset_s* seconds
    until the sequence is strictly increasing (required by xarray interp).

    Modifies and returns *dt_list* (expects a NumPy array of datetimes).
    """
    bump = timedelta(seconds=offset_s)
    dup_idx = (np.diff(dt_list) == timedelta(0))
    while np.any(dup_idx):
        dt_list[dup_idx.nonzero()[0] + 1] += bump
        dup_idx = (np.diff(dt_list) == timedelta(0))
    return dt_list
def get_dt_list(fn_list):
    """Return a NumPy array of datetimes, one extracted per filename."""
    return np.array([fn_getdatetime(name) for name in fn_list])
#Pad must be timedelta
#pad=timedelta(days=30)
def filter_fn_list(dt, fn_list, pad):
    """Return filenames whose embedded timestamps fall within *pad* of *dt*.

    NOTE(review): ``fn_list[idx]`` fancy-indexes with an integer array, so
    *fn_list* must be a NumPy array (a plain list will fail) — confirm
    against callers.
    """
    dt_list = get_dt_list(fn_list)
    #These should be sorted by time
    #This pulls fixed number on either side of dt
    #idx = timelib.get_closest_dt_idx(cdt, v_dt_list)
    #idx = 'idx-2:idx+2'
    #This pulls from a fixed time interval on either side of dt
    idx = get_closest_dt_padded_idx(dt, dt_list, pad)
    fn_list_sel = fn_list[idx]
    return fn_list_sel
def get_closest_dt_fn(fn, fn_list):
    """Return the filename in *fn_list* whose embedded timestamp is
    nearest to the timestamp embedded in *fn*."""
    target_dt = fn_getdatetime(fn)
    candidate_dts = np.array([fn_getdatetime(f) for f in fn_list])
    nearest = get_closest_dt_idx(target_dt, candidate_dts)
    return fn_list[nearest]
def get_closest_dt_idx(dt, dt_list):
    """Get indices of dt_list that is closest to input dt

    Returns the single argmin index of |dt - dt_list|.
    """
    from pygeotools.lib import malib
    # checkma presumably wraps as a masked array (masking invalid entries,
    # e.g. None from failed filename parses) — see pygeotools malib
    dt_list = malib.checkma(dt_list, fix=False)
    dt_diff = np.abs(dt - dt_list)
    return dt_diff.argmin()
def get_closest_dt_padded_idx(dt, dt_list, pad=timedelta(days=30)):
    """Get indices of dt_list that is closest to input dt +/- pad days

    Returns an array of indices whose |difference| from *dt* is strictly
    less than *pad*.
    """
    #If pad is in decimal days
    if not isinstance(pad, timedelta):
        pad = timedelta(days=pad)
    from pygeotools.lib import malib
    # Masked-array wrapper; see get_closest_dt_idx
    dt_list = malib.checkma(dt_list, fix=False)
    dt_diff = np.abs(dt - dt_list)
    valid_idx = (dt_diff.data < pad).nonzero()[0]
    return valid_idx
def get_unique_monthyear(dt_list):
    """Return the unique (year, month) pairs in *dt_list* as an (N, 2)
    integer array, sorted lexicographically.

    Bug fix: the previous ``np.unique(my)`` flattened the (year, month)
    tuples and returned unique scalar values, destroying the pairing;
    ``axis=0`` keeps each row intact.
    """
    my = [(dt.year, dt.month) for dt in dt_list]
    return np.unique(np.array(my), axis=0)
def get_dt_bounds_monthyear(dt_list):
    """Return (start, end) datetime tuples for each unique (year, month).

    NOTE(review): dt1 and dt2 are both set to the 1st of the month, so
    each bound is a zero-length interval; dt2 was probably meant to be
    the end of the month (or the 1st of the next month) — confirm intent
    before relying on this.
    """
    my_list = get_unique_monthyear(dt_list)
    out = []
    for my in my_list:
        dt1 = datetime(my[0], my[1], 1)
        dt2 = datetime(my[0], my[1], 1)
        out.append((dt1, dt2))
    return out
def get_unique_years(dt_list):
    """Return sorted unique years present in dt_list"""
    return np.unique([dt.year for dt in dt_list])
def dt_filter_rel_annual_idx(dt_list, min_rel_dt=(1,1), max_rel_dt=(12,31)):
    """Return dictionary containing indices of timestamps that fall within relative month/day bounds of each year

    min_rel_dt/max_rel_dt are (month, day) tuples; a window whose start is
    "after" its end wraps around Jan 1 into the following year.
    """
    dt_list = np.array(dt_list)
    years = get_unique_years(dt_list)
    from collections import OrderedDict
    out = OrderedDict()
    for year in years:
        #If the (month, day) window lies within a single year
        #(the original compared month to day: min_rel_dt[0] < max_rel_dt[1],
        #which mis-classifies windows like (5,31)-(6,1))
        if tuple(min_rel_dt) <= tuple(max_rel_dt):
            dt1 = datetime(year, min_rel_dt[0], min_rel_dt[1])
            dt2 = datetime(year, max_rel_dt[0], max_rel_dt[1])
        #Or if our relative values wrap around Jan 1
        else:
            dt1 = datetime(year, min_rel_dt[0], min_rel_dt[1])
            dt2 = datetime(year+1, max_rel_dt[0], max_rel_dt[1])
        idx = np.logical_and((dt_list >= dt1), (dt_list <= dt2))
        if np.any(idx):
            out[year] = idx
    return out
#Use this to get datetime bounds for annual mosaics
def get_dt_bounds(dt_list, min_rel_dt=(1,1), max_rel_dt=(12,31)):
    """Return (start, end) datetime pairs for each year's relative month/day window

    Only windows overlapping the span of dt_list are returned; dt_list is
    assumed sorted in time (first/last entries define the span).
    """
    years = get_unique_years(dt_list)
    out = []
    for year in years:
        #Window contained within a single year
        #(the original compared month to day: min_rel_dt[0] < max_rel_dt[1],
        #which mis-classifies windows like (5,31)-(6,1))
        if tuple(min_rel_dt) <= tuple(max_rel_dt):
            dt1 = datetime(year, min_rel_dt[0], min_rel_dt[1])
            dt2 = datetime(year, max_rel_dt[0], max_rel_dt[1])
        #Window wraps around Jan 1 into the next year
        else:
            dt1 = datetime(year, min_rel_dt[0], min_rel_dt[1])
            dt2 = datetime(year+1, max_rel_dt[0], max_rel_dt[1])
        if dt2 > dt_list[0] and dt1 < dt_list[-1]:
            out.append((dt1, dt2))
    return out
def get_dt_bounds_fn(list_fn, min_rel_dt=(5,31), max_rel_dt=(6,1)):
    """Split a filename list into per-interval lists based on filename timestamps

    Reads filenames from list_fn, computes annual relative-date bounds, and
    writes one '*_fn_list.txt' per interval containing the filenames that fall
    inside it.
    """
    #Context manager ensures the input file is closed (original leaked the handle);
    #strip trailing newlines so they are not duplicated on output
    with open(list_fn, 'r') as f:
        fn_list = np.array([line.strip() for line in f])
    fn_list.sort()
    dt_list = np.array([fn_getdatetime(fn) for fn in fn_list])
    bounds = get_dt_bounds(dt_list, min_rel_dt, max_rel_dt)
    for b in bounds:
        print(b)
        c_date = center_date(b[0], b[1])
        #c_date = datetime(b[1].year,1,1)
        idx = (dt_list >= b[0]) & (dt_list < b[1])
        out_fn = '%s_%s-%s_fn_list.txt' % \
                (c_date.strftime('%Y%m%d'), b[0].strftime('%Y%m%d'), b[1].strftime('%Y%m%d'))
        with open(out_fn, 'w') as out_f:
            for fn in fn_list[idx]:
                out_f.write('%s\n' % fn)
#parallel 'dem_mosaic -l {} --count -o {.}' ::: 2*fn_list.txt
#This checks to see if input dt is between the given relative month/day interval
def rel_dt_test(dt, min_rel_dt=(1,1), max_rel_dt=(12,31)):
    """Check whether dt falls within the relative (month, day) window

    The window may wrap around Jan 1; non-datetime input returns False.
    """
    if not dt_check(dt):
        return False
    doy = dt2doy(dt)
    min_doy = dt2doy(datetime(dt.year, *min_rel_dt))
    max_doy = dt2doy(datetime(dt.year, *max_rel_dt))
    if min_doy < max_doy:
        #Both relative dates are in the same year
        min_dt = datetime(dt.year, min_rel_dt[0], min_rel_dt[1])
        max_dt = datetime(dt.year, max_rel_dt[0], max_rel_dt[1])
    elif doy >= min_doy:
        #Window spans Jan 1, dt falls on the near side of the wrap
        min_dt = datetime(dt.year, min_rel_dt[0], min_rel_dt[1])
        max_dt = datetime(dt.year + 1, max_rel_dt[0], max_rel_dt[1])
    else:
        #Window spans Jan 1, dt falls on the far side of the wrap
        min_dt = datetime(dt.year - 1, min_rel_dt[0], min_rel_dt[1])
        max_dt = datetime(dt.year, max_rel_dt[0], max_rel_dt[1])
    return (dt >= min_dt) and (dt <= max_dt)
def rel_dt_list_idx(dt_list, min_rel_dt=(1,1), max_rel_dt=(12,31)):
    """Apply rel_dt_test to each datetime, returning a list of booleans"""
    out = []
    for dt in dt_list:
        out.append(rel_dt_test(dt, min_rel_dt, max_rel_dt))
    return out
def dt_check(dt):
    """Return True when dt is a datetime instance"""
    is_dt = isinstance(dt, datetime)
    return is_dt
def seconds2timedelta(s):
    """Convert seconds to a timedelta"""
    td = timedelta(seconds=s)
    return td
def timedelta2decdays(d):
    """Convert timedelta to decimal days"""
    sec_per_day = 86400.
    return d.total_seconds()/sec_per_day
def timedelta2decyear(d):
    """Convert timedelta to decimal years (spy is the module-level seconds-per-year constant)"""
    total_s = d.total_seconds()
    return total_s/spy
def timedelta_div(t, d):
    """Divide timedelta t by scalar d, returning a timedelta"""
    frac_s = t.total_seconds()/float(d)
    return timedelta(seconds=frac_s)
#Return center date between two datetime
#Useful for velocity maps
def center_date(dt1, dt2):
    """Return the midpoint between two datetimes"""
    return mean_date((dt1, dt2))
def mean_date(dt_list):
    """Calculate mean datetime from datetime list"""
    earliest = min(dt_list)
    #Average the offsets from the earliest time, then shift back
    offsets = [dt - earliest for dt in dt_list]
    mean_offset = sum(offsets, timedelta())/len(offsets)
    return earliest + mean_offset
def median_date(dt_list):
    """Calculate median datetime from datetime list

    NOTE(review): the input is not sorted here (sorting is commented out
    upstream), so this assumes dt_list is already time-ordered -- confirm
    with callers.
    """
    #dt_list_sort = sorted(dt_list)
    #Integer division required for indexing; the original '/' yields a float
    #under Python 3 and list indexing then raises TypeError
    idx = len(dt_list)//2
    if len(dt_list) % 2 == 0:
        md = mean_date([dt_list[idx-1], dt_list[idx]])
    else:
        md = dt_list[idx]
    return md
def mid_date(dt_list):
    """Return the date halfway between the earliest and latest datetimes"""
    start, end = min(dt_list), max(dt_list)
    return start + (end - start)/2
def dt_ptp(dt_list):
    """Return the peak-to-peak spread of a datetime list in decimal days"""
    span = max(dt_list) - min(dt_list)
    return span.total_seconds()/86400.0
def uniq_days_dt(dt_list):
    """Return datetimes floored to unique days, with first-occurrence indices"""
    uniq_o, first_idx = uniq_days_o(dt_list)
    uniq_dt = o2dt(uniq_o)
    return uniq_dt, first_idx
def uniq_days_o(dt_list):
    """Return unique ordinal days (floored) and indices of their first occurrence

    Accepts either datetimes (converted to ordinals) or float ordinals.
    """
    if isinstance(dt_list[0], float):
        o_list = dt_list
    else:
        o_list = dt2o(dt_list)
    #Round down to nearest day before deduplicating
    return np.unique(np.floor(o_list), return_index=True)
def round_dt(dt):
    """Round a datetime to the nearest day"""
    #Shift forward half a day, then truncate to midnight
    shifted = dt + timedelta(hours=12)
    return shifted.replace(hour=0, minute=0, second=0, microsecond=0)
def dt_range(dt1, dt2, interval):
    """Return evenly spaced datetimes starting at dt1 (inclusive), stepping by interval

    A partial final interval still produces a sample (count is rounded up).
    """
    span = dt2 - dt1
    nint = int((float(span.total_seconds())/interval.total_seconds()) + 0.999)
    return dt1 + np.arange(nint) * interval
def dt_cluster(dt_list, dt_thresh=16.0):
    """Find clusters of similar datetimes within datetime list

    dt_thresh is the gap (in decimal days) that separates clusters.
    Returns a list of dictionaries, one per cluster, carrying the cluster
    bounds ('break_*') and member timestamps/indices ('all_*').
    """
    if not isinstance(dt_list[0], float):
        o_list = dt2o(dt_list)
    else:
        o_list = dt_list
    o_list_sort = np.sort(o_list)
    o_list_sort_idx = np.argsort(o_list)
    d = np.diff(o_list_sort)
    #These are indices of breaks
    #Add one so each b starts a cluster
    b = np.nonzero(d > dt_thresh)[0] + 1
    #Add one to shape so we include final index
    b = np.hstack((0, b, d.shape[0] + 1))
    f_list = []
    for i in range(len(b)-1):
        #Need to subtract 1 here to give cluster bounds
        b_idx = [b[i], b[i+1]-1]
        b_dt = o_list_sort[b_idx]
        #These should be identical if input is already sorted
        b_idx_orig = o_list_sort_idx[b_idx]
        #NOTE(review): arange excludes b_idx[1], so the final member of each
        #cluster is omitted from the 'all_*' entries -- confirm intended
        all_idx = np.arange(b_idx[0], b_idx[1])
        all_sort = o_list_sort[all_idx]
        #These should be identical if input is already sorted
        all_idx_orig = o_list_sort_idx[all_idx]
        #Use a regular name rather than shadowing the dict builtin
        cluster = {}
        cluster['break_indices'] = b_idx_orig
        cluster['break_ts_o'] = b_dt
        cluster['break_ts_dt'] = o2dt(b_dt)
        cluster['all_indices'] = all_idx_orig
        cluster['all_ts_o'] = all_sort
        cluster['all_ts_dt'] = o2dt(all_sort)
        f_list.append(cluster)
    return f_list
#Seconds since epoch
def sinceEpoch(dt):
    """Return seconds since the epoch for dt (local time via time.mktime)"""
    tt = dt.timetuple()
    return time.mktime(tt)
#These can have some issues with daylight savings (sigh)
def dt2decyear(dt):
    """Convert datetime to decimal year"""
    year = dt.year
    year_start = datetime(year=year, month=1, day=1)
    next_year_start = datetime(year=year+1, month=1, day=1)
    #Fraction of the year elapsed, using epoch seconds
    elapsed = sinceEpoch(dt) - sinceEpoch(year_start)
    duration = sinceEpoch(next_year_start) - sinceEpoch(year_start)
    return year + elapsed/duration
def decyear2dt(t):
    """Convert decimal year to datetime"""
    year = int(t)
    frac = t - year
    start = datetime(year, 1, 1)
    #Scale the fractional part by the length of this particular year
    year_s = (start.replace(year=year + 1) - start).total_seconds()
    return start + timedelta(seconds=year_s * frac)
#Better to use astro libe or jdcal for julian to gregorian conversions
#Source: http://code.activestate.com/recipes/117215/
def dt2jd(dt):
    """Convert datetime to julian date (Julian Day Number of the calendar day)"""
    a = (14 - dt.month)//12
    yy = dt.year + 4800 - a
    mm = dt.month + 12*a - 3
    jdn = dt.day + ((153*mm + 2)//5) + 365*yy
    jdn += yy//4 - yy//100 + yy//400 - 32045
    return jdn
def jd2dt(jd):
    """Convert julian date to datetime

    Fractional julian dates are supported; the fractional day is converted
    to hours/minutes/seconds/microseconds.
    """
    n = int(round(float(jd)))
    a = n + 32044
    b = (4*a + 3)//146097
    c = a - (146097*b)//4
    d = (4*c + 3)//1461
    e = c - (1461*d)//4
    m = (5*e + 2)//153
    day = e + 1 - (153*m + 2)//5
    month = m + 3 - 12*(m//10)
    #Integer division required: the original 'm/10' yields a float under
    #Python 3 and datetime() rejects a float year
    year = 100*b + d - 4800 + m//10
    #Integer JD corresponds to noon; recover the fractional day
    tfrac = 0.5 + float(jd) - n
    tfrac_s = 86400.0 * tfrac
    minfrac, hours = np.modf(tfrac_s / 3600.)
    secfrac, minutes = np.modf(minfrac * 60.)
    microsec, seconds = np.modf(secfrac * 60.)
    return datetime(year, month, day, int(hours), int(minutes), int(seconds), int(microsec*1E6))
#This has not been tested
def gps2dt(gps_week, gps_ms):
    """Convert GPS week and ms to a datetime"""
    #GPS time starts at 1980-01-06 00:00:00
    epoch = datetime(1980, 1, 6, 0, 0, 0)
    week_offset = timedelta(weeks=gps_week)
    ms_offset = timedelta(milliseconds=gps_ms)
    return epoch + week_offset + ms_offset
def mat2dt(o):
    """Convert Matlab ordinal to Python datetime

    Need to account for AD 0 and AD 1 discrepancy between the two:
    http://sociograph.blogspot.com/2011/04/how-to-avoid-gotcha-when-converting.html
    """
    #Shift by 366 days between the Matlab and Python ordinal epochs
    return o2dt(o) - timedelta(days=366)
#Python datetime to matlab ordinal
def dt2mat(dt):
    """Convert Python datetime to Matlab ordinal"""
    #Shift by 366 days between the Python and Matlab ordinal epochs
    shifted = dt + timedelta(days=366)
    return dt2o(shifted)
#note
#If ma, need to set fill value to 0 when converting to ordinal
def dt2o(dt):
    """Convert datetime to Python ordinal

    Accepts a scalar datetime or an array of datetimes; matplotlib's date2num
    preserves the fractional day (datetime.toordinal would truncate it).
    """
    #return datetime.toordinal(dt)
    #This works for arrays of dt
    #return np.array(matplotlib.dates.date2num(dt))
    return matplotlib.dates.date2num(dt)
#Need to split ordinal into integer and decimal parts
def o2dt(o):
    """Convert Python ordinal to datetime

    Accepts a scalar ordinal or a sequence; num2date returns a single
    datetime for scalar input and a list for sequence input.
    """
    #omod = np.modf(o)
    #return datetime.fromordinal(int(omod[1])) + timedelta(days=omod[0])
    #Note: num2date returns dt or list of dt
    #This function should always return a list
    #return np.array(matplotlib.dates.num2date(o))
    return matplotlib.dates.num2date(o)
#Return integer DOY (julian)
def dt2doy(dt):
    """Convert datetime to integer day-of-year"""
    yday = dt.timetuple().tm_yday
    return int(yday)
#Year and day of year to datetime
#Add comment to http://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
#ordinal allows for days>365 and decimal days
def doy2dt(yr, j):
    """Convert year + DOY (possibly fractional, possibly >365) to datetime"""
    #Go through ordinals so fractional and >365 DOY values are handled
    jan1 = datetime(int(yr), 1, 1)
    return o2dt(dt2o(jan1) + j - 1)
def print_dt(dt):
    """Return dt formatted as YYYYMMDD_HHMM"""
    fmt = '%Y%m%d_%H%M'
    return dt.strftime(fmt)
#Generate a new files with time ordinal written to every pixel
#If dt_ref is provided, return time interval in decimal days
#Should add functionality to do relative doy
def gen_ts_fn(fn, dt_ref=None, ma=False):
    """Generate a raster whose pixels all contain the timestamp of input raster fn

    With dt_ref, pixels hold (fn time - dt_ref) in decimal days and the output
    gets a '_ts_rel.tif' suffix; otherwise pixels hold the Python ordinal of
    the filename timestamp. Returns the masked array if ma is True, else the
    output filename.
    """
    from osgeo import gdal
    from pygeotools.lib import iolib
    print("Generating timestamp for: %s" % fn)
    fn_ts = os.path.splitext(fn)[0]+'_ts.tif'
    #Regenerate when missing; always regenerate for the relative product
    if not os.path.exists(fn_ts) or dt_ref is not None:
        ds = gdal.Open(fn)
        #Should be ok with float ordinals here
        a = iolib.ds_getma(ds)
        ts = fn_getdatetime(fn)
        #Want to check that dt_ref is valid datetime object
        if dt_ref is not None:
            t = ts - dt_ref
            t = t.total_seconds()/86400.
            fn_ts = os.path.splitext(fn)[0]+'_ts_rel.tif'
        else:
            t = dt2o(ts)
        #Fill every unmasked pixel with the single timestamp value
        a[~np.ma.getmaskarray(a)] = t
        #Probably want to be careful about ndv here - could be 0 for rel
        #ndv = 1E20
        ndv = -9999.0
        a.set_fill_value(ndv)
        iolib.writeGTiff(a, fn_ts, ds)
    #NOTE(review): if fn_ts already exists and dt_ref is None, 'a' was never
    #assigned, so ma=True would raise UnboundLocalError here -- confirm usage
    if ma:
        return a
    else:
        return fn_ts
#Convert date listed in .meta to timestamp
#'Central Julian Date (CE) for Pair'
def tsx_cdate(t1):
    """Convert a TSX central Julian date to datetime"""
    return jd2dt(t1)
def tsx_cdate_print(t1):
    """Print a TSX central Julian date as YYYYMMDD_HHMM"""
    dt = tsx_cdate(t1)
    print(dt.strftime('%Y%m%d_%H%M'))
#Matlab to python o = matlab - 366
#Launch was June 15, 2007 at 0214 UTC
#Repeat is 11 days, period is 95 minutes
#Orbit 8992, 2454870.406, Jan:27:2009, Feb:18:2009, 09:43:59
#Orbit 40703, 2456953.658, Oct:17:2014, Oct:28:2014, 03:47:50
#This isn't perfect
def tsx_orbit2dt(orbits):
    """Approximate conversion from TSX orbit number(s) to datetime"""
    ref_ord = 733936.4245 - 365
    ref_orbit = 5516
    #11-day repeat cycle with 167 orbits per cycle, plus empirical offset
    ord_dates = ref_ord + (orbits - ref_orbit)*11./167. + 5.5
    return mat2dt(ord_dates)
#np vectorize form of functions
#Should clean these up - most can probably be handled directly using np arrays and np.datetime64
#Note: np.vectorize is an element-wise Python loop for convenience, not true vectorization
np_mat2dt = np.vectorize(mat2dt)
np_dt2mat = np.vectorize(dt2mat)
np_dt2o = np.vectorize(dt2o)
np_o2dt = np.vectorize(o2dt)
np_doy2dt = np.vectorize(doy2dt)
np_dt2doy = np.vectorize(dt2doy)
np_decyear2dt = np.vectorize(decyear2dt)
np_dt2decyear = np.vectorize(dt2decyear)
np_utc2dt = np.vectorize(datetime.utcfromtimestamp)
np_print_dt = np.vectorize(print_dt)
| |
#!/bin/python
#coding:utf-8
import logging
import random
from functools import cmp_to_key
import roomai
import roomai.games.common
from roomai.games.texasholdem import *
class TexasHoldemEnv(roomai.games.common.AbstractEnv):
'''
The TexasHoldem game environment
'''
params=dict()#@override
    def init(self, params = dict()):
        '''
        Initialize the TexasHoldem game environment with the initialization params.\n
        The initialization is a dict with some options\n
        1. param_num_normal_players: how many players are in the game, default 3\n
        2. param_dealer_id: the player id of the dealer, default random\n
        3. param_init_chips: the initialization chips, default [1000,1000,...]\n
        4. param_big_blind_bet: the number of chips for the big blind bet, default 10\n
        An example of the initialization param is {"param_num_normal_players":2,"backward_enable":True}

        :param params: the initialization params
        :return: infos, public_state, person_states, private_state
        '''
        #NOTE(review): 'params = dict()' is a mutable default argument; safe only
        #because params is never mutated here
        logger = roomai.get_logger()
        public_state = TexasHoldemStatePublic()
        self.__public_state_history__.append(public_state)
        #Read options from params, falling back to defaults
        if "param_num_normal_players" in params:
            public_state.__param_num_normal_players__ = params["param_num_normal_players"]
        else:
            public_state.__param_num_normal_players__ = 3
        if "param_dealer_id" in params:
            public_state.__param_dealer_id__ = params["param_dealer_id"]
        else:
            #Default: pick a random dealer seat
            public_state.__param_dealer_id__ = int(random.random() * public_state.param_num_normal_players)
        if "param_init_chips" in params:
            public_state.__param_init_chips__ = params["param_init_chips"]
        else:
            public_state.__param_init_chips__ = [1000 for i in range(public_state.param_num_normal_players)]
        if "param_big_blind_bet" in params:
            public_state.__param_big_blind_bet__ = params["param_big_blind_bet"]
        else:
            public_state.__param_big_blind_bet__ = 10
        ## check initialization config
        if len(public_state.param_init_chips) != public_state.param_num_normal_players:
            raise ValueError("len(env.param_initialization_chips) %d != param_num_normal_players %d" % (len(public_state.param_init_chips), public_state.param_num_normal_players))
        if public_state.param_num_normal_players > 6:
            raise ValueError(
                "The maximum of the number of players is 6. Now, the number of players = %d" % (public_state.param_num_normal_players))
        ## public info
        #Small and big blind sit immediately left of the dealer
        small = (public_state.param_dealer_id + 1) % public_state.param_num_normal_players
        big = (public_state.param_dealer_id + 2) % public_state.param_num_normal_players
        pu = public_state
        pu.__is_fold__ = [False for i in range(public_state.param_num_normal_players)]
        pu.__num_fold__ = 0
        pu.__is_allin__ = [False for i in range(public_state.param_num_normal_players)]
        pu.__num_allin__ = 0
        pu.__is_needed_to_action__ = [True for i in range(public_state.param_num_normal_players)]
        pu.__num_needed_to_action__ = pu.param_num_normal_players
        pu.__bets__ = [0 for i in range(public_state.param_num_normal_players)]
        pu.__chips__ = list(public_state.param_init_chips)
        pu.__stage__ = Stage.firstStage
        #turn == param_num_normal_players is the chance player's id; it deals first
        pu.__turn__ = pu.param_num_normal_players
        pu.__public_cards__ = []
        pu.__previous_id__ = None
        pu.__previous_action__ = None
        #Post the big blind (forced all-in if the stack can't cover it)
        if pu.chips[big] > public_state.param_big_blind_bet:
            pu.__chips__[big] -= public_state.param_big_blind_bet
            pu.__bets__[big] += public_state.param_big_blind_bet
        else:
            pu.__bets__[big] = pu.chips[big]
            pu.__chips__[big] = 0
            pu.__is_allin__[big] = True
            pu.__num_allin__ += 1
        pu.__max_bet_sofar__ = pu.bets[big]
        pu.__raise_account__ = public_state.param_big_blind_bet
        #Post the small blind, half the big blind
        #NOTE(review): '/ 2' is float division under Python 3, so chips/bets can
        #become floats for odd big blinds -- confirm intended
        if pu.chips[small] > public_state.param_big_blind_bet / 2:
            pu.__chips__[small] -= public_state.param_big_blind_bet /2
            pu.__bets__[small] += public_state.param_big_blind_bet /2
        else:
            pu.__bets__[small] = pu.chips[small]
            pu.__chips__[small] = 0
            pu.__is_allin__[small] = True
            pu.__num_allin__ += 1
        pu.__is_terminal__ = False
        pu.__scores__ = [0 for i in range(public_state.param_num_normal_players)]
        # private info
        pr = TexasHoldemStatePrivate()
        self.__private_state_history__.append(pr)
        pr.__keep_cards__ = []
        ##pr.__keep_cards__ =allcards[public_state.param_num_normal_players*2:public_state.param_num_normal_players*2+5]
        ## person info
        #One extra person state for the chance player (index param_num_normal_players)
        self.__person_states_history__ = [[] for i in range(pu.param_num_normal_players + 1)]
        for i in range(pu.param_num_normal_players + 1):
            self.__person_states_history__[i].append(TexasHoldemStatePerson())
            self.__person_states_history__[i][0].__id__ = i
            self.__person_states_history__[i][0].__hand_cards__ = []
        #The first actor (the chance player) gets the available actions
        self.__person_states_history__[pu.turn][0].__available_actions__ = self.available_actions()
        infos = self.__gen_infos__()
        if logger.level <= logging.DEBUG:
            logger.debug("TexasHoldemEnv.init: param_num_normal_players = %d, param_dealer_id = %d, param_initialization_chip = %d, param_big_blind_bet = %d"%(\
                pu.param_num_normal_players,\
                pu.param_dealer_id,\
                pu.param_init_chips[0],\
                pu.param_big_blind_bet
            ))
        return infos, self.__public_state_history__, self.__person_states_history__, self.__private_state_history__, self.__playerid_action_history__
    ## we need ensure the action is valid
    #@Overide
    def forward(self, action):
        '''
        The TexasHoldem game environments steps with the action taken by the current player

        :param action: The action taken by the current player
        :return: infos, public_state, person_states, private_state
        '''
        logger = roomai.get_logger()
        #Deep-copy the latest states so earlier history snapshots stay untouched
        pu = self.__public_state_history__[-1].__deepcopy__()
        pes = [self.__person_states_history__[i][-1].__deepcopy__() for i in range(len(self.__person_states_history__))]
        pr = self.__private_state_history__[-1].__deepcopy__()
        self.__public_state_history__.append(pu)
        for i in range(len(pes)):
            self.__person_states_history__[i].append(pes[i])
        self.__private_state_history__.append(pr)
        #Reject any action the acting player was not offered
        if action.key not in pes[pu.turn].available_actions:
            logger.critical("action=%s is invalid" % (action.key))
            raise ValueError("action=%s is invalid" % (action.key))
        pes[pu.turn].__available_actions__ = dict()
        self.__playerid_action_history__.append(roomai.games.common.ActionRecord(pu.turn,action))
        #Chance actions deal cards; once all hole + 5 community cards are dealt,
        #betting starts with the player left of the big blind
        if isinstance(action, TexasHoldemActionChance) == True:
            self.__action_chance__(action)
            if len(pr.all_used_cards) == (len(pes)-1) * 2 + 5:
                pu.__turn__ = (pu.param_dealer_id + 2 + 1)%pu.param_num_normal_players
                pes[pu.turn].__available_actions__ = self.available_actions()
            infos = self.__gen_infos__()
            return infos, self.__public_state_history__, self.__person_states_history__, self.__private_state_history__, self.__playerid_action_history__
        #Dispatch normal betting actions
        if action.option == TexasHoldemAction.Fold:
            self.__action_fold__(action)
        elif action.option == TexasHoldemAction.Check:
            self.__action_check__(action)
        elif action.option == TexasHoldemAction.Call:
            self.__action_call__(action)
        elif action.option == TexasHoldemAction.Raise:
            self.__action_raise__(action)
        elif action.option == TexasHoldemAction.AllIn:
            self.__action_allin__(action)
        else:
            raise Exception("action.option(%s) not in [Fold, Check, Call, Raise, AllIn]"%(action.option))
        pu.__previous_id__ = pu.turn
        pu.__previous_action__ = action
        pu.__is_terminal__ = False
        pu.__scores__ = [0 for i in range(self.__public_state_history__[-1].param_num_normal_players)]
        # computing_score
        if TexasHoldemEnv.__is_compute_scores__(self.__public_state_history__[-1]):
            ## need showdown
            pu.__public_cards__ = pr.keep_cards[0:5]
            pu.__is_terminal__ = True
            pu.__scores__ = self.__compute_scores__()
        # enter into the next stage
        elif TexasHoldemEnv.__is_nextround__(self.__public_state_history__[-1]):
            #Reveal the flop (3), turn (1), or river (1) cards
            add_cards = []
            if pu.stage == Stage.firstStage: add_cards = pr.keep_cards[0:3]
            if pu.stage == Stage.secondStage: add_cards = [pr.keep_cards[3]]
            if pu.stage == Stage.thirdStage: add_cards = [pr.keep_cards[4]]
            pu.__public_cards__.extend(add_cards)
            pu.__stage__ = pu.stage + 1
            #Rebuild the action queue: everyone not folded and not all-in must act
            pu.__num_needed_to_action__ = 0
            pu.__is_needed_to_action__ = [False for i in range(pu.param_num_normal_players)]
            for i in range(pu.param_num_normal_players):
                if pu.__is_fold__[i] != True and pu.__is_allin__[i] != True:
                    pu.__is_needed_to_action__[i] = True
                    pu.__num_needed_to_action__ += 1
            #New betting round starts with the first active player after the dealer
            pu.__turn__ = pu.param_dealer_id
            pu.__turn__ = self.__next_player__(pu)
            pes[self.__public_state_history__[-1].turn].__available_actions__ = self.available_actions()
        ##normal
        else:
            pu.__turn__ = self.__next_player__(pu)
            self.__person_states_history__[self.__public_state_history__[-1].turn][-1].__available_actions__ = self.available_actions()
        logger = roomai.get_logger()
        if logger.level <= logging.DEBUG:
            logger.debug("TexasHoldemEnv.forward: num_fold+num_allin = %d+%d = %d, action = %s, stage = %d"%(\
                self.__public_state_history__[-1].num_fold,\
                self.__public_state_history__[-1].num_allin,\
                self.__public_state_history__[-1].num_fold + self.__public_state_history__[-1].num_allin,\
                action.key,\
                self.__public_state_history__[-1].stage\
            ))
        infos = self.__gen_infos__()
        return infos, self.__public_state_history__, self.__person_states_history__, self.__private_state_history__, self.__playerid_action_history__
    def available_actions(self):
        '''
        Generate all valid actions given the public state and the person state

        :return: dict mapping action key -> action for the current actor
        '''
        pu = self.__public_state_history__[-1]
        pes = [self.__person_states_history__[i][-1] for i in range(len(self.__person_states_history__))]
        pr = self.__private_state_history__[-1]
        #Dealing phase: any card not yet dealt is a valid chance action
        #((len(pes)-1)*2 hole cards + 5 community cards in total)
        if len(pr.all_used_cards) < (len(pes)-1) * 2 + 5:
            candidate_chance_actions = dict()
            all_used_card_keys = set([c.key for c in pr.all_used_cards])
            for card_key in AllPokerCardsDict:
                if card_key not in all_used_card_keys:
                    chance_action = TexasHoldemActionChance.lookup(card_key)
                    candidate_chance_actions[chance_action.key] = chance_action
            return candidate_chance_actions
        pe = pes[pu.turn]
        turn = pu.turn
        key_actions = dict()
        #No actions for an out-of-turn, folded, all-in, or broke player
        if pu.turn != pe.id:
            return dict()
        if pu.is_allin[turn] == True or pu.is_fold[turn] == True:
            return dict()
        if pu.chips[turn] == 0:
            return dict()
        ## for fold (always available)
        action = TexasHoldemAction.lookup(TexasHoldemAction.Fold + "_0")
        # if cls.is_action_valid(action,public_state, person_state):
        key_actions[action.key] = action
        ## for check: only when the player already matches the max bet
        if pu.bets[turn] == pu.max_bet_sofar:
            action = TexasHoldemAction.lookup(TexasHoldemAction.Check + "_0")
            # if cls.is_action_valid(action, public_state, person_state):
            key_actions[action.key] = action
        ## for call: only when the stack can cover the difference
        if pu.bets[turn] != pu.max_bet_sofar and pu.chips[turn] > pu.max_bet_sofar - pu.bets[turn]:
            action = TexasHoldemAction.lookup(TexasHoldemAction.Call + "_%d" % (pu.max_bet_sofar - pu.bets[turn]))
            # if cls.is_action_valid(action, public_state, person_state):
            key_actions[action.key] = action
        ## for raise: multiples of raise_account above the call amount
        # if pu.bets[turn] != pu.max_bet_sofar and \
        if pu.chips[turn] > pu.max_bet_sofar - pu.bets[turn] + pu.raise_account:
            num = int((pu.chips[turn] - (pu.max_bet_sofar - pu.bets[turn])) / pu.raise_account)
            for i in range(1, num + 1):
                price = pu.max_bet_sofar - pu.bets[turn] + pu.raise_account * i
                #A raise of the entire stack is represented by AllIn instead
                if price == pu.chips[pu.turn]: continue
                action = TexasHoldemAction.lookup(TexasHoldemAction.Raise + "_%d" % (price))
                # if cls.is_action_valid(action, public_state, person_state):
                key_actions[action.key] = action
        ## for all in (always available while the player has chips)
        action = TexasHoldemAction.lookup(TexasHoldemAction.AllIn + "_%d" % (pu.chips[turn]))
        # if cls.is_action_valid(action, public_state, person_state):
        key_actions[action.key] = action
        return key_actions
    def __compute_scores__(self):
        """Compute final scores for all normal players, expressed in big blinds

        Handles the everyone-folded case (winner takes the whole pot) and the
        showdown case with tied hands and side pots. Also moves the winnings
        back into pu.__chips__.
        """
        pu = self.__public_state_history__[-1]
        pes = [self.__person_states_history__[i][-1] for i in range(len(self.__person_states_history__))]
        pr = self.__private_state_history__[-1]
        ## compute score before showdown, the winner takes all
        if pu.param_num_normal_players == pu.num_fold + 1:
            scores = [0 for i in range(pu.param_num_normal_players)]
            for i in range(pu.param_num_normal_players):
                if pu.is_fold[i] == False:
                    scores[i] = sum(pu.bets)
                    break
        ## compute score after showdown
        else:
            scores = [0 for i in range(pu.param_num_normal_players)]
            playerid_pattern_bets = [] #for not_quit players
            for i in range(pu.param_num_normal_players):
                if pu.is_fold[i] == True: continue
                hand_pattern_cards = self.__cards2pattern_cards__(pes[i].hand_cards, pr.keep_cards)
                playerid_pattern_bets.append((i,hand_pattern_cards,pu.bets[i]))
            #NOTE(review): this loop has no effect ('i = 0' is dead); looks like
            #leftover debug code
            for playerid_pattern_bet in playerid_pattern_bets:
                if len(playerid_pattern_bet[1][1]) < 5:
                    i = 0
            #Sort weakest hand first, then walk from strongest to weakest,
            #grouping tied hands and settling side pots group by group
            playerid_pattern_bets.sort(key=lambda x:self.compute_rank_pattern_cards(x[1]))
            pot_line = 0
            previous = None
            tmp_playerid_pattern_bets = []
            for i in range(len(playerid_pattern_bets)-1,-1,-1):
                if previous == None:
                    tmp_playerid_pattern_bets.append(playerid_pattern_bets[i])
                    previous = playerid_pattern_bets[i]
                elif self.__compare_patterns_cards__(playerid_pattern_bets[i][1], previous[1]) == 0:
                    #Same strength: extend the current tied group
                    tmp_playerid_pattern_bets.append(playerid_pattern_bets[i])
                    previous = playerid_pattern_bets[i]
                else:
                    #Weaker hand found: pay out the current tied group first
                    #(smallest bet first so side pots are settled in order)
                    tmp_playerid_pattern_bets.sort(key = lambda x:x[2])
                    for k in range(len(tmp_playerid_pattern_bets)):
                        num1 = len(tmp_playerid_pattern_bets) - k
                        sum1 = 0
                        max_win_score = pu.bets[tmp_playerid_pattern_bets[k][0]]
                        for p in range(pu.param_num_normal_players):
                            sum1 += min(max(0, pu.bets[p] - pot_line), max_win_score)
                        #Split this pot among the remaining tied winners
                        #NOTE(review): 'sum1 / num1' is float division under
                        #Python 3, so chip counts can become floats -- confirm
                        for p in range(k, len(tmp_playerid_pattern_bets)):
                            scores[tmp_playerid_pattern_bets[p][0]] += sum1 / num1
                        #Remainder chips go to the dealer
                        scores[pu.param_dealer_id] += sum1 % num1
                        if pot_line <= max_win_score:
                            pot_line = max_win_score
                    tmp_playerid_pattern_bets = []
                    tmp_playerid_pattern_bets.append(playerid_pattern_bets[i])
                    previous = playerid_pattern_bets[i]
            #Pay out the final (weakest surviving) tied group
            if len(tmp_playerid_pattern_bets) > 0:
                tmp_playerid_pattern_bets.sort(key = lambda x:x[2])
                for i in range(len(tmp_playerid_pattern_bets)):
                    num1 = len(tmp_playerid_pattern_bets) - i
                    sum1 = 0
                    max_win_score = pu.bets[tmp_playerid_pattern_bets[i][0]]
                    for p in range(pu.param_num_normal_players):
                        sum1 += min(max(0, pu.bets[p] - pot_line), max_win_score)
                    for p in range(i, len(tmp_playerid_pattern_bets)):
                        scores[tmp_playerid_pattern_bets[p][0]] += sum1 / num1
                    scores[pu.param_dealer_id] += sum1 % num1
                    if pot_line <= max_win_score: pot_line = max_win_score
        #Move winnings into stacks and convert scores to net chip change
        for p in range(pu.param_num_normal_players):
            pu.__chips__[p] += scores[p]
            scores[p] -= pu.bets[p]
        #Normalize net change by the big blind
        for p in range(pu.param_num_normal_players):
            scores[p] /= pu.param_big_blind_bet * 1.0
        return scores
def __action_chance__(self, action):
pu = self.__public_state_history__[-1]
pes = [self.__person_states_history__[i][-1] for i in range(len(self.__person_states_history__))]
pr = self.__private_state_history__[-1]
card = action.card
num = len(pr.all_used_cards)
if num < (len(pes)-1) * 2:
idx = int(num / 2)
pes[idx].__hand_cards__.append(card)
elif num < (len(pes)-1) * 2 + 5:
pr.__keep_cards__.append(card)
else:
logger = roomai.get_logger()
logger.fatal("the chance action in the invalid condition")
pr.__all_used_cards__.append(card)
def __action_fold__(self, action):
pu = self.__public_state_history__[-1]
pu.__is_fold__[pu.turn] = True
pu.__num_fold__ += 1
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
def __action_check__(self, action):
pu = self.__public_state_history__[-1]
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
def __action_call__(self, action):
pu = self.__public_state_history__[-1]
pu.__chips__[pu.turn] -= action.price
pu.__bets__[pu.turn] += action.price
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
def __action_raise__(self, action):
pu = self.__public_state_history__[-1]
pu.__raise_account__ = action.price + pu.bets[pu.turn] - pu.max_bet_sofar
pu.__chips__[pu.turn] -= action.price
pu.__bets__[pu.turn] += action.price
pu.__max_bet_sofar__ = pu.bets[pu.turn]
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
p = (pu.turn + 1)%pu.param_num_normal_players
while p != pu.turn:
if pu.is_allin[p] == False and pu.is_fold[p] == False and pu.is_needed_to_action[p] == False:
pu.__num_needed_to_action__ += 1
pu.__is_needed_to_action__[p] = True
p = (p + 1) % pu.param_num_normal_players
def __action_allin__(self, action):
pu = self.__public_state_history__[-1]
pu.__is_allin__[pu.turn] = True
pu.__num_allin__ += 1
pu.__bets__[pu.turn] += action.price
pu.__chips__[pu.turn] = 0
pu.__is_needed_to_action__[pu.turn] = False
pu.__num_needed_to_action__ -= 1
if pu.bets[pu.turn] > pu.max_bet_sofar:
pu.__max_bet_sofar__ = pu.bets[pu.turn]
p = (pu.turn + 1) % pu.param_num_normal_players
while p != pu.turn:
if pu.is_allin[p] == False and pu.is_fold[p] == False and pu.is_needed_to_action[p] == False:
pu.__num_needed_to_action__ += 1
pu.__is_needed_to_action__[p] = True
p = (p + 1) % pu.param_num_normal_players
pu.__max_bet_sofar__ = pu.bets[pu.turn]
#####################################Utils Function ##############################
# override
@classmethod
def compete_silent(cls, env, players):
'''
Use the game environment to hold a compete_silent for the players
:param env: The game environment
:param players: The normal players (without the chance player)
:return: scores for the players
'''
total_scores = [0 for i in range(len(players))]
total_count = 10
num_normal_players = len(players)
players = players + [roomai.games.common.RandomPlayerChance()]
for count in range(total_count):
chips = [(1000 + int(random.random() * 200)) for i in range(len(players))]
dealer_id = int(random.random() * len(players))
big_blind_bet = 50
infos, public, persons, private, action_history = env.init({"chips": chips,
"param_num_normal_players": num_normal_players,
"dealer_id": dealer_id,
"big_blind_bet": big_blind_bet})
for i in range(len(players)):
players[i].receive_info(infos[i])
while public[-1].is_terminal == False:
turn = public[-1].turn
action = players[turn].take_action()
# print len(infos[turn].person_state.available_actions),action.key(),turn
infos, public, persons, private, _= env.forward(action)
for i in range(len(players)):
players[i].receive_info(infos[i])
for i in range(len(players)):
players[i].receive_info(infos[i])
for i in range(num_normal_players):
total_scores[i] += public[-1].scores[i]
if (count + 1) % 500 == 0:
tmp_scores = [0 for i in range(len(total_scores))]
for i in range(len(total_scores)):
tmp_scores[i] = total_scores[i] / (count + 1)
roomai.get_logger().info("TexasHoldem completes %d competitions, scores=%s" % (
count + 1, ",".join([str(i) for i in tmp_scores])))
for i in range(len(total_scores)):
total_scores[i] /= 1.0 * total_count
return total_scores
@classmethod
def __next_player__(self, pu):
i = pu.turn
if pu.num_needed_to_action == 0:
return -1
p = (i+1)%pu.param_num_normal_players
while pu.is_needed_to_action[p] == False:
p = (p+1)%pu.param_num_normal_players
return p
@classmethod
def __is_compute_scores__(self, pu):
'''
:return: A boolean variable indicates whether is it time to compute scores
'''
if pu.param_num_normal_players == pu.num_fold + 1:
return True
# below need showdown
if pu.param_num_normal_players <= pu.num_fold + pu.num_allin +1 and pu.num_needed_to_action == 0:
return True
if pu.stage == Stage.fourthStage and self.__is_nextround__(pu):
return True
return False
@classmethod
def __is_nextround__(self, public_state):
'''
:return: A boolean variable indicates whether is it time to enter the next stage
'''
return public_state.num_needed_to_action == 0
@classmethod
def __cards2pattern_cards__(cls, hand_cards, remaining_cards):
key = cmp_to_key(roomai.games.texasholdem.PokerCard.compare)
pointrank2cards = dict()
for c in hand_cards + remaining_cards:
if c.point_rank in pointrank2cards:
pointrank2cards[c.point_rank].append(c)
else:
pointrank2cards[c.point_rank] = [c]
for p in pointrank2cards:
pointrank2cards[p].sort(key = key)
suitrank2cards = dict()
for c in hand_cards + remaining_cards:
if c.suit_rank in suitrank2cards:
suitrank2cards[c.suit_rank].append(c)
else:
suitrank2cards[c.suit_rank] = [c]
for s in suitrank2cards:
suitrank2cards[s].sort(key=key)
num2point = [[], [], [], [], []]
for p in pointrank2cards:
num = len(pointrank2cards[p])
num2point[num].append(p)
for i in range(5):
num2point[num].sort()
sorted_point = []
for p in pointrank2cards:
sorted_point.append(p)
sorted_point.sort()
##straight_samesuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 5:
numStraight = 1
for i in range(len(suitrank2cards[s]) - 2, -1, -1):
if suitrank2cards[s][i].point_rank == suitrank2cards[s][i + 1].point_rank - 1:
numStraight += 1
else:
numStraight = 1
if numStraight == 5:
pattern = AllCardsPattern["Straight_SameSuit"]
return (pattern,suitrank2cards[s][i:i + 5])
##4_1
if len(num2point[4]) > 0:
p4 = num2point[4][0]
p1 = -1
for i in range(len(sorted_point) - 1, -1, -1):
if sorted_point[i] != p4:
p1 = sorted_point[i]
break
pattern = AllCardsPattern["4_1"]
cards = pointrank2cards[p4][0:4]
cards.append(pointrank2cards[p1][0])
return (pattern,cards)
##3_2
if len(num2point[3]) >= 1:
pattern = AllCardsPattern["3_2"]
if len(num2point[3]) == 2:
p3 = num2point[3][1]
cards = pointrank2cards[p3][0:3]
p2 = num2point[3][0]
cards.append(pointrank2cards[p2][0])
cards.append(pointrank2cards[p2][1])
return (pattern,cards)
if len(num2point[2]) >= 1:
p3 = num2point[3][0]
cards = pointrank2cards[p3][0:3]
p2 = num2point[2][len(num2point[2]) - 1]
cards.append(pointrank2cards[p2][0])
cards.append(pointrank2cards[p2][1])
return (pattern,cards)
##SameSuit
for s in suitrank2cards:
if len(suitrank2cards[s]) >= 5:
pattern = AllCardsPattern["SameSuit"]
len1 = len(suitrank2cards[s])
cards = suitrank2cards[s][len1 - 5:len1]
return (pattern,cards)
##Straight_DiffSuit
numStraight = 1
for idx in range(len(sorted_point) - 2, -1, -1):
if sorted_point[idx] + 1 == sorted_point[idx]:
numStraight += 1
else:
numStraight = 1
if numStraight == 5:
pattern = AllCardsPattern["Straight_DiffSuit"]
cards = []
for p in range(idx, idx + 5):
point = sorted_point[p]
cards.append(pointrank2cards[point][0])
return (pattern,cards)
##3_1_1
if len(num2point[3]) == 1:
pattern = AllCardsPattern["3_1_1"]
p3 = num2point[3][0]
cards = pointrank2cards[p3][0:3]
num = 0
for i in range(len(sorted_point) - 1, -1, -1):
p = sorted_point[i]
if p != p3:
cards.append(pointrank2cards[p][0])
num += 1
if num == 2: break
return (pattern,cards)
##2_2_1
if len(num2point[2]) >= 2:
pattern = AllCardsPattern["2_2_1"]
p21 = num2point[2][len(num2point[2]) - 1]
cards = []
for c in pointrank2cards[p21]:
cards.append(c)
p22 = num2point[2][len(num2point[2]) - 2]
for c in pointrank2cards[p22]:
cards.append(c)
flag = False
for i in range(len(sorted_point) - 1, -1, -1):
p = sorted_point[i]
if p != p21 and p != p22:
c = pointrank2cards[p][0]
cards.append(c)
flag = True
if flag == True: break;
return (pattern,cards)
##2_1_1_1
if len(num2point[2]) == 1:
pattern = AllCardsPattern["2_1_1_1"]
p2 = num2point[2][0]
cards = pointrank2cards[p2][0:2]
num = 0
for p in range(len(sorted_point) - 1, -1, -1):
p1 = sorted_point[p]
if p1 != p2:
cards.append(pointrank2cards[p1][0])
if num == 3: break
return (pattern,cards)
##1_1_1_1_1
pattern = AllCardsPattern["1_1_1_1_1"]
count = 0
cards = []
for i in range(len(sorted_point) - 1, -1, -1):
p = sorted_point[i]
for c in pointrank2cards[p]:
cards.append(c)
count += 1
if count == 5: break
if count == 5: break
return (pattern,cards)
@classmethod
def __compare_handcards__(cls, hand_card0, hand_card1, keep_cards):
pattern0 = TexasHoldemEnv.__cards2pattern_cards__(hand_card0, keep_cards)
pattern1 = TexasHoldemEnv.__cards2pattern_cards__(hand_card1, keep_cards)
diff = cls.__compare_patterns_cards__(pattern0, pattern1)
return diff
@classmethod
def compute_rank_pattern_cards(cls, pattern_cards):
rank = pattern_cards[0][5] * 1000
for i in range(5):
rank *= 1000
rank += pattern_cards[1][i].point_rank
return rank
@classmethod
def __compare_patterns_cards__(cls, p1, p2):
return cls.compute_rank_pattern_cards(p1) - cls.compute_rank_pattern_cards(p2)
def __deepcopy__(self, memodict={}, newinstance = None):
if newinstance is None:
newinstance = TexasHoldemEnv()
newinstance = super(TexasHoldemEnv, self).__deepcopy__(newinstance=newinstance)
return newinstance
| |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import netaddr
from tempest.api.network import base
from tempest.api.network import base_security_groups as sec_base
from tempest.common import custom_matchers
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class PortsTestJSON(sec_base.BaseSecGroupTest):
    """
    Test the following operations for ports:
        port create
        port delete
        port list
        port show
        port update
    """
    @classmethod
    def resource_setup(cls):
        # Shared network and port reused by the read-only tests below.
        super(PortsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.port = cls.create_port(cls.network)
    def _delete_port(self, port_id):
        """Delete a port and verify it no longer appears in the port list."""
        self.client.delete_port(port_id)
        body = self.client.list_ports()
        ports_list = body['ports']
        self.assertFalse(port_id in [n['id'] for n in ports_list])
    @test.attr(type='smoke')
    @test.idempotent_id('c72c1c0c-2193-4aca-aaa4-b1442640f51c')
    def test_create_update_delete_port(self):
        """Port lifecycle: create, update name/admin_state_up, then delete."""
        # Verify port creation
        body = self.client.create_port(network_id=self.network['id'])
        port = body['port']
        # Schedule port deletion with verification upon test completion
        self.addCleanup(self._delete_port, port['id'])
        self.assertTrue(port['admin_state_up'])
        # Verify port update
        new_name = "New_Port"
        body = self.client.update_port(port['id'],
                                       name=new_name,
                                       admin_state_up=False)
        updated_port = body['port']
        self.assertEqual(updated_port['name'], new_name)
        self.assertFalse(updated_port['admin_state_up'])
    @test.idempotent_id('67f1b811-f8db-43e2-86bd-72c074d4a42c')
    def test_create_bulk_port(self):
        """Bulk-create one port on each of two networks in a single call."""
        network1 = self.network
        name = data_utils.rand_name('network-')
        network2 = self.create_network(network_name=name)
        network_list = [network1['id'], network2['id']]
        port_list = [{'network_id': net_id} for net_id in network_list]
        body = self.client.create_bulk_port(port_list)
        created_ports = body['ports']
        port1 = created_ports[0]
        port2 = created_ports[1]
        self.addCleanup(self._delete_port, port1['id'])
        self.addCleanup(self._delete_port, port2['id'])
        self.assertEqual(port1['network_id'], network1['id'])
        self.assertEqual(port2['network_id'], network2['id'])
        self.assertTrue(port1['admin_state_up'])
        self.assertTrue(port2['admin_state_up'])
    @classmethod
    def _get_ipaddress_from_tempest_conf(cls):
        """Return first subnet gateway for configured CIDR """
        if cls._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        elif cls._ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.tenant_network_v6_cidr)
        # NOTE(review): IPAddress(IPNetwork) presumably yields the network's
        # base address here — confirm against the netaddr version in use.
        return netaddr.IPAddress(cidr)
    @test.attr(type='smoke')
    @test.idempotent_id('0435f278-40ae-48cb-a404-b8a087bc09b1')
    def test_create_port_in_allowed_allocation_pools(self):
        """A port's fixed IP must come from the subnet's allocation pool."""
        network = self.create_network()
        net_id = network['id']
        address = self._get_ipaddress_from_tempest_conf()
        allocation_pools = {'allocation_pools': [{'start': str(address + 4),
                                                  'end': str(address + 6)}]}
        subnet = self.create_subnet(network, **allocation_pools)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        body = self.client.create_port(network_id=net_id)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        ip_address = port['fixed_ips'][0]['ip_address']
        start_ip_address = allocation_pools['allocation_pools'][0]['start']
        end_ip_address = allocation_pools['allocation_pools'][0]['end']
        ip_range = netaddr.IPRange(start_ip_address, end_ip_address)
        self.assertIn(ip_address, ip_range)
    @test.attr(type='smoke')
    @test.idempotent_id('c9a685bd-e83f-499c-939f-9f7863ca259f')
    def test_show_port(self):
        """show_port returns the same attributes the port was created with."""
        # Verify the details of port
        body = self.client.show_port(self.port['id'])
        port = body['port']
        self.assertIn('id', port)
        # TODO(Santosh)- This is a temporary workaround to compare create_port
        # and show_port dict elements.Remove this once extra_dhcp_opts issue
        # gets fixed in neutron.( bug - 1365341.)
        self.assertThat(self.port,
                        custom_matchers.MatchesDictExceptForKeys
                        (port, excluded_keys=['extra_dhcp_opts']))
    @test.idempotent_id('45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd')
    def test_show_port_fields(self):
        """show_port with a fields filter returns exactly those fields."""
        # Verify specific fields of a port
        fields = ['id', 'mac_address']
        body = self.client.show_port(self.port['id'],
                                     fields=fields)
        port = body['port']
        self.assertEqual(sorted(port.keys()), sorted(fields))
        for field_name in fields:
            self.assertEqual(port[field_name], self.port[field_name])
    @test.attr(type='smoke')
    @test.idempotent_id('cf95b358-3e92-4a29-a148-52445e1ac50e')
    def test_list_ports(self):
        """The shared port created in resource_setup appears in list_ports."""
        # Verify the port exists in the list of all ports
        body = self.client.list_ports()
        ports = [port['id'] for port in body['ports']
                 if port['id'] == self.port['id']]
        self.assertNotEmpty(ports, "Created port not found in the list")
    @test.idempotent_id('e7fe260b-1e79-4dd3-86d9-bec6a7959fc5')
    def test_port_list_filter_by_ip(self):
        """list_ports filtered by fixed_ips returns only matching ports."""
        # Create network and subnet
        network = self.create_network()
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        # Create two ports
        port_1 = self.client.create_port(network_id=network['id'])
        self.addCleanup(self.client.delete_port, port_1['port']['id'])
        port_2 = self.client.create_port(network_id=network['id'])
        self.addCleanup(self.client.delete_port, port_2['port']['id'])
        # List ports filtered by fixed_ips
        port_1_fixed_ip = port_1['port']['fixed_ips'][0]['ip_address']
        fixed_ips = 'ip_address=' + port_1_fixed_ip
        port_list = self.client.list_ports(fixed_ips=fixed_ips)
        # Check that we got the desired port
        ports = port_list['ports']
        tenant_ids = set([port['tenant_id'] for port in ports])
        self.assertEqual(len(tenant_ids), 1,
                         'Ports from multiple tenants are in the list resp')
        port_ids = [port['id'] for port in ports]
        fixed_ips = [port['fixed_ips'] for port in ports]
        port_ips = []
        for addr in fixed_ips:
            port_ips.extend([port['ip_address'] for port in addr])
        port_net_ids = [port['network_id'] for port in ports]
        self.assertIn(port_1['port']['id'], port_ids)
        self.assertIn(port_1_fixed_ip, port_ips)
        self.assertIn(network['id'], port_net_ids)
    @test.idempotent_id('5ad01ed0-0e6e-4c5d-8194-232801b15c72')
    def test_port_list_filter_by_router_id(self):
        """list_ports filtered by a router's device_id returns its port."""
        # Create a router
        network = self.create_network()
        self.addCleanup(self.networks_client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        router = self.create_router(data_utils.rand_name('router-'))
        self.addCleanup(self.client.delete_router, router['id'])
        port = self.client.create_port(network_id=network['id'])
        # Add router interface to port created above
        self.client.add_router_interface_with_port_id(
            router['id'], port['port']['id'])
        self.addCleanup(self.client.remove_router_interface_with_port_id,
                        router['id'], port['port']['id'])
        # List ports filtered by router_id
        port_list = self.client.list_ports(device_id=router['id'])
        ports = port_list['ports']
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['id'], port['port']['id'])
        self.assertEqual(ports[0]['device_id'], router['id'])
    @test.idempotent_id('ff7f117f-f034-4e0e-abff-ccef05c454b4')
    def test_list_ports_fields(self):
        """list_ports with a fields filter returns exactly those fields."""
        # Verify specific fields of ports
        fields = ['id', 'mac_address']
        body = self.client.list_ports(fields=fields)
        ports = body['ports']
        self.assertNotEmpty(ports, "Port list returned is empty")
        # Asserting the fields returned are correct
        for port in ports:
            self.assertEqual(sorted(fields), sorted(port.keys()))
    @test.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
    def test_create_update_port_with_second_ip(self):
        """A port can gain, lose and regain a second fixed IP via update."""
        # Create a network with two subnets
        network = self.create_network()
        self.addCleanup(self.networks_client.delete_network, network['id'])
        subnet_1 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        subnet_2 = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet_2['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        fixed_ips = fixed_ip_1 + fixed_ip_2
        # Create a port with multiple IP addresses
        port = self.create_port(network,
                                fixed_ips=fixed_ips)
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertEqual(2, len(port['fixed_ips']))
        check_fixed_ips = [subnet_1['id'], subnet_2['id']]
        for item in port['fixed_ips']:
            self.assertIn(item['subnet_id'], check_fixed_ips)
        # Update the port to return to a single IP address
        port = self.update_port(port, fixed_ips=fixed_ip_1)
        self.assertEqual(1, len(port['fixed_ips']))
        # Update the port with a second IP address from second subnet
        port = self.update_port(port, fixed_ips=fixed_ips)
        self.assertEqual(2, len(port['fixed_ips']))
    def _update_port_with_security_groups(self, security_groups_names):
        """Create a port, then update it to use the named security groups
        while also changing name, admin_state_up and fixed_ips."""
        subnet_1 = self.create_subnet(self.network)
        self.addCleanup(self.client.delete_subnet, subnet_1['id'])
        fixed_ip_1 = [{'subnet_id': subnet_1['id']}]
        security_groups_list = list()
        for name in security_groups_names:
            group_create_body = self.client.create_security_group(
                name=name)
            self.addCleanup(self.client.delete_security_group,
                            group_create_body['security_group']['id'])
            security_groups_list.append(group_create_body['security_group']
                                        ['id'])
        # Create a port
        sec_grp_name = data_utils.rand_name('secgroup')
        security_group = self.client.create_security_group(name=sec_grp_name)
        self.addCleanup(self.client.delete_security_group,
                        security_group['security_group']['id'])
        post_body = {
            "name": data_utils.rand_name('port-'),
            "security_groups": [security_group['security_group']['id']],
            "network_id": self.network['id'],
            "admin_state_up": True,
            "fixed_ips": fixed_ip_1}
        body = self.client.create_port(**post_body)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        # Update the port with security groups
        subnet_2 = self.create_subnet(self.network)
        fixed_ip_2 = [{'subnet_id': subnet_2['id']}]
        update_body = {"name": data_utils.rand_name('port-'),
                       "admin_state_up": False,
                       "fixed_ips": fixed_ip_2,
                       "security_groups": security_groups_list}
        body = self.client.update_port(port['id'], **update_body)
        port_show = body['port']
        # Verify the security groups and other attributes updated to port
        exclude_keys = set(port_show).symmetric_difference(update_body)
        exclude_keys.add('fixed_ips')
        exclude_keys.add('security_groups')
        self.assertThat(port_show, custom_matchers.MatchesDictExceptForKeys(
                        update_body, exclude_keys))
        self.assertEqual(fixed_ip_2[0]['subnet_id'],
                         port_show['fixed_ips'][0]['subnet_id'])
        for security_group in security_groups_list:
            self.assertIn(security_group, port_show['security_groups'])
    @test.idempotent_id('58091b66-4ff4-4cc1-a549-05d60c7acd1a')
    def test_update_port_with_security_group_and_extra_attributes(self):
        """Update a port to one security group plus extra attributes."""
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup')])
    @test.idempotent_id('edf6766d-3d40-4621-bc6e-2521a44c257d')
    def test_update_port_with_two_security_groups_and_extra_attributes(self):
        """Update a port to two security groups plus extra attributes."""
        self._update_port_with_security_groups(
            [data_utils.rand_name('secgroup'),
             data_utils.rand_name('secgroup')])
    @test.idempotent_id('13e95171-6cbd-489c-9d7c-3f9c58215c18')
    def test_create_show_delete_port_user_defined_mac(self):
        """A port can be created with a user-supplied (freed) MAC address."""
        # Create a port for a legal mac
        body = self.client.create_port(network_id=self.network['id'])
        old_port = body['port']
        free_mac_address = old_port['mac_address']
        self.client.delete_port(old_port['id'])
        # Create a new port with user defined mac
        body = self.client.create_port(network_id=self.network['id'],
                                       mac_address=free_mac_address)
        self.addCleanup(self.client.delete_port, body['port']['id'])
        port = body['port']
        body = self.client.show_port(port['id'])
        show_port = body['port']
        self.assertEqual(free_mac_address,
                         show_port['mac_address'])
    @test.attr(type='smoke')
    @test.idempotent_id('4179dcb9-1382-4ced-84fe-1b91c54f5735')
    def test_create_port_with_no_securitygroups(self):
        """security_groups=[] yields an empty (not None) group list."""
        network = self.create_network()
        self.addCleanup(self.networks_client.delete_network, network['id'])
        subnet = self.create_subnet(network)
        self.addCleanup(self.client.delete_subnet, subnet['id'])
        port = self.create_port(network, security_groups=[])
        self.addCleanup(self.client.delete_port, port['id'])
        self.assertIsNotNone(port['security_groups'])
        self.assertEmpty(port['security_groups'])
class PortsAdminExtendedAttrsTestJSON(base.BaseAdminNetworkTest):
    """Admin-only tests for the port binding extension attributes
    (binding:host_id, binding:vif_type, binding:vif_details)."""
    @classmethod
    def setup_clients(cls):
        super(PortsAdminExtendedAttrsTestJSON, cls).setup_clients()
        cls.identity_client = cls.os_adm.identity_client
    @classmethod
    def resource_setup(cls):
        # One shared network; the local hostname doubles as the binding host.
        super(PortsAdminExtendedAttrsTestJSON, cls).resource_setup()
        cls.network = cls.create_network()
        cls.host_id = socket.gethostname()
    @test.idempotent_id('8e8569c1-9ac7-44db-8bc1-f5fb2814f29b')
    def test_create_port_binding_ext_attr(self):
        """A port created with binding:host_id set exposes that binding."""
        create_kwargs = {"network_id": self.network['id'],
                         "binding:host_id": self.host_id}
        resp = self.admin_client.create_port(**create_kwargs)
        created_port = resp['port']
        self.addCleanup(self.admin_client.delete_port, created_port['id'])
        bound_host = created_port['binding:host_id']
        self.assertIsNotNone(bound_host)
        self.assertEqual(self.host_id, bound_host)
    @test.idempotent_id('6f6c412c-711f-444d-8502-0ac30fbf5dd5')
    def test_update_port_binding_ext_attr(self):
        """Updating binding:host_id on an existing port takes effect."""
        resp = self.admin_client.create_port(network_id=self.network['id'])
        created_port = resp['port']
        self.addCleanup(self.admin_client.delete_port, created_port['id'])
        resp = self.admin_client.update_port(
            created_port['id'], **{"binding:host_id": self.host_id})
        bound_host = resp['port']['binding:host_id']
        self.assertIsNotNone(bound_host)
        self.assertEqual(self.host_id, bound_host)
    @test.idempotent_id('1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8')
    def test_list_ports_binding_ext_attr(self):
        """A bound port shows accurate binding attributes in port listings."""
        # Create a new port, then bind it to this host.
        resp = self.admin_client.create_port(network_id=self.network['id'])
        created_port = resp['port']
        self.addCleanup(self.admin_client.delete_port, created_port['id'])
        self.admin_client.update_port(
            created_port['id'], **{"binding:host_id": self.host_id})
        # The listing must contain the port exactly once, with the binding set.
        all_ports = self.admin_client.list_ports()['ports']
        self.assertIn(created_port['id'], [p['id'] for p in all_ports])
        matching = [p for p in all_ports if p['id'] == created_port['id']]
        self.assertEqual(1, len(matching),
                         'Multiple ports listed with id %s in ports listing: '
                         '%s' % (created_port['id'], all_ports))
        self.assertEqual(self.host_id, matching[0]['binding:host_id'])
    @test.idempotent_id('b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13')
    def test_show_port_binding_ext_attr(self):
        """show_port returns the same binding attributes as create_port."""
        created = self.admin_client.create_port(
            network_id=self.network['id'])['port']
        self.addCleanup(self.admin_client.delete_port, created['id'])
        shown = self.admin_client.show_port(created['id'])['port']
        for attr in ('binding:host_id', 'binding:vif_type',
                     'binding:vif_details'):
            self.assertEqual(created[attr], shown[attr])
class PortsIpV6TestJSON(PortsTestJSON):
    # Re-run the whole PortsTestJSON suite against IPv6 tenant networks.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
class PortsAdminExtendedAttrsIpV6TestJSON(PortsAdminExtendedAttrsTestJSON):
    # Re-run the admin binding-attribute suite against IPv6 tenant networks.
    _ip_version = 6
    _tenant_network_cidr = CONF.network.tenant_network_v6_cidr
    _tenant_network_mask_bits = CONF.network.tenant_network_v6_mask_bits
| |
import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_equal, assert_raises, assert_allclose
)
import threading
import queue
def fft1(x):
    """Naive O(n^2) reference DFT used to validate np.fft results."""
    n = len(x)
    k = np.arange(n)
    # Full DFT matrix: W[j, m] = exp(-2i*pi*j*m/n); result is W @ x.
    dft_matrix = np.exp(np.outer(k, -2j * np.pi * k / n))
    return dft_matrix.dot(x)
class TestFFTShift:
    """Error-handling checks for the fft entry point."""
    def test_fft_n(self):
        # Requesting a zero-point transform is invalid and must raise.
        with assert_raises(ValueError):
            np.fft.fft([1, 2, 3], 0)
class TestFFT1D:
    """Accuracy and normalization checks for the 1-D and N-D FFT entry
    points: round-trip identities, agreement with the naive DFT (fft1),
    and the 'backward'/'ortho'/'forward' norm scalings."""
    def test_identity(self):
        # Round trips ifft(fft(x)) and irfft(rfft(x), n) for every length.
        maxlen = 512
        x = random(maxlen) + 1j*random(maxlen)
        xr = random(maxlen)
        for i in range(1, maxlen):
            assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
                            atol=1e-12)
            assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),
                            xr[0:i], atol=1e-12)
    def test_fft(self):
        # fft must match the naive DFT under each norm's scaling factor.
        x = random(30) + 1j*random(30)
        assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
        assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)
        assert_allclose(fft1(x) / np.sqrt(30),
                        np.fft.fft(x, norm="ortho"), atol=1e-6)
        assert_allclose(fft1(x) / 30.,
                        np.fft.fft(x, norm="forward"), atol=1e-6)
    @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))
    def test_ifft(self, norm):
        """ifft inverts fft for every norm; empty input raises ValueError."""
        x = random(30) + 1j*random(30)
        assert_allclose(
            x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
            atol=1e-6)
        # Ensure we get the correct error message
        with pytest.raises(ValueError,
                           match='Invalid number of FFT data points'):
            np.fft.ifft([], norm=norm)
    def test_fft2(self):
        # fft2 equals fft applied along axis 1 then axis 0.
        x = random((30, 20)) + 1j*random((30, 20))
        assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
                        np.fft.fft2(x), atol=1e-6)
        assert_allclose(np.fft.fft2(x),
                        np.fft.fft2(x, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
                        np.fft.fft2(x, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.fft2(x) / (30. * 20.),
                        np.fft.fft2(x, norm="forward"), atol=1e-6)
    def test_ifft2(self):
        x = random((30, 20)) + 1j*random((30, 20))
        assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
                        np.fft.ifft2(x), atol=1e-6)
        assert_allclose(np.fft.ifft2(x),
                        np.fft.ifft2(x, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
                        np.fft.ifft2(x, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.ifft2(x) * (30. * 20.),
                        np.fft.ifft2(x, norm="forward"), atol=1e-6)
    def test_fftn(self):
        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
        assert_allclose(
            np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
            np.fft.fftn(x), atol=1e-6)
        assert_allclose(np.fft.fftn(x),
                        np.fft.fftn(x, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
                        np.fft.fftn(x, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.fftn(x) / (30. * 20. * 10.),
                        np.fft.fftn(x, norm="forward"), atol=1e-6)
    def test_ifftn(self):
        x = random((30, 20, 10)) + 1j*random((30, 20, 10))
        assert_allclose(
            np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
            np.fft.ifftn(x), atol=1e-6)
        assert_allclose(np.fft.ifftn(x),
                        np.fft.ifftn(x, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
                        np.fft.ifftn(x, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),
                        np.fft.ifftn(x, norm="forward"), atol=1e-6)
    def test_rfft(self):
        # rfft equals the first n//2 + 1 bins of the complex fft.
        x = random(30)
        for n in [x.size, 2*x.size]:
            for norm in [None, 'backward', 'ortho', 'forward']:
                assert_allclose(
                    np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
                    np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
            assert_allclose(
                np.fft.rfft(x, n=n),
                np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)
            assert_allclose(
                np.fft.rfft(x, n=n) / np.sqrt(n),
                np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
            assert_allclose(
                np.fft.rfft(x, n=n) / n,
                np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)
    def test_irfft(self):
        x = random(30)
        assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
        assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),
                        norm="backward"), atol=1e-6)
        assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),
                        norm="ortho"), atol=1e-6)
        assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),
                        norm="forward"), atol=1e-6)
    def test_rfft2(self):
        x = random((30, 20))
        assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
        assert_allclose(np.fft.rfft2(x),
                        np.fft.rfft2(x, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
                        np.fft.rfft2(x, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.rfft2(x) / (30. * 20.),
                        np.fft.rfft2(x, norm="forward"), atol=1e-6)
    def test_irfft2(self):
        x = random((30, 20))
        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),
                        norm="backward"), atol=1e-6)
        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),
                        norm="ortho"), atol=1e-6)
        assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),
                        norm="forward"), atol=1e-6)
    def test_rfftn(self):
        x = random((30, 20, 10))
        assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
        assert_allclose(np.fft.rfftn(x),
                        np.fft.rfftn(x, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
                        np.fft.rfftn(x, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),
                        np.fft.rfftn(x, norm="forward"), atol=1e-6)
    def test_irfftn(self):
        x = random((30, 20, 10))
        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),
                        norm="backward"), atol=1e-6)
        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),
                        norm="ortho"), atol=1e-6)
        assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),
                        norm="forward"), atol=1e-6)
    def test_hfft(self):
        # Build a 30-sample Hermitian-symmetric signal from 16 half-spectrum
        # points; hfft of the half spectrum must equal fft of the full signal.
        x = random(14) + 1j*random(14)
        x_herm = np.concatenate((random(1), x, random(1)))
        x = np.concatenate((x_herm, x[::-1].conj()))
        assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
        assert_allclose(np.fft.hfft(x_herm),
                        np.fft.hfft(x_herm, norm="backward"), atol=1e-6)
        assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
                        np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
        assert_allclose(np.fft.hfft(x_herm) / 30.,
                        np.fft.hfft(x_herm, norm="forward"), atol=1e-6)
    def test_ihfft(self):
        x = random(14) + 1j*random(14)
        x_herm = np.concatenate((random(1), x, random(1)))
        x = np.concatenate((x_herm, x[::-1].conj()))
        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
                        norm="backward"), norm="backward"), atol=1e-6)
        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
                        norm="ortho"), norm="ortho"), atol=1e-6)
        assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,
                        norm="forward"), norm="forward"), atol=1e-6)
    @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
                                    np.fft.rfftn, np.fft.irfftn])
    def test_axes(self, op):
        # Transforming transposed data must equal transposing the transform.
        x = random((30, 20, 10))
        axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
        for a in axes:
            op_tr = op(np.transpose(x, a))
            tr_op = np.transpose(op(x, axes=a), a)
            assert_allclose(op_tr, tr_op, atol=1e-6)
    def test_all_1d_norm_preserving(self):
        # verify that round-trip transforms are norm-preserving
        x = random(30)
        x_norm = np.linalg.norm(x)
        n = x.size * 2
        func_pairs = [(np.fft.fft, np.fft.ifft),
                      (np.fft.rfft, np.fft.irfft),
                      # hfft: order so the first function takes x.size samples
                      # (necessary for comparison to x_norm above)
                      (np.fft.ihfft, np.fft.hfft),
                      ]
        for forw, back in func_pairs:
            for n in [x.size, 2*x.size]:
                for norm in [None, 'backward', 'ortho', 'forward']:
                    tmp = forw(x, n=n, norm=norm)
                    tmp = back(tmp, n=n, norm=norm)
                    assert_allclose(x_norm,
                                    np.linalg.norm(tmp), atol=1e-6)
    @pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
                                       np.longdouble])
    def test_dtypes(self, dtype):
        # make sure that all input precisions are accepted and internally
        # converted to 64bit
        x = random(30).astype(dtype)
        assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
        assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
@pytest.mark.parametrize(
    "dtype",
    [np.float32, np.float64, np.complex64, np.complex128])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
    "fft",
    [np.fft.fft, np.fft.fft2, np.fft.fftn,
     np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
def test_fft_with_order(dtype, order, fft):
    """FFT/IFFT must produce identical results for C-ordered, Fortran-ordered
    and non-contiguous layouts of the same data."""
    rng = np.random.RandomState(42)
    base = rng.rand(8, 7, 13).astype(dtype, copy=False)
    # See discussion in pull/14178
    tol = 8.0 * np.sqrt(np.log2(base.size)) * np.finfo(base.dtype).eps
    if order == 'F':
        contiguous = base
        alternate = np.asfortranarray(base)
    else:
        # Reversed view is non-contiguous; compare against a contiguous copy.
        alternate = base[::-1]
        contiguous = np.ascontiguousarray(base[::-1])
    name = fft.__name__
    if name.endswith(('fft2', 'fftn')):
        axes_list = [(0, 1), (1, 2), (0, 2)]
        if name.endswith('fftn'):
            axes_list.extend([(0,), (1,), (2,), None])
        for ax in axes_list:
            assert_allclose(fft(contiguous, axes=ax), fft(alternate, axes=ax),
                            atol=tol, rtol=tol)
    elif name.endswith('fft'):
        for axis in range(3):
            assert_allclose(fft(contiguous, axis=axis),
                            fft(alternate, axis=axis),
                            atol=tol, rtol=tol)
    else:
        raise ValueError()
class TestFFTThreadSafe:
    """Verify the FFT entry points are safe to call from many threads."""
    # Number of concurrent workers and the shared input size.
    threads = 16
    input_shape = (800, 200)
    def _test_mtsame(self, func, *args):
        """Run func concurrently and check every thread matches a serial call."""
        def worker(args, q):
            q.put(func(*args))

        q = queue.Queue()
        expected = func(*args)

        # Launch all workers against the same arguments, then wait for them.
        pool = []
        for _ in range(self.threads):
            thread = threading.Thread(target=worker, args=(args, q))
            pool.append(thread)
            thread.start()
        for thread in pool:
            thread.join()

        # Every queued result must equal the serial reference value.
        for _ in range(self.threads):
            assert_array_equal(q.get(timeout=5), expected,
                'Function returned wrong value in multithreaded context')
    def test_fft(self):
        data = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.fft, data)
    def test_ifft(self):
        data = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.ifft, data)
    def test_rfft(self):
        data = np.ones(self.input_shape)
        self._test_mtsame(np.fft.rfft, data)
    def test_irfft(self):
        data = np.ones(self.input_shape) * 1+0j
        self._test_mtsame(np.fft.irfft, data)
| |
#**********************************************************
#* Classes and utilities for outputting test results
#*
#**********************************************************
import os.path
import re
import smtplib
import time
import java.lang
import java.util
import java.io
from jyunit import SimpleResults, MultiResults, JythonAssertionFailedError, JythonComparisonFailure, JythonException
# regex for class names to skip as "internal" in stack traces
INTERNAL_CLASS_REGEX = re.compile("(org\.python\.|sun\.reflect\.|java\.lang\.reflect\.|org\.junit\.|junit\.)")
class ResultPrinter(object):
    '''Base class for outputting test results'''
    def __init__(self, results):
        # Aggregated test results object this printer reports on.
        self.results = results
    def getFailureStack(self, exception):
        '''Returns the contents of the exception stack trace as a string'''
        string_out = java.io.StringWriter()
        print_out = java.io.PrintWriter(string_out)
        stack_text = ''
        try:
            exception.printStackTrace(print_out)
            print_out.flush()
            stack_text = string_out.toString()
        finally:
            # Close the PrintWriter first; it wraps the StringWriter.
            print_out.close()
            string_out.close()
        return stack_text
class TextPrinter(ResultPrinter):
'''Prints test results in a plain text format suitable for simple email reports. The output
consists of 4 separate sections:
* a header containing overall stats (total # of tests, errors and failures)
* summary list of individual test class failures
* summary list of passed test classes
* detailed failure and error output by test class
'''
    def __init__(self, results, out=java.lang.System.err):
        # results: aggregated test results to report on.
        # out: java PrintStream the report is written to (defaults to stderr).
        self.results = results
        self.out = out
def printAll(self):
(passed, failed) = self.results.splitResults()
self.printHeader(self.out, passed, failed)
self.printSummary(self.out, passed, failed)
# print failure details
if len(failed) > 0:
self.out.println("DETAILS: ")
self.out.println("-"*50)
self.printFailedDetails(self.out, failed)
def printHeader(self, out, passed, failed):
out.print( \
'''\
Run: %d, Passed: %d, Failed: %d
Generated %s
Run Time: %d sec
''' % ( self.results.totalRunCount(), len(passed), len(failed), java.util.Date().toString(), self.results.totalRunTime() ) )
def printSummary(self, out, passed, failed):
if len(failed) > 0:
out.println("FAILED: ")
out.println("-"*50)
self.printFailed(out, failed)
out.println()
if len(passed) > 0:
out.println("SUCCESSFUL: ")
out.println("-"*50)
self.printPassed(out, passed)
out.println()
def getTestCountText(self, res):
extrainfo = ""
if isinstance(res, MultiResults):
extrainfo = "%3d tests " % (res.getTestCount())
if res.assertCount > 0:
if extrainfo:
extrainfo = extrainfo + "; "
extrainfo = extrainfo + ("%3d assertions" % (res.assertCount))
return extrainfo
def printPassed(self, out, passed):
for res in passed:
out.println( "%-60s %4d sec; %s" % ('[ '+str(res.test)+' ]:', res.totalTime, self.getTestCountText(res)) )
def printFailed(self, out, failed):
fcnt = 1
for f in failed:
out.println( "%-60s %4d sec; %s %3d failures %3d errors" \
% ( '[ '+str(f.test)+' ]:', f.totalTime, self.getTestCountText(f), f.getFailureCount(), f.getErrorCount() ) )
def printFailedDetails(self, out, failed):
for f in failed:
self.printFailure(out, f)
def printFailure(self, out, f, depth=0):
if depth == 0:
out.print( \
'''%-40s %4d sec; %s %3d failures %3d errors
''' % ( '[ '+str(f.test)+' ]:', f.totalTime, self.getTestCountText(f), f.getFailureCount(), f.getErrorCount() ) )
else:
out.println( "( %s ) " % (str(f.test)) )
if isinstance(f, MultiResults):
for c in f.getAllTestCases():
if c.getErrorCount() > 0 or c.getFailureCount() > 0:
self.printFailure(out, c, depth+1)
else:
if f.getFailureCount() > 0:
out.println(' Failures:')
self.printErrors(out, f.failures, True)
if f.getErrorCount() > 0:
out.println(' Errors:')
self.printErrors(out, f.errors, False)
out.println()
def printErrors(self, out, errors, abbrev=False):
indent = ' '*4
subindent = ' '*8
if len(errors) > 0:
failCnt = 1
for e in errors:
out.print( '''%s%d. ''' % (indent, failCnt) )
if abbrev:
self.printFailureStack(out, e.thrownException(), subindent)
else:
e.thrownException().printStackTrace(out)
failCnt = failCnt + 1
def printFailureStack(self, out, exception, indent):
'''Prints an abbreviated version of the stack trace for failures'''
# delegate to custom exceptions
if isinstance(exception, JythonAssertionFailedError) or \
isinstance(exception, JythonComparisonFailure) or \
isinstance(exception, JythonException):
exception.printStackTrace(out)
return
out.println("%s: %s" % (exception.getClass().getName(), exception.getMessage()))
skipping = False
for elm in exception.getStackTrace():
if INTERNAL_CLASS_REGEX.match(elm.getClassName()):
if not skipping:
skipping = True
out.println("%s ..." % (indent))
continue
skipping = False
out.println("%s at %s" % (indent, elm.toString()))
# TODO: chain exception output
class XMLPrinter(ResultPrinter):
    '''Outputs test results in an XML format like that used by the Ant JUnit task.
    The results for each test class are output as a separate XML file in the common
    directory passed to the constructor. Each file is named in the format:
    TEST-<classname>.xml
    '''
    def __init__(self, results, dirname):
        # dirname: existing directory that receives the TEST-*.xml files
        self.results = results
        self.dirname = dirname
        from xml.dom.minidom import getDOMImplementation
        self.dom = getDOMImplementation()
    def printAll(self):
        '''For each test class in the results, outputs an XML file of results for that class'''
        for t in self.results.results.values():
            self.printTest(t)
    def printTest(self, test):
        '''Builds and writes the <testsuite> document for a single test class.'''
        doc = self.dom.createDocument(None, "testsuite", None)
        root = doc.documentElement
        # suite-level attributes mirror the Ant JUnit report format
        root.setAttribute("errors", str(test.getErrorCount()))
        root.setAttribute("failures", str(test.getFailureCount()))
        root.setAttribute("hostname", self.results.host)
        root.setAttribute("name", str(test.test))
        root.setAttribute("tests", str(test.getTestCount()))
        root.setAttribute("time", str(test.totalTime))
        root.setAttribute("timestamp", time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime(test.startTime)))
        # add runtime properties
        sysprops = java.lang.System.getProperties()
        propselt = doc.createElement("properties")
        for n in sysprops.propertyNames():
            prop = doc.createElement("property")
            prop.setAttribute("name", n)
            prop.setAttribute("value", sysprops.getProperty(n))
            propselt.appendChild(prop)
        root.appendChild(propselt)
        # add individual test cases
        if isinstance(test, MultiResults):
            for c in test.getAllTestCases():
                self.printTestCase(doc, test, c)
        else:
            # print as a single test case
            self.printTestCase(doc, test, test, "[main]")
        # add any top level errors
        #if test.getErrorCount() > 0:
        #    for e in test.errors:
        #        self.printError("error", doc, root, e)
        #if test.getFailureCount() > 0:
        #    for f in test.failures:
        #        self.printError("failure", doc, root, f)
        # TODO: actually capture output here
        root.appendChild( doc.createElement("system-out") )
        root.appendChild( doc.createElement("system-err") )
        # output the document
        fname = "TEST-%s.xml" % (test.test)
        outfile = os.path.join(self.dirname, fname)
        fout = open(outfile, "w")
        try:
            doc.writexml(fout, addindent="    ", newl="\n", encoding="UTF-8")
            doc.unlink()
        finally:
            fout.close()
    def printTestCase(self, doc, test, caseresults, casename=None):
        '''Appends a <testcase> element (with any errors/failures) to the document.'''
        if casename is None:
            casename = str(caseresults.test)
        root = doc.documentElement
        caseelt = doc.createElement("testcase")
        caseelt.setAttribute("classname", str(test.test))
        caseelt.setAttribute("name", casename)
        caseelt.setAttribute("time", str(caseresults.totalTime))
        root.appendChild(caseelt)
        # output any errors
        if caseresults.getErrorCount() > 0:
            for e in caseresults.errors:
                self.printError("error", doc, caseelt, e)
        if caseresults.getFailureCount() > 0:
            for f in caseresults.failures:
                self.printError("failure", doc, caseelt, f)
    def printError(self, eltType, doc, parent, err):
        '''Appends an <error> or <failure> element describing a thrown exception.'''
        elt = doc.createElement(eltType)
        if err.thrownException():
            if err.thrownException().getMessage():
                elt.setAttribute("message", err.thrownException().getMessage())
            elt.setAttribute("type", err.thrownException().getClass().getName())
            elt.appendChild( doc.createTextNode(self.getFailureStack(err.thrownException())) )
        parent.appendChild(elt)
def sendReport( collector, fname, recips, product='' ):
    '''Emails the test report output to the comma-separated email addresses in "recips"

    collector -- results collector (provides totalErrors and host)
    fname     -- path of the generated report file used as the message body
    recips    -- comma-separated recipient email addresses
    product   -- optional product name included in the subject line
    '''
    # Read the report body; close the handle even if read() raises
    # (previously the file object leaked on a read error).
    f = open(fname)
    try:
        reportText = f.read()
    finally:
        f.close()
    reciplist = recips.split(",")
    if product is not None and len(product) > 0:
        product = ' ' + product
    # summarize the outcome for the subject line
    errorcnt = ''
    errortxt = 'error'
    if collector.totalErrors > 0:
        if collector.totalErrors > 1:
            errortxt = 'errors'
        errorcnt = "%d %s" % (collector.totalErrors, errortxt)
    else:
        errorcnt = "Success"
    # RFC 822 message: blank line separates the headers from the body
    mesg = \
'''\
From: %s
To: %s
Subject: {DEV MODE} [%s] %s Test Run: %s

%s
''' % ('nobody@meetup.com', ', '.join(reciplist), collector.host, product, errorcnt, reportText)
    server = smtplib.SMTP('mail.int.meetup.com')
    server.sendmail('nobody@meetup.com', reciplist, mesg)
    server.quit()
| |
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import Counter
import logging
import itertools
import json
import time
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from dateutil.parser import parse as parse_date
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import (
CrossAccountAccessFilter, Filter, FilterRegistry, AgeFilter, ValueFilter,
ANNOTATION_KEY, FilterValidationError, OPERATORS)
from c7n.filters.health import HealthEventFilter
from c7n.manager import resources
from c7n.resources.kms import ResourceKmsKeyAlias
from c7n.query import QueryResourceManager
from c7n.utils import (
camelResource,
chunks,
get_retry,
local_session,
set_annotation,
type_schema,
worker,
)
from c7n.resources.ami import AMI
# Module logger plus the shared filter/action registries used by the
# 'ebs' (volume) resource defined below.
log = logging.getLogger('custodian.ebs')
filters = FilterRegistry('ebs.filters')
actions = ActionRegistry('ebs.actions')
@resources.register('ebs-snapshot')
class Snapshot(QueryResourceManager):
    """Resource manager for EBS snapshots owned by the current account."""

    class resource_type(object):
        # boto3 service/enumeration metadata consumed by QueryResourceManager
        service = 'ec2'
        type = 'snapshot'
        # only enumerate snapshots owned by this account
        enum_spec = (
            'describe_snapshots', 'Snapshots', {'OwnerIds': ['self']})
        detail_spec = None
        id = 'SnapshotId'
        filter_name = 'SnapshotIds'
        filter_type = 'list'
        name = 'SnapshotId'
        date = 'StartTime'
        dimension = None
        # columns included by default in report output
        default_report_fields = (
            'SnapshotId',
            'VolumeId',
            'tag:InstanceId',
            'VolumeSize',
            'StartTime',
            'State',
        )
    filter_registry = FilterRegistry('ebs-snapshot.filters')
    action_registry = ActionRegistry('ebs-snapshot.actions')
@Snapshot.filter_registry.register('age')
class SnapshotAge(AgeFilter):
    """EBS Snapshot Age Filter

    Filters an EBS snapshot based on the age of the snapshot (in days)

    :example:

        .. code-block: yaml

                policies:
                  - name: ebs-snapshots-week-old
                    resource: ebs-snapshot
                    filters:
                      - type: age
                        days: 7
                        op: ge
    """
    schema = type_schema(
        'age',
        days={'type': 'number'},
        op={'type': 'string', 'enum': list(OPERATORS.keys())})
    # AgeFilter compares 'days' against this attribute of the snapshot
    date_attribute = 'StartTime'
def _filter_ami_snapshots(self, snapshots):
    """Drop snapshots that back a registered AMI.

    Shared helper invoked as a plain function by both the
    skip-ami-snapshots filter and the delete action, with the
    filter/action instance passed explicitly as *self*.

    When the caller's config has ``value: false`` the snapshot list is
    returned unchanged.
    """
    if not self.data.get('value', True):
        return snapshots
    # Resolve all AMIs via the resource manager; using the manager also
    # populates the cache for subsequent lookups.
    amis = self.manager.get_resource_manager('ami').resources()
    # Collect the snapshot ids referenced by any AMI block device mapping.
    # A set gives O(1) membership tests instead of the previous O(n) list
    # scan per snapshot; also tolerate images without mappings.
    ami_snaps = set()
    for image in amis:
        for dev in image.get('BlockDeviceMappings', ()):
            if 'Ebs' in dev and 'SnapshotId' in dev['Ebs']:
                ami_snaps.add(dev['Ebs']['SnapshotId'])
    return [snap for snap in snapshots
            if snap['SnapshotId'] not in ami_snaps]
@Snapshot.filter_registry.register('cross-account')
class SnapshotCrossAccountAccess(CrossAccountAccessFilter):
    """Find snapshots whose createVolumePermission grants access to
    accounts/groups outside the allowed set."""
    permissions = ('ec2:DescribeSnapshotAttribute',)
    def process(self, resources, event=None):
        # allowed accounts come from the base CrossAccountAccessFilter config
        self.accounts = self.get_accounts()
        results = []
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for resource_set in chunks(resources, 50):
                futures.append(w.submit(
                    self.process_resource_set, resource_set))
            for f in as_completed(futures):
                if f.exception():
                    # log and keep going; other batches may still succeed
                    self.log.error(
                        "Exception checking cross account access \n %s" % (
                            f.exception()))
                    continue
                results.extend(f.result())
        return results
    def process_resource_set(self, resource_set):
        """Returns the snapshots in resource_set shared outside self.accounts,
        annotated with the violating grantees."""
        client = local_session(self.manager.session_factory).client('ec2')
        results = []
        for r in resource_set:
            attrs = self.manager.retry(
                client.describe_snapshot_attribute,
                SnapshotId=r['SnapshotId'],
                Attribute='createVolumePermission')['CreateVolumePermissions']
            # each grant carries either a Group (e.g. 'all') or a UserId
            shared_accounts = {
                g.get('Group') or g.get('UserId') for g in attrs}
            delta_accounts = shared_accounts.difference(self.accounts)
            if delta_accounts:
                r['c7n:CrossAccountViolations'] = list(delta_accounts)
                results.append(r)
        return results
@Snapshot.filter_registry.register('skip-ami-snapshots')
class SnapshotSkipAmiSnapshots(Filter):
    """Exclude snapshots that back a registered AMI from the result set.

    The filter is enabled ('true') by default; set ``value: false`` to
    let AMI snapshots through.

    :example:

    Implicit, with no parameters ('true' by default):

    .. code-block: yaml

            policies:
              - name: delete-stale-snapshots
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                  - skip-ami-snapshots

    Explicit, with parameter:

    .. code-block: yaml

            policies:
              - name: delete-snapshots
                resource: ebs-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                  - type: skip-ami-snapshots
                    value: false
    """

    schema = type_schema('skip-ami-snapshots', value={'type': 'boolean'})

    def get_permissions(self):
        # needs the same permissions as enumerating AMIs
        return AMI(self.manager.ctx, {}).get_permissions()

    def validate(self):
        configured = self.data.get('value', True)
        if not isinstance(configured, bool):
            raise FilterValidationError(
                "invalid config: expected boolean value")
        return self

    def process(self, snapshots, event=None):
        # delegate to the shared module-level helper
        return _filter_ami_snapshots(self, snapshots)
@Snapshot.action_registry.register('delete')
class SnapshotDelete(BaseAction):
    """Deletes EBS snapshots

    Snapshots backing a registered AMI are filtered out by default,
    since deleting them would fail anyway.

    :example:

        .. code-block: yaml

                policies:
                  - name: delete-stale-snapshots
                    resource: ebs-snapshot
                    filters:
                      - type: age
                        days: 28
                        op: ge
                    actions:
                      - delete
    """
    schema = type_schema(
        'delete', **{'skip-ami-snapshots': {'type': 'boolean'}})
    # IAM action names use a 'service:Action' separator; the previous
    # value 'ec2.DeleteSnapshot' (with a dot) was not a valid permission.
    permissions = ('ec2:DeleteSnapshot',)

    def process(self, snapshots):
        # populated for symmetry with the shared ami-snapshot handling;
        # ids in this set are skipped during deletion
        self.image_snapshots = set()
        # Be careful re image snapshots, we do this by default
        # to keep things safe by default, albeit we'd get an error
        # if we did try to delete something associated to an image.
        pre = len(snapshots)
        snapshots = list(filter(None, _filter_ami_snapshots(self, snapshots)))
        post = len(snapshots)
        log.info("Deleting %d snapshots, auto-filtered %d ami-snapshots",
                 post, pre - post)
        with self.executor_factory(max_workers=2) as w:
            futures = []
            for snapshot_set in chunks(reversed(snapshots), size=50):
                futures.append(
                    w.submit(self.process_snapshot_set, snapshot_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception deleting snapshot set \n %s" % (
                            f.exception()))
        return snapshots

    @worker
    def process_snapshot_set(self, snapshots_set):
        """Delete one batch of snapshots, ignoring already-deleted ones."""
        c = local_session(self.manager.session_factory).client('ec2')
        for s in snapshots_set:
            if s['SnapshotId'] in self.image_snapshots:
                continue
            try:
                c.delete_snapshot(
                    SnapshotId=s['SnapshotId'],
                    DryRun=self.manager.config.dryrun)
            except ClientError as e:
                # already gone: treat as success
                if e.response['Error']['Code'] == "InvalidSnapshot.NotFound":
                    continue
                raise
@Snapshot.action_registry.register('copy')
class CopySnapshot(BaseAction):
    """Copy a snapshot across regions

    http://goo.gl/CP3dq

    :example:

        .. code-block: yaml

                policies:
                  - name: copy-snapshot-east-west
                    resource: ebs-snapshot
                    filters:
                      - type: age
                        days: 7
                        op: le
                    actions:
                      - type: copy
                        target_region: us-west-2
                        target_key: *target_kms_key*
                        encrypted: true
    """
    schema = type_schema(
        'copy',
        target_region={'type': 'string'},
        target_key={'type': 'string'},
        encrypted={'type': 'boolean'},
    )
    permissions = (
        'ec2:CreateTags', 'ec2:CopySnapshot', 'ec2:DescribeSnapshots')

    def validate(self):
        """Encrypted copies (the default) require a kms key."""
        if self.data.get('encrypted', True):
            key = self.data.get('target_key')
            if not key:
                raise FilterValidationError(
                    "Encrypted snapshot copy requires kms key")
        return self

    def process(self, resources):
        if self.data['target_region'] == self.manager.config.region:
            self.log.info(
                "Source and destination region are the same, skipping")
            return
        with self.executor_factory(max_workers=2) as w:
            list(w.map(self.process_resource_set, chunks(resources, 20)))

    @worker
    def process_resource_set(self, resource_set):
        client = self.manager.session_factory(
            region=self.data['target_region']).client('ec2')
        # Bind unconditionally: previously cross_region was only assigned
        # inside a conditional, leaving it unbound (NameError at the wait
        # below) if this method ran for a same-region copy.
        cross_region = (
            self.data['target_region'] != self.manager.config.region)
        params = {}
        params['Encrypted'] = self.data.get('encrypted', True)
        if params['Encrypted']:
            params['KmsKeyId'] = self.data['target_key']
        for snapshot_set in chunks(resource_set, 5):
            for r in snapshot_set:
                snapshot_id = client.copy_snapshot(
                    SourceRegion=self.manager.config.region,
                    SourceSnapshotId=r['SnapshotId'],
                    Description=r.get('Description', ''),
                    **params)['SnapshotId']
                if r.get('Tags'):
                    client.create_tags(
                        Resources=[snapshot_id], Tags=r['Tags'])
                r['c7n:CopiedSnapshot'] = snapshot_id
            # only throttle/wait on full cross-region batches
            if not cross_region or len(snapshot_set) < 5:
                continue
            copy_ids = [r['c7n:CopiedSnapshot'] for r in snapshot_set]
            self.log.debug(
                "Waiting on cross-region snapshot copy %s", ",".join(copy_ids))
            waiter = client.get_waiter('snapshot_completed')
            waiter.config.delay = 60
            waiter.config.max_attempts = 60
            waiter.wait(SnapshotIds=copy_ids)
            self.log.debug(
                "Cross region copy complete %s", ",".join(copy_ids))
@resources.register('ebs')
class EBS(QueryResourceManager):
    """Resource manager for EBS volumes."""

    class resource_type(object):
        # boto3 service/enumeration metadata consumed by QueryResourceManager
        service = 'ec2'
        type = 'volume'
        enum_spec = ('describe_volumes', 'Volumes', None)
        name = id = 'VolumeId'
        filter_name = 'VolumeIds'
        filter_type = 'list'
        date = 'createTime'
        # cloudwatch metrics dimension for this resource
        dimension = 'VolumeId'
        metrics_namespace = 'AWS/EBS'
        # AWS Config resource type for config-based policies
        config_type = "AWS::EC2::Volume"
        # columns included by default in report output
        default_report_fields = (
            'VolumeId',
            'Attachments[0].InstanceId',
            'Size',
            'VolumeType',
            'KmsKeyId'
        )
    filter_registry = filters
    action_registry = actions
@filters.register('instance')
class AttachedInstanceFilter(ValueFilter):
    """Filter volumes based on filtering on their attached instance

    :example:

        .. code-block: yaml

                policies:
                  - name: instance-ebs-volumes
                    resource: ebs
                    filters:
                      - instance
    """
    schema = type_schema('instance', rinherit=ValueFilter.schema)
    def get_permissions(self):
        # resolving attached instances needs the ec2 describe permissions
        return self.manager.get_resource_manager('ec2').get_permissions()
    def process(self, resources, event=None):
        original_count = len(resources)
        # only attached volumes can be matched against an instance
        resources = [r for r in resources if r.get('Attachments')]
        self.log.debug('Filtered from %d volumes to %d attached volumes' % (
            original_count, len(resources)))
        # map of instance id -> instance, used per-volume in __call__
        self.instance_map = self.get_instance_mapping(resources)
        return list(filter(self, resources))
    def __call__(self, r):
        # evaluate the value filter against the first attachment's instance
        instance = self.instance_map[r['Attachments'][0]['InstanceId']]
        if self.match(instance):
            # annotate the matching instance onto the volume for later use
            r['Instance'] = instance
            set_annotation(r, ANNOTATION_KEY, "instance-%s" % self.k)
            return True
    def get_instance_mapping(self, resources):
        """Returns {instance_id: instance} for all attached volumes."""
        instance_ids = [r['Attachments'][0]['InstanceId'] for r in resources]
        instances = self.manager.get_resource_manager(
            'ec2').get_resources(instance_ids)
        self.log.debug("Queried %d instances for %d volumes" % (
            len(instances), len(resources)))
        return {i['InstanceId']: i for i in instances}
@filters.register('kms-alias')
class KmsKeyAlias(ResourceKmsKeyAlias):
    """Filter volumes by the alias of their KMS encryption key."""
    def process(self, resources, event=None):
        # matching logic is implemented in ResourceKmsKeyAlias
        return self.get_matching_aliases(resources)
@filters.register('fault-tolerant')
class FaultTolerantSnapshots(Filter):
    """
    This filter will return any EBS volume that does/does not have a
    snapshot within the last 7 days. 'Fault-Tolerance' in this instance
    means that, in the event of a failure, the volume can be restored
    from a snapshot with (reasonable) data loss

    - name: ebs-volume-tolerance
    - resource: ebs
    - filters: [{
        'type': 'fault-tolerant',
        'tolerant': True}]
    """
    schema = type_schema('fault-tolerant', tolerant={'type': 'boolean'})
    # Trusted Advisor check id for "Amazon EBS Snapshots"
    check_id = 'H7IgTzjTYb'
    permissions = ('support:RefreshTrustedAdvisorCheck',
                   'support:DescribeTrustedAdvisorCheckResult')

    def pull_check_results(self):
        """Refresh the Trusted Advisor check and return the flagged volume ids."""
        client = local_session(self.manager.session_factory).client('support')
        client.refresh_trusted_advisor_check(checkId=self.check_id)
        check = client.describe_trusted_advisor_check_result(
            checkId=self.check_id, language='en')['result']
        flagged_ids = set()
        for flagged in check['flaggedResources']:
            # metadata[1] holds the volume id
            flagged_ids.add(flagged['metadata'][1])
        return flagged_ids

    def process(self, resources, event=None):
        flagged = self.pull_check_results()
        if self.data.get('tolerant', True):
            # tolerant volumes are those Trusted Advisor did not flag
            return [r for r in resources if r['VolumeId'] not in flagged]
        return [r for r in resources if r['VolumeId'] in flagged]
@filters.register('health-event')
class HealthFilter(HealthEventFilter):
    """Filter volumes by AWS personal health dashboard events.

    Extends the generic health-event filter: for AWS_EBS_VOLUME_LOST
    events the volume no longer exists in the live API, so affected
    resources are reconstructed from AWS Config history instead.
    """
    schema = type_schema(
        'health-event',
        types={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['AWS_EBS_DEGRADED_EBS_VOLUME_PERFORMANCE',
                     'AWS_EBS_VOLUME_LOST']}},
        statuses={'type': 'array', 'items': {
            'type': 'string',
            'enum': ['open', 'upcoming', 'closed']
        }})
    permissions = HealthEventFilter.permissions + (
        'config:GetResourceConfigHistory',)

    def process(self, resources, event=None):
        # 'types' is optional in the schema; default to an empty list
        # instead of raising KeyError when it is omitted.
        if 'AWS_EBS_VOLUME_LOST' not in self.data.get('types', []):
            return super(HealthFilter, self).process(resources, event)
        if not resources:
            return resources
        # the health API is only available in us-east-1
        client = local_session(self.manager.session_factory).client(
            'health', region_name='us-east-1')
        f = self.get_filter_parameters()
        resource_map = {}
        paginator = client.get_paginator('describe_events')
        events = list(itertools.chain(
            *[p['events'] for p in paginator.paginate(filter=f)]))
        entities = self.process_event(events)
        event_map = {e['arn']: e for e in events}
        for e in entities:
            rid = e['entityValue']
            # reconstruct each lost volume from config history only once
            if not resource_map.get(rid):
                resource_map[rid] = self.load_resource(rid)
            resource_map[rid].setdefault(
                'c7n:HealthEvent', []).append(event_map[e['eventArn']])
        return list(resource_map.values())

    def load_resource(self, rid):
        """Reconstruct a (possibly deleted) volume from AWS Config history."""
        config = local_session(self.manager.session_factory).client('config')
        resources_histories = config.get_resource_config_history(
            resourceType='AWS::EC2::Volume',
            resourceId=rid,
            limit=2)['configurationItems']
        for r in resources_histories:
            if r['configurationItemStatus'] != u'ResourceDeleted':
                return camelResource(json.loads(r['configuration']))
        # no surviving configuration found; return a minimal stand-in
        return {"VolumeId": rid}
@actions.register('copy-instance-tags')
class CopyInstanceTags(BaseAction):
    """Copy instance tags to its attached volume.

    Useful for cost allocation to ebs volumes and tracking usage
    info for volumes.

    Mostly useful for volumes not set to delete on termination, which
    are otherwise candidates for garbage collection, copying the
    instance tags gives us more semantic information to determine if
    their useful, as well letting us know the last time the volume
    was actually used.

    :example:

        .. code-block: yaml

                policies:
                  - name: ebs-copy-instance-tags
                    resource: ebs
                    filters:
                      - type: value
                        key: "Attachments[0].Device"
                        value: not-null
                    actions:
                      - type: copy-instance-tags
                        tags:
                          - Name
    """
    schema = type_schema(
        'copy-instance-tags',
        tags={'type': 'array', 'items': {'type': 'string'}})
    def get_permissions(self):
        # reading instances plus writing tags onto volumes
        perms = self.manager.get_resource_manager('ec2').get_permissions()
        perms.append('ec2:CreateTags')
        return perms
    def process(self, volumes):
        vol_count = len(volumes)
        # only attached volumes have an instance to copy tags from
        volumes = [v for v in volumes if v['Attachments']]
        if len(volumes) != vol_count:
            self.log.warning(
                "ebs copy tags action implicitly filtered from %d to %d",
                vol_count, len(volumes))
        self.initialize(volumes)
        with self.executor_factory(max_workers=10) as w:
            futures = []
            for instance_set in chunks(sorted(
                    self.instance_map.keys(), reverse=True), size=100):
                futures.append(
                    w.submit(self.process_instance_set, instance_set))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception copying instance tags \n %s" % (
                            f.exception()))
    def initialize(self, volumes):
        """Builds instance_vol_map (instance id -> volumes) and
        instance_map (instance id -> instance) for the given volumes."""
        instance_vol_map = {}
        for v in volumes:
            instance_vol_map.setdefault(
                v['Attachments'][0]['InstanceId'], []).append(v)
        instance_map = {
            i['InstanceId']: i for i in
            self.manager.get_resource_manager('ec2').get_resources(
                list(instance_vol_map.keys()))}
        self.instance_vol_map = instance_vol_map
        self.instance_map = instance_map
    def process_instance_set(self, instance_ids):
        """Copies tags for each instance's volumes; errors are logged per
        instance so one failure doesn't abort the batch."""
        client = local_session(self.manager.session_factory).client('ec2')
        for i in instance_ids:
            try:
                self.process_instance_volumes(
                    client,
                    self.instance_map[i],
                    self.instance_vol_map[i])
            except Exception as e:
                self.log.exception(
                    "Error copy instance:%s tags to volumes: %s \n %s",
                    i, ",".join([v['VolumeId'] for v in self.instance_vol_map[i]]),
                    e)
    def process_instance_volumes(self, client, instance, volumes):
        """Applies the computed tag delta to each of an instance's volumes."""
        for v in volumes:
            copy_tags = self.get_volume_tags(v, instance, v['Attachments'][0])
            if not copy_tags:
                continue
            # Can't add more tags than the resource supports could try
            # to delete extant ones inline, else trim-tags action.
            if len(copy_tags) > 40:
                log.warning(
                    "action:%s volume:%s instance:%s too many tags to copy" % (
                        self.__class__.__name__.lower(),
                        v['VolumeId'], instance['InstanceId']))
                continue
            try:
                self.manager.retry(
                    client.create_tags,
                    Resources=[v['VolumeId']],
                    Tags=copy_tags,
                    DryRun=self.manager.config.dryrun)
            except ClientError as e:
                # volume disappeared between enumeration and tagging
                if e.response['Error']['Code'] == "InvalidVolume.NotFound":
                    continue
                raise
    def get_volume_tags(self, volume, instance, attachment):
        """Returns the list of tags to add to the volume: instance tags not
        already present (optionally restricted to 'tags' config) plus
        LastAttachTime/LastAttachInstance bookkeeping tags."""
        only_tags = self.data.get('tags', [])  # specify which tags to copy
        copy_tags = []
        extant_tags = dict([
            (t['Key'], t['Value']) for t in volume.get('Tags', [])])
        for t in instance.get('Tags', ()):
            if only_tags and not t['Key'] in only_tags:
                continue
            if t['Key'] in extant_tags and t['Value'] == extant_tags[t['Key']]:
                continue
            # aws: prefixed tags are reserved and cannot be set by users
            if t['Key'].startswith('aws:'):
                continue
            copy_tags.append(t)
        # Don't add attachment tags if we're already current
        if 'LastAttachInstance' in extant_tags \
                and extant_tags['LastAttachInstance'] == attachment['InstanceId']:
            return copy_tags
        copy_tags.append(
            {'Key': 'LastAttachTime',
             'Value': attachment['AttachTime'].isoformat()})
        copy_tags.append(
            {'Key': 'LastAttachInstance', 'Value': attachment['InstanceId']})
        return copy_tags
@actions.register('encrypt-instance-volumes')
class EncryptInstanceVolumes(BaseAction):
    """Encrypt extant volumes attached to an instance

    - Requires instance restart
    - Not suitable for autoscale groups.

    Multistep process:

    - Stop instance (if running)
    - For each volume
      - Create snapshot
      - Wait on snapshot creation
      - Copy Snapshot to create encrypted snapshot
      - Wait on snapshot creation
      - Create encrypted volume from snapshot
      - Wait on volume creation
      - Delete transient snapshots
      - Detach Unencrypted Volume
      - Attach Encrypted Volume
      - Set DeleteOnTermination instance attribute equal to source volume
    - For each volume
      - Delete unencrypted volume
    - Start Instance (if originally running)
    - For each newly encrypted volume
      - Delete transient tags

    :example:

        .. code-block:: yaml

                policies:
                  - name: encrypt-unencrypted-ebs
                    resource: ebs
                    filters:
                      - Encrypted: false
                    actions:
                      - type: encrypt-instance-volumes
                        key: alias/encrypted
    """
    schema = type_schema(
        'encrypt-instance-volumes',
        required=['key'],
        key={'type': 'string'},
        delay={'type': 'number'},
        verbose={'type': 'boolean'})
    permissions = (
        'ec2:CopySnapshot',
        'ec2:CreateSnapshot',
        'ec2:CreateVolume',
        'ec2:DescribeInstances',
        'ec2:DescribeSnapshots',
        'ec2:DescribeVolumes',
        'ec2:StopInstances',
        'ec2:StartInstances',
        'ec2:ModifyInstanceAttribute',
        'ec2:DeleteTags')
    def validate(self):
        """Requires a kms key id/alias; also caches the verbose flag."""
        key = self.data.get('key')
        if not key:
            raise ValueError(
                "action:encrypt-instance-volume "
                "requires kms keyid/alias specified")
        self.verbose = self.data.get('verbose', False)
        return self
    def process(self, volumes):
        """Groups unencrypted attached volumes by instance and encrypts
        each instance's volume set concurrently."""
        original_count = len(volumes)
        # NOTE(review): condition reads as "not encrypted OR not attached";
        # presumably intended to select unencrypted attached volumes —
        # kept as-is to preserve behavior
        volumes = [v for v in volumes
                   if not v['Encrypted'] or not v['Attachments']]
        log.debug(
            "EncryptVolumes filtered from %d to %d "
            " unencrypted attached volumes" % (
                original_count, len(volumes)))
        # Group volumes by instance id
        instance_vol_map = {}
        for v in volumes:
            instance_id = v['Attachments'][0]['InstanceId']
            instance_vol_map.setdefault(instance_id, []).append(v)
        # Query instances to find current instance state
        self.instance_map = {
            i['InstanceId']: i for i in
            self.manager.get_resource_manager('ec2').get_resources(
                list(instance_vol_map.keys()), cache=False)}
        with self.executor_factory(max_workers=10) as w:
            futures = {}
            for instance_id, vol_set in instance_vol_map.items():
                futures[w.submit(
                    self.process_volume, instance_id, vol_set)] = instance_id
            for f in as_completed(futures):
                if f.exception():
                    instance_id = futures[f]
                    log.error(
                        "Exception processing instance:%s volset: %s \n %s" % (
                            instance_id, instance_vol_map[instance_id],
                            f.exception()))
    def process_volume(self, instance_id, vol_set):
        """Encrypt attached unencrypted ebs volumes

        vol_set corresponds to all the unencrypted volumes on a given instance.
        """
        key_id = self.get_encryption_key()
        if self.verbose:
            self.log.debug("Using encryption key: %s" % key_id)
        client = local_session(self.manager.session_factory).client('ec2')
        # Only stop and start the instance if it was running.
        instance_running = self.stop_instance(instance_id)
        if instance_running is None:
            # instance is terminating; nothing to do
            return
        # Create all the volumes before patching the instance.
        paired = []
        for v in vol_set:
            vol_id = self.create_encrypted_volume(v, key_id, instance_id)
            paired.append((v, vol_id))
        # Next detach and reattach
        for v, vol_id in paired:
            client.detach_volume(
                InstanceId=instance_id, VolumeId=v['VolumeId'])
            # 5/8/2016 The detach isn't immediately consistent
            time.sleep(self.data.get('delay', 15))
            client.attach_volume(
                InstanceId=instance_id, VolumeId=vol_id,
                Device=v['Attachments'][0]['Device'])
            # Set DeleteOnTermination attribute the same as source volume
            if v['Attachments'][0]['DeleteOnTermination']:
                client.modify_instance_attribute(
                    InstanceId=instance_id,
                    BlockDeviceMappings=[
                        {
                            'DeviceName': v['Attachments'][0]['Device'],
                            'Ebs': {
                                'VolumeId': vol_id,
                                'DeleteOnTermination': True
                            }
                        }
                    ]
                )
        if instance_running:
            client.start_instances(InstanceIds=[instance_id])
        if self.verbose:
            self.log.debug(
                "Deleting unencrypted volumes for: %s" % instance_id)
        for v in vol_set:
            client.delete_volume(VolumeId=v['VolumeId'])
        # Clean-up transient tags on newly created encrypted volume.
        for v, vol_id in paired:
            client.delete_tags(
                Resources=[vol_id],
                Tags=[
                    {'Key': 'maid-crypt-remediation'},
                    {'Key': 'maid-origin-volume'},
                    {'Key': 'maid-instance-device'}
                ]
            )
    def stop_instance(self, instance_id):
        """Stops the instance if running.

        Returns True if it was running (and has been stopped), False if it
        was already stopped, or None if it is terminating and should be
        skipped entirely.
        """
        client = local_session(self.manager.session_factory).client('ec2')
        instance_state = self.instance_map[instance_id]['State']['Name']
        if instance_state in ('shutting-down', 'terminated'):
            self.log.debug('Skipping terminating instance: %s' % instance_id)
            return
        elif instance_state in ('running',):
            client.stop_instances(InstanceIds=[instance_id])
            self.wait_on_resource(client, instance_id=instance_id)
            return True
        return False
    def create_encrypted_volume(self, v, key_id, instance_id):
        """Creates an encrypted clone of volume *v* via snapshot ->
        encrypted snapshot copy -> encrypted volume; returns the new
        volume id and deletes the transient snapshots."""
        # Create a current snapshot
        ec2 = local_session(self.manager.session_factory).client('ec2')
        results = ec2.create_snapshot(
            VolumeId=v['VolumeId'],
            Description="maid transient snapshot for encryption",)
        transient_snapshots = [results['SnapshotId']]
        ec2.create_tags(
            Resources=[results['SnapshotId']],
            Tags=[
                {'Key': 'maid-crypto-remediation', 'Value': 'true'}])
        self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])
        # Create encrypted snapshot from current
        results = ec2.copy_snapshot(
            SourceSnapshotId=results['SnapshotId'],
            # region name is the AZ with the trailing letter stripped
            SourceRegion=v['AvailabilityZone'][:-1],
            Description='maid transient snapshot for encryption',
            Encrypted=True,
            KmsKeyId=key_id)
        transient_snapshots.append(results['SnapshotId'])
        ec2.create_tags(
            Resources=[results['SnapshotId']],
            Tags=[
                {'Key': 'maid-crypto-remediation', 'Value': 'true'}
            ])
        self.wait_on_resource(ec2, snapshot_id=results['SnapshotId'])
        # Create encrypted volume, also tag so we can recover
        results = ec2.create_volume(
            Size=v['Size'],
            VolumeType=v['VolumeType'],
            SnapshotId=results['SnapshotId'],
            AvailabilityZone=v['AvailabilityZone'],
            Encrypted=True)
        ec2.create_tags(
            Resources=[results['VolumeId']],
            Tags=[
                {'Key': 'maid-crypt-remediation', 'Value': instance_id},
                {'Key': 'maid-origin-volume', 'Value': v['VolumeId']},
                {'Key': 'maid-instance-device',
                 'Value': v['Attachments'][0]['Device']}])
        # Wait on encrypted volume creation
        self.wait_on_resource(ec2, volume_id=results['VolumeId'])
        # Delete transient snapshots
        for sid in transient_snapshots:
            ec2.delete_snapshot(SnapshotId=sid)
        return results['VolumeId']
    def get_encryption_key(self):
        """Resolves the configured kms key alias/id to a concrete key id."""
        kms = local_session(self.manager.session_factory).client('kms')
        key_alias = self.data.get('key')
        result = kms.describe_key(KeyId=key_alias)
        key_id = result['KeyMetadata']['KeyId']
        return key_id
    def wait_on_resource(self, *args, **kw):
        """Waits on resource creation/state change, retrying up to 3 times."""
        # Sigh this is dirty, but failure in the middle of our workflow
        # due to overly long resource creation is complex to unwind,
        # with multi-volume instances. Wait up to three times (actual
        # wait time is a per resource type configuration.

        # Note we wait for all resource creation before attempting to
        # patch an instance, so even on resource creation failure, the
        # instance is not modified
        try:
            return self._wait_on_resource(*args, **kw)
        except Exception:
            try:
                return self._wait_on_resource(*args, **kw)
            except Exception:
                return self._wait_on_resource(*args, **kw)
    def _wait_on_resource(
            self, client, snapshot_id=None, volume_id=None, instance_id=None):
        """Single waiter pass for exactly one of snapshot/volume/instance."""
        # boto client waiters poll every 15 seconds up to a max 600s (5m)
        if snapshot_id:
            if self.verbose:
                self.log.debug(
                    "Waiting on snapshot completion %s" % snapshot_id)
            waiter = client.get_waiter('snapshot_completed')
            waiter.wait(SnapshotIds=[snapshot_id])
            if self.verbose:
                self.log.debug("Snapshot: %s completed" % snapshot_id)
        elif volume_id:
            if self.verbose:
                self.log.debug("Waiting on volume creation %s" % volume_id)
            waiter = client.get_waiter('volume_available')
            waiter.wait(VolumeIds=[volume_id])
            if self.verbose:
                self.log.debug("Volume: %s created" % volume_id)
        elif instance_id:
            if self.verbose:
                self.log.debug("Waiting on instance stop")
            waiter = client.get_waiter('instance_stopped')
            waiter.wait(InstanceIds=[instance_id])
            if self.verbose:
                self.log.debug("Instance: %s stopped" % instance_id)
@actions.register('snapshot')
class CreateSnapshot(BaseAction):
    """Snapshot an EBS volume.

    :example:

    .. code-block: yaml

            policies:
              - name: snapshot-volumes
                resource: ebs
                filters:
                  - Attachments: []
                  - State: available
                actions:
                  - snapshot
    """

    permissions = ('ec2:CreateSnapshot',)
    schema = type_schema('snapshot')

    def process(self, volumes):
        '''Create one snapshot per volume, retrying on api throttling.'''
        ec2 = local_session(self.manager.session_factory).client('ec2')
        throttle_retry = get_retry(['Throttled'], max_attempts=5)
        for v in volumes:
            throttle_retry(ec2.create_snapshot, VolumeId=v['VolumeId'])
@actions.register('delete')
class Delete(BaseAction):
    """Delete an ebs volume.

    If the force boolean is true, we will detach an attached volume
    from an instance. Note this cannot be done for running instance
    root volumes.

    :example:

    .. code-block: yaml

            policies:
              - name: delete-unattached-volumes
                resource: ebs
                filters:
                  - Attachments: []
                  - State: available
                actions:
                  - delete
    """

    schema = type_schema('delete', force={'type': 'boolean'})
    permissions = (
        'ec2:DetachVolume', 'ec2:DeleteVolume', 'ec2:DescribeVolumes')

    def process(self, volumes):
        '''Delete the given volumes with a small worker pool.'''
        with self.executor_factory(max_workers=3) as pool:
            list(pool.map(self.process_volume, volumes))

    def process_volume(self, volume):
        '''Delete one volume, force-detaching it first when requested.

        A volume that has already disappeared is treated as success.
        '''
        vid = volume['VolumeId']
        client = local_session(self.manager.session_factory).client('ec2')
        try:
            if self.data.get('force') and len(volume['Attachments']):
                client.detach_volume(VolumeId=vid, Force=True)
                client.get_waiter('volume_available').wait(VolumeIds=[vid])
            self.manager.retry(client.delete_volume, VolumeId=vid)
        except ClientError as e:
            if e.response['Error']['Code'] == "InvalidVolume.NotFound":
                return
            raise
@filters.register('modifyable')
class ModifyableVolume(Filter):
    """Check if an ebs volume is modifyable online.

    Considerations - https://goo.gl/CBhfqV

    Consideration Summary
      - only current instance types are supported (one exception m3.medium)
        Current Generation Instances (2017-2) https://goo.gl/iuNjPZ
      - older magnetic volume types are not supported
      - shrinking volumes is not supported
      - must wait at least 6hrs between modifications to the same volume.
      - volumes must have been attached after nov 1st, 2016.

    See `custodian schema ebs.actions.modify` for examples.
    """

    schema = type_schema('modifyable')

    # instance families whose attached volumes cannot be modified online
    older_generation = set((
        'm1.small', 'm1.medium', 'm1.large', 'm1.xlarge',
        'c1.medium', 'c1.xlarge', 'cc2.8xlarge',
        'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cr1.8xlarge',
        'hi1.4xlarge', 'hs1.8xlarge', 'cg1.4xlarge', 't1.micro',
        # two legs good, not all current gen work either.
        'm3.large', 'm3.xlarge', 'm3.2xlarge'
        ))

    permissions = ("ec2:DescribeInstances",)

    def process(self, resources, event=None):
        # results: volumes considered modifyable; filtered: ids rejected
        # (counted per reason in stats); attached: need an instance check.
        results = []
        filtered = []
        attached = []
        stats = Counter()
        # volumes attached before this date cannot be modified online
        marker_date = parse_date('2016-11-01T00:00:00+00:00')

        # Filter volumes
        for r in resources:
            # unsupported type (older magnetic 'standard' volumes)
            if r['VolumeType'] == 'standard':
                stats['vol-type'] += 1
                filtered.append(r['VolumeId'])
                continue

            # unattached are easy
            if not r.get('Attachments'):
                results.append(r)
                continue

            # check for attachment date older then supported date
            if r['Attachments'][0]['AttachTime'] < marker_date:
                stats['attach-time'] += 1
                filtered.append(r['VolumeId'])
                continue

            attached.append(r)

        # Filter volumes attached to unsupported instance types
        ec2 = self.manager.get_resource_manager('ec2')
        instance_map = {}
        for v in attached:
            instance_map.setdefault(
                v['Attachments'][0]['InstanceId'], []).append(v)

        instances = ec2.get_resources(list(instance_map.keys()))
        for i in instances:
            if i['InstanceType'] in self.older_generation:
                stats['instance-type'] += len(instance_map[i['InstanceId']])
                filtered.extend([v['VolumeId'] for v in instance_map.pop(i['InstanceId'])])
            else:
                results.extend(instance_map.pop(i['InstanceId']))

        # Filter volumes that are currently under modification
        client = local_session(self.manager.session_factory).client('ec2')
        modifying = set()

        # NOTE(review): volumes whose instance was not returned by
        # ec2.get_resources above stay in instance_map and are silently
        # excluded from both results and filtered — confirm intended.
        for vol_set in chunks(list(results), 200):
            vol_ids = [v['VolumeId'] for v in vol_set]
            mutating = client.describe_volumes_modifications(
                Filters=[
                    {'Name': 'volume-id',
                     'Values': vol_ids},
                    {'Name': 'modification-state',
                     'Values': ['modifying', 'optimizing', 'failed']}])
            for vm in mutating.get('VolumesModifications', ()):
                stats['vol-mutation'] += 1
                filtered.append(vm['VolumeId'])
                modifying.add(vm['VolumeId'])

        self.log.debug(
            "filtered %d of %d volumes due to %s",
            len(filtered), len(resources), sorted(stats.items()))

        return [r for r in results if r['VolumeId'] not in modifying]
@actions.register('modify')
class ModifyVolume(BaseAction):
    """Modify an ebs volume online.

    **Note this action requires use of modifyable filter**

    Intro Blog & Use Cases - https://goo.gl/E3u4Ue
    Docs - https://goo.gl/DJM4T0
    Considerations - https://goo.gl/CBhfqV

    :example:

      Find under utilized provisioned iops volumes older than a week
      and change their type.

    .. code-block: yaml

       policies:
        - name: ebs-remove-piops
          resource: ebs
          filters:
           - type: value
             key: CreateDate
             value_type: age
             value: 7
             op: greater-than
           - VolumeType: io1
           - type: metrics
             name: VolumeConsumedReadWriteOps
             statistics: Maximum
             value: 100
             op: less-than
             days: 7
           - modifyable
          actions:
           - type: modify
             volume-type: gp2

    `iops-percent` and `size-percent` can be used to modify
    respectively iops on io1 volumes and volume size.

    When converting to io1, `iops-percent` is used to set the iops
    allocation for the new volume against the extant value for the old
    volume.

    :example:

      Double storage and quadruple iops for all io1 volumes.

    .. code-block: yaml

       policies:
        - name: ebs-grow-piops
          resource: ebs
          filters:
           - VolumeType: io1
           - modifyable
          actions:
           - type: modify
             size-percent: 200
             iops-percent: 400

    **Note** resizing down aka shrinking requires OS and FS support
    and potentially additional preparation, else data-loss may occur.
    To prevent accidents, shrinking must be explicitly enabled by also
    setting `shrink: true` on the action.
    """

    schema = type_schema(
        'modify',
        **{'volume-type': {'enum': ['io1', 'gp2', 'st1', 'sc1']},
           'shrink': False,
           'size-percent': {'type': 'number'},
           'iops-percent': {'type': 'number'}})

    # assumptions as its the closest i can find.
    permissions = ("ec2:ModifyVolumeAttribute",)

    def validate(self):
        """Require the modifyable filter and an explicit opt-in to shrink.

        Raises FilterValidationError on a bad policy; returns self.
        """
        if 'modifyable' not in self.manager.data.get('filters', ()):
            raise FilterValidationError(
                "modify action requires modifyable filter in policy")
        # Default to 100 when size-percent is absent: the bare
        # .get('size-percent') returned None, and `None < 100` is a
        # TypeError on py3 (and truthy on py2, wrongly demanding shrink).
        if self.data.get('size-percent', 100) < 100 and not self.data.get(
                'shrink', False):
            raise FilterValidationError((
                "shrinking volumes requires os/fs support "
                "or data-loss may ensue, use `shrink: true` to override"))
        return self

    def process(self, resources):
        """Modify volumes in batches of 50."""
        for resource_set in chunks(resources, 50):
            self.process_resource_set(resource_set)

    def process_resource_set(self, resource_set):
        """Issue modify_volume calls applying the configured deltas.

        iops-percent and size-percent scale the volume's current values;
        volume-type replaces the type outright.
        """
        client = local_session(self.manager.session_factory).client('ec2')
        vtype = self.data.get('volume-type')
        psize = self.data.get('size-percent')
        piops = self.data.get('iops-percent')

        for r in resource_set:
            params = {'VolumeId': r['VolumeId']}
            if piops and ('io1' in (vtype, r['VolumeType'])):
                # default here if we're changing to io1
                params['Iops'] = max(int(r.get('Iops', 10) * piops / 100.0), 100)
            if psize:
                params['Size'] = max(int(r['Size'] * psize / 100.0), 1)
            if vtype:
                params['VolumeType'] = vtype
            self.manager.retry(client.modify_volume, **params)
| |
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
import serial
import time
import re
import binascii
import threading
import datetime
import sys
# use USB UART or UART on pcDuino to communicate with zigbee gateway
try:
    # Prefer an external USB-serial adapter when one is plugged in
    ser = serial.Serial("/dev/ttyUSB0", 115200,timeout = 0.1)
except Exception:
    try:
        # Fall back to the on-board UART; gpio0/gpio1 must be switched
        # to mode '3' (their UART function) on the pcDuino first
        ser = serial.Serial("/dev/ttyS1", 115200,timeout = 0.1)
        with open("/sys/devices/virtual/misc/gpio/mode/gpio0",'w') as UART_RX:
            UART_RX.write('3')
        with open("/sys/devices/virtual/misc/gpio/mode/gpio1",'w') as UART_TX:
            UART_TX.write('3')
    except serial.serialutil.SerialException:
        # No usable port at all: nothing the script can do
        print "serial failed!"
        exit()
def hexShow(argv):
    '''Return the bytes of *argv* as space-separated lowercase hex.

    Every byte is rendered as two hex digits followed by a space, so
    "ab" becomes "61 62 " (note the trailing space).
    '''
    # join() is linear, whereas the old += loop was quadratic; iterating
    # characters directly also drops the Python-2-only xrange builtin.
    return ''.join('%02x ' % ord(ch) for ch in argv)
def register():
while True:
ser.write('\x02')
ser.write('\x75')
ser.write('\x1e')
data = ser.readline()
val=hexShow(data)
leng = len(val)
if leng > 45:
a = val.find("0e fc 02 e1",0)
if a != -1:
print "add equipment ok"
b=a+12
mac = val[b:b+29]
return mac
break
time.sleep(0.2)
def get_info(short_mac):
send = "04 c8 " + short_mac + "01"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
a = rec.find("19 c9 00",0)
if a != -1:
print "get_info ok"
break
def set_target_tmp(short_mac):
send = "0c fc 02 01 04 01 01 01 02"+short_mac+"02 0a"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
a = rec.find("04 fd 02 01",0)
if a != -1:
print "set target ok"
break
time.sleep(0.2)
def set_target_hum(short_mac):
send = "0c fc 02 01 04 01 01 02 02"+short_mac+"02 0a"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
a = rec.find("04 fd 02 01",0)
if a != -1:
print "set target ok"
break
time.sleep(0.2)
def gateway_mac():
    '''Query the gateway for its own address field.

    Sends the 02 14 6f request until a reply carrying the "0c 15 00 6f"
    marker arrives, then returns hex-dump characters 15:38 of the reply.
    '''
    while True:
        ser.write('\x02')
        ser.write('\x14')
        ser.write('\x6f')
        data = ser.readline()
        dat = hexShow(data)
        if len(dat) > 30:
            a = dat.find("0c 15 00 6f", 0)
            if a != -1:
                # the unreachable `break` after this return was removed
                return dat[15:38]
        time.sleep(1)
def bind_tmp(eq_mac,gat_mac):
send = "16 d8"+eq_mac+"01 02 04 03"+gat_mac+"01"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
start = datetime.datetime.now()
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
b = rec.find("02 d9 00",0)
if b != -1:
print "bind ok"
break
time.sleep(0.2)
def bind_hum(eq_mac,gat_mac):
send = "16 d8"+eq_mac+"02 05 04 03"+gat_mac+"01"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
start = datetime.datetime.now()
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
b = rec.find("02 d9 00",0)
if b != -1:
print "bind ok"
break
time.sleep(0.2)
def report_tmp():
send = "11 FC 00 02 04 06 01 00 00 00 29 05 00 05 00 01 00 00"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
leng = len(rec)
if leng > 15:
b = rec.find("06 fd 00")
if b != -1:
print "send report ok"
break
time.sleep(0.2)
def report_hum():
send = "11 FC 00 05 04 06 01 00 00 00 21 05 00 05 00 01 00 00"
s = send.replace(' ','')
a=binascii.a2b_hex(s)
while True:
ser.write(a)
recv=ser.readline()
rec=hexShow(recv)
leng = len(rec)
if leng > 15:
b = rec.find("06 fd 00")
if b != -1:
print "send report ok"
break
time.sleep(0.2)
def get_tmp():
    '''Join a temperature sensor: register it, set the report target,
    bind its temperature cluster to the gateway and enable reporting.'''
    val=register()
    # chars 0:5 of the hex dump are the 2-byte short address ("xx yy")
    short = val[0:5]
    print "short:"+short
    # chars 6:29 are the 8-byte IEEE address
    mac = val[6:29]
    print "mac:"+mac
    gatmac = gateway_mac()
    print "gatewaymac:"+gatmac
    set_target_tmp(short)
    bind_tmp(mac,gatmac)
    report_tmp()
def get_hum():
    '''Join a humidity sensor: register it, set the report target,
    bind its humidity cluster to the gateway and enable reporting.'''
    val=register()
    # chars 0:5 of the hex dump are the 2-byte short address ("xx yy")
    short = val[0:5]
    print "short:"+short
    # chars 6:29 are the 8-byte IEEE address
    mac = val[6:29]
    print "mac:"+mac
    gatmac = gateway_mac()
    print "gatewaymac:"+gatmac
    set_target_hum(short)
    bind_hum(mac,gatmac)
    report_hum()
def msg():
    '''Read one line from the gateway and print any temperature
    ("01 02" marker) or humidity ("02 02" marker) reading it contains.

    The reading is a 16-bit little-endian integer in hundredths
    (divided by 100.0 before printing); offsets below index into the
    space-separated hex dump produced by hexShow.
    '''
    line = ser.readline()
    val = hexShow(line)
    leng = len(val)
    if leng >= 40:
        print val
        a = val.find("01 02",0)
        b = val.find("02 02",0)
        if a != -1:
            # swap the two hex bytes (value is little-endian on the wire)
            tmp = val[a+39:a+44].replace(' ','')
            t = "0x" + tmp[2:4] + tmp[0:2]
            temp = int(t,16)
            print "tem:"
            print temp/100.0
        if b != -1:
            hum = val[b+39:b+44].replace(' ','')
            h= "0x" + hum[2:4] + hum[0:2]
            temp = int(h,16)
            print "hum:"
            print temp/100.0
def main():
    '''Report serial port status, then loop forever printing messages.'''
    if ser.isOpen() == True:
        print "serial open succeed!"
    else:
        print "serial open failure!"
    # one-time device joining helpers; uncomment to pair a new sensor
    #get_tmp()
    #get_hum()
    while True:
        msg()
if __name__=='__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: close the serial port cleanly before exiting
        ser.close()
| |
'''
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
Modified by whale, 7 January 2015
'''
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
import mmh3
# TL;DR: the main training process starts in the "start training"
# section below; you may want to start reading the code from there
##############################################################################
# parameters #################################################################
##############################################################################
# A, paths
train = 'train.csv'        # path to training file
test = 'train_day31.csv'   # path to testing file
submission = 'logistic2.csv'  # path of the submission file to be written

# B, model
alpha = .5    # learning rate
beta = 8.     # smoothing parameter for adaptive learning rate
L1 = 3.       # L1 regularization, larger value means more regularized
L2 = 20.      # L2 regularization, larger value means more regularized

# C, feature/hash trick
D = 2**24           # number of weights to use (hash space size)
interaction = False  # whether to enable poly2 feature interactions

# D, training/validation
epoch = 5        # learn training data for N passes
holdafter = 29   # data after date N (exclusive) are used as validation
holdout = None   # use every N training instance for holdout validation
##############################################################################
# class, function, generator definitions #####################################
##############################################################################
class ftrl_proximal(object):
    ''' Our main algorithm: Follow the regularized leader - proximal

        In short,
        this is an adaptive-learning-rate sparse logistic-regression with
        efficient L1-L2-regularization

        Reference:
        http://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf
    '''

    def __init__(self, alpha, beta, L1, L2, D, interaction):
        # parameters
        self.alpha = alpha  # learning rate
        self.beta = beta    # smoothing term in the adaptive learning rate
        self.L1 = L1        # L1 regularization strength
        self.L2 = L2        # L2 regularization strength

        # feature related parameters
        self.D = D                      # hash space size / weight count
        self.interaction = interaction  # enable poly2 interactions

        # model
        # n: squared sum of past gradients
        # z: weights
        # w: lazy weights
        self.n = [0.] * D
        self.z = [0.] * D
        self.w = {}

    def _indices(self, x):
        ''' A helper generator that yields the indices in x

            The purpose of this generator is to make the following
            code a bit cleaner when doing feature interaction.
        '''

        # first yield index of the bias term
        yield 0

        # then yield the normal indices
        for index in x:
            yield index

        # now yield interactions (if applicable)
        if self.interaction:
            D = self.D
            L = len(x)

            # sort so each unordered pair hashes consistently
            x = sorted(x)
            for i in xrange(L):
                for j in xrange(i+1, L):
                    # one-hot encode interactions with hash trick
                    yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D

    def predict(self, x):
        ''' Get probability estimation on x

            INPUT:
                x: features, a list of hashed indices
            OUTPUT:
                probability of p(y = 1 | x; w)
        '''

        # parameters
        alpha = self.alpha
        beta = self.beta
        L1 = self.L1
        L2 = self.L2

        # model
        n = self.n
        z = self.z
        w = {}

        # wTx is the inner product of w and x
        wTx = 0.
        for i in self._indices(x):
            sign = -1. if z[i] < 0 else 1.  # get sign of z[i]

            # build w on the fly using z and n, hence the name - lazy weights
            # we are doing this at prediction instead of update time is because
            # this allows us for not storing the complete w
            if sign * z[i] <= L1:
                # w[i] vanishes due to L1 regularization
                w[i] = 0.
            else:
                # apply prediction time L1, L2 regularization to z and get w
                # (closed-form FTRL-proximal solution)
                w[i] = (sign * L1 - z[i]) / ((beta + sqrt(n[i])) / alpha + L2)

            wTx += w[i]

        # cache the current w for update stage
        self.w = w

        # bounded sigmoid function, this is the probability estimation;
        # clipping wTx to [-35, 35] keeps exp() from overflowing
        return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))

    def update(self, x, p, y):
        ''' Update model using x, p, y

            INPUT:
                x: feature, a list of indices
                p: click probability prediction of our model
                y: answer

            MODIFIES:
                self.n: increase by squared gradient
                self.z: weights
        '''

        # parameter
        alpha = self.alpha

        # model
        n = self.n
        z = self.z
        w = self.w

        # gradient under logloss
        g = p - y

        # update z and n
        for i in self._indices(x):
            # sigma is the per-coordinate learning-rate correction
            sigma = (sqrt(n[i] + g * g) - sqrt(n[i])) / alpha
            z[i] += g - sigma * w[i]
            n[i] += g * g
def logloss(p, y):
    ''' FUNCTION: Bounded logloss

        INPUT:
            p: our prediction
            y: real answer
        OUTPUT:
            logarithmic loss of p given y
    '''

    # clamp the prediction away from 0 and 1 so log() stays finite
    clamped = min(max(p, 10e-15), 1. - 10e-15)
    if y == 1.:
        return -log(clamped)
    return -log(1. - clamped)
def data(path, D):
    ''' GENERATOR: Apply hash-trick to the original csv row
        and for simplicity, we one-hot-encode everything

        INPUT:
            path: path to training or testing file
            D: the max index that we can hash to

        YIELDS:
            t: instance counter
            date: day-of-month extracted from the hour column
            ID: id of the instance, mainly useless
            x: a list of hashed and one-hot-encoded 'indices'
               we only need the index since all values are either 0 or 1
            y: y = 1 if we have a click, else we have y = 0
    '''
    for t, row in enumerate(DictReader(open(path))):
        # process id
        ID = row.pop('id')

        # process clicks (column absent in test data -> y stays 0)
        y = 1. if row.get('click') == '1' else 0.
        row.pop('click', None)

        # extract date; hour is originally YYMMDDHH
        date = int(row['hour'][4:6])
        row['dayofweek'] = str((date - 19) % 7)

        # turn hour really into hour
        row['hour'] = row['hour'][6:]

        # build x: one-hot encode everything with the hash trick
        x = [abs(mmh3.hash(key + '_' + value)) % D
             for key, value in row.items()]

        yield t, date, ID, x, y
##############################################################################
# start training #############################################################
##############################################################################

start = datetime.now()

# initialize ourselves a learner
learner = ftrl_proximal(alpha, beta, L1, L2, D, interaction)

# start training
for e in xrange(epoch):
    loss = 0.
    count = 0

    for t, date, ID, x, y in data(train, D):  # data is a generator
        #    t: just a instance counter
        # date: day of month parsed from the hour column
        #   ID: id provided in original data
        #    x: features
        #    y: label (click)

        # step 1, get prediction from learner
        p = learner.predict(x)

        if (holdafter and date > holdafter) or (holdout and t % holdout == 0):
            # step 2-1, calculate validation loss
            #           we do not train with the validation data so that our
            #           validation loss is an accurate estimation
            #
            # holdafter: train instances from day 1 to day N
            #            validate with instances from day N + 1 and after
            #
            # holdout: validate with every N instance, train with others
            loss += logloss(p, y)
            count += 1
        else:
            # step 2-2, update learner with label (click) information
            learner.update(x, p, y)

    # NOTE(review): loss/count raises ZeroDivisionError when no instance
    # falls in the validation split — confirm holdafter/holdout settings
    print('Epoch %d finished, validation logloss: %f, elapsed time: %s' % (
        e, loss/count, str(datetime.now() - start)))
##############################################################################
# start testing, and build Kaggle's submission file ##########################
##############################################################################

# write one "id,probability" line per test instance (labels are ignored)
with open(submission, 'w') as outfile:
    outfile.write('id,click\n')
    for t, date, ID, x, y in data(test, D):
        p = learner.predict(x)
        outfile.write('%s,%s\n' % (ID, str(p)))
| |
from __future__ import print_function, division, absolute_import
# Non-std. lib imports
from PySide.QtCore import Signal, QObject
from PySide.QtGui import QTabWidget, QVBoxLayout, QWidget, QLineEdit, \
QDoubleValidator, QLabel, QGridLayout
from numpy import asarray, nan
# Local imports
from rapid.gui.guicommon import toolTipText as ttt
class PeakView(QTabWidget):
    '''Class to display the peak information, one tab per peak.'''

    def __init__(self, parent = None):
        '''Initialize'''
        super(PeakView, self).__init__(parent)
        self._createWidgets()

    def _createWidgets(self):
        '''Create the widgets contained in this box'''
        # One page per possible peak; only the first two are shown at start
        self.pages = [
            PeakPage("Peak 1", 0),
            PeakPage("Peak 2", 1),
            PeakPage("Peak 3", 2),
            PeakPage("Peak 4", 3),
        ]

    def initUI(self):
        '''Initializes the layout of the contained widgets'''
        self.addTab(self.pages[0], self.pages[0].title)
        self.addTab(self.pages[1], self.pages[1].title)
        # npeaks tracks how many tabs are currently visible (2-4)
        self.npeaks = 2

    def makeConnections(self):
        '''Connect all the contained widgets together'''
        self.model.newParams.connect(self.distributeNewParams)
        self.model.changeNumPeaks.connect(self.changePeakNum)
        # every page pushes its edited parameters into the model
        self.pages[0].changeInputParams.connect(self.model.setInputParams)
        self.pages[1].changeInputParams.connect(self.model.setInputParams)
        self.pages[2].changeInputParams.connect(self.model.setInputParams)
        self.pages[3].changeInputParams.connect(self.model.setInputParams)

    def setModel(self, model):
        '''Attaches a model to this view'''
        self.model = model

    def setPeaks(self, vib, GL, GG, h):
        '''Manually set the peak data'''
        for i, vals in enumerate(zip(vib, GL, GG, h)):
            self.pages[i].setParams(*vals)

    #######
    # SLOTS
    #######

    def distributeNewParams(self, p, GL, GG, h):
        '''When the new parameters are given, send the results to the
        appropriate page'''
        for i, vals in enumerate(zip(p, GL, GG, h)):
            self.pages[i].viewNewParams(*vals)

    def changePeakNum(self, npeaks):
        '''Change the number of peaks by adding or removing tabs'''
        # enumerate the transitions between 2, 3 and 4 visible tabs
        if self.npeaks == 2:
            if npeaks == 3:
                self.addTab(self.pages[2], self.pages[2].title)
                self.npeaks = 3
            elif npeaks == 4:
                self.addTab(self.pages[2], self.pages[2].title)
                self.addTab(self.pages[3], self.pages[3].title)
                self.npeaks = 4
        elif self.npeaks == 3:
            if npeaks == 2:
                self.removeTab(2)
                self.npeaks = 2
            elif npeaks == 4:
                self.addTab(self.pages[3], self.pages[3].title)
                self.npeaks = 4
        elif self.npeaks == 4:
            if npeaks == 2:
                self.removeTab(3)
                self.removeTab(2)
                self.npeaks = 2
            elif npeaks == 3:
                self.removeTab(3)
                self.npeaks = 3
class PeakPage(QWidget):
    '''A peak page widget: input parameters and post-exchange values
    for one peak.'''

    def __init__(self, title, ID):
        '''Initialize.

        title -- the tab label shown in the PeakView
        ID    -- the integer index of this peak (0-3)
        '''
        super(PeakPage, self).__init__()
        self.title = title
        self.ID = ID
        self._createWidgets()
        self._initUI()
        self._makeConnections()

    def _createWidgets(self):
        '''Create the contained widgets'''
        # editable input parameters
        self.inputpeak = QLineEdit(self)
        self.inputGL = QLineEdit(self)
        self.inputGG = QLineEdit(self)
        self.inputH = QLineEdit(self)
        # read-only values after exchange
        self.newpeak = QLineEdit(self)
        self.newGL = QLineEdit(self)
        self.newGG = QLineEdit(self)
        self.newH = QLineEdit(self)
        # restrict inputs to sensible ranges
        self.inputpeak.setValidator(QDoubleValidator(300.0, 3000.0, 1,
                                                     self.inputpeak))
        self.inputGL.setValidator(QDoubleValidator(0.0, 100.0, 3,
                                                   self.inputGL))
        self.inputGG.setValidator(QDoubleValidator(0.0, 100.0, 3,
                                                   self.inputGG))
        self.inputH.setValidator(QDoubleValidator(0.0, 1.0, 3,
                                                  self.inputH))
        self.newpeak.setReadOnly(True)
        self.newGL.setReadOnly(True)
        self.newGG.setReadOnly(True)
        self.newH.setReadOnly(True)
        self.inputpeak.setToolTip(ttt('The vibrational frequency of this peak'
                                      ', in wavenumbers'))
        self.inputGL.setToolTip(ttt('The Lorentzian FWHM broadening of this '
                                    'peak, in wavenumbers'))
        self.inputGG.setToolTip(ttt('The Gaussian FWHM broadening of this peak'
                                    ', in wavenumbers'))
        self.inputH.setToolTip(ttt('The relative height of this peak'))
        self.newpeak.setToolTip(ttt('The vibrational frequency after '
                                    'exchange'))
        # BUGFIX: these two tooltips were swapped — GL is the Lorentzian
        # width and GG the Gaussian width (matching the input tooltips)
        self.newGL.setToolTip(ttt('The Lorentzian FWHM after exchange'))
        self.newGG.setToolTip(ttt('The Gaussian FWHM after exchange'))
        self.newH.setToolTip(ttt('The relative height after exchange'))

    def _initUI(self):
        '''Layout the contained widgets'''
        lo = QGridLayout()
        lo.addWidget(QLabel("Input Value"), 1, 2)
        lo.addWidget(QLabel("New Value"), 1, 4)
        lo.addWidget(QLabel("Peak"), 2, 1)
        lo.addWidget(self.inputpeak, 2, 2)
        lo.addWidget(QLabel("-->"), 2, 3)
        lo.addWidget(self.newpeak, 2, 4)
        lo.addWidget(QLabel("Lorentz FWHM"), 3, 1)
        lo.addWidget(self.inputGL, 3, 2)
        lo.addWidget(QLabel("-->"), 3, 3)
        lo.addWidget(self.newGL, 3, 4)
        lo.addWidget(QLabel("Gauss FWHM"), 4, 1)
        lo.addWidget(self.inputGG, 4, 2)
        lo.addWidget(QLabel("-->"), 4, 3)
        lo.addWidget(self.newGG, 4, 4)
        lo.addWidget(QLabel("Height"), 5, 1)
        lo.addWidget(self.inputH, 5, 2)
        lo.addWidget(QLabel("-->"), 5, 3)
        lo.addWidget(self.newH, 5, 4)
        self.setLayout(lo)

    def _makeConnections(self):
        '''Connect the contained widgets together'''
        self.inputpeak.editingFinished.connect(self.inputParamsChanged)
        self.inputGL.editingFinished.connect(self.inputParamsChanged)
        self.inputGG.editingFinished.connect(self.inputParamsChanged)
        self.inputH.editingFinished.connect(self.inputParamsChanged)

    def viewNewParams(self, p, GL, GG, h):
        '''View the new parameters after exchange'''
        self.newpeak.setText('{0:.1f}'.format(p))
        self.newGL.setText('{0:.3f}'.format(GL))
        self.newGG.setText('{0:.3f}'.format(GG))
        self.newH.setText('{0:.3f}'.format(h))

    def setParams(self, p, GL, GG, h):
        '''Manually set the parameters on this page'''
        self.inputpeak.setText('{0:.1f}'.format(p))
        self.inputGL.setText('{0:.3f}'.format(GL))
        self.inputGG.setText('{0:.3f}'.format(GG))
        self.inputH.setText('{0:.3f}'.format(h))
        # Force an update of the data
        self.inputpeak.editingFinished.emit()

    #######
    # SLOTS
    #######

    def inputParamsChanged(self):
        '''Collects the parameters from this page and broadcasts them.

        Bails out silently while any field does not yet hold a number.
        '''
        try:
            vib = float(self.inputpeak.text())
        except ValueError:
            return
        try:
            GL = float(self.inputGL.text())
        except ValueError:
            return
        try:
            GG = float(self.inputGG.text())
        except ValueError:
            return
        try:
            h = float(self.inputH.text())
        except ValueError:
            return
        self.changeInputParams.emit(self.ID, vib, GL, GG, h)

    #########
    # SIGNALS
    #########

    # Signals for when a value is changed
    changeInputParams = Signal(int, float, float, float, float)
class PeakModel(QObject):
    '''Class to hold all information about the peaks'''

    def __init__(self, parent = None):
        '''Initialize the function class'''
        super(PeakModel, self).__init__(parent)
        # number of active peaks (set via changePeakNum)
        self.npeaks = 0
        # input parameters per peak, nan until the user supplies them
        #self.peaks = [1960.0, 1980.0, 2000.0, 2020.0]
        self.peaks = [nan, nan, nan, nan]
        self.GL = [nan, nan, nan, nan]
        #self.GL = [5.0, 5.0, 5.0, 5.0]
        self.GG = [nan, nan, nan, nan]
        #self.GG = [5.0, 5.0, 5.0, 5.0]
        self.h = [nan, nan, nan, nan]
        #self.h = [1.0, 1.0, 1.0, 1.0]
        # parameters after exchange, filled in by setNewParams
        self.newpeaks = [0.0, 0.0, 0.0, 0.0]
        self.newGL = [0.0, 0.0, 0.0, 0.0]
        self.newGG = [0.0, 0.0, 0.0, 0.0]
        self.newh = [0.0, 0.0, 0.0, 0.0]

    def getParams(self):
        '''Return the input parameters for the given number of peaks'''
        return (asarray(self.peaks[:self.npeaks]),
                asarray(self.GL[:self.npeaks]),
                asarray(self.GG[:self.npeaks]),
                asarray(self.h[:self.npeaks]))

    #######
    # SLOTS
    #######

    def setNewParams(self, p, GL, GG, h):
        '''Set the parameters after exchange'''
        # First, replace the values in the lists
        for i, vals in enumerate(zip(p, GL, GG, h)):
            self.newpeaks[i] = vals[0]
            self.newGL[i] = vals[1]
            self.newGG[i] = vals[2]
            self.newh[i] = vals[3]
        # Now broadcast results
        self.newParams.emit(p, GL, GG, h)

    def changePeakNum(self, npeaks):
        '''Change the number of peaks'''
        self.npeaks = npeaks
        self.changeNumPeaks.emit(self.npeaks)

    def setInputParams(self, num, p, GL, GG, h):
        '''Set the input parameter for a given peak, then emit'''
        self.peaks[num] = p
        self.GL[num] = GL
        self.GG[num] = GG
        self.h[num] = h
        self.inputParamsChanged.emit()

    #########
    # SIGNALS
    #########

    # Release the parameters to calculate the spectrum
    inputParamsChanged = Signal()

    # View the new parameters after exchange
    newParams = Signal(list, list, list, list)

    # Change the number of peaks
    changeNumPeaks = Signal(int)
| |
__author__ = 'Ralph'
import wx
import network.base
class Canvas(wx.Panel):
    '''Drawing surface hosting the node network: nodes, their ports and
    the connections between them, with mouse-driven editing.'''

    def __init__(self, parent, widgets):
        super(Canvas, self).__init__(parent)
        self._widgets = widgets
        self._nodes = []
        self._connections = []
        # wire up paint and mouse handling
        self.Bind(wx.EVT_PAINT, self._render)
        self.Bind(wx.EVT_RIGHT_UP, self._show_menu)
        self.Bind(wx.EVT_LEFT_DOWN, self._start_dragging)
        self.Bind(wx.EVT_LEFT_UP, self._stop_dragging)
        self.Bind(wx.EVT_MOTION, self._move)
        self.Bind(wx.EVT_LEFT_DCLICK, self._show_dialog)

    def create_node(self, widget):
        '''Add a new node for *widget* at the center of the client area.'''
        (x, y, width, height) = self.GetClientRect().Get()
        position = (x + width / 2, y + height / 2)
        self._nodes.append(Node(widget, position))
        self.Refresh()

    def delete_node(self, node):
        '''Remove *node* and every connection attached to it.'''
        # BUGFIX: rebuild the list instead of calling remove() while
        # iterating, which skipped the element after each removal.
        self._connections = [
            c for c in self._connections if not c.has_node(node)]
        self._nodes.remove(node)
        self.Refresh()

    @staticmethod
    def execute_node(node):
        '''Run the computation of *node*.'''
        node.execute()

    def _render(self, e):
        '''Paint handler: draw all nodes, then all connections.'''
        device = wx.PaintDC(self)
        for node in self._nodes:
            node.render(device)
        for connection in self._connections:
            connection.render(device)

    def _show_menu(self, e):
        '''Right-click: canvas menu, or node menu when over a node.'''
        position = e.GetPosition()
        menu = CanvasMenu(self, self._widgets)
        for node in self._nodes:
            if node.contains(position):
                menu = NodeMenu(self, node)
        self.PopupMenu(menu, e.GetPosition())

    def _start_dragging(self, e):
        '''Left-down: begin a new connection from a port, or drag a node.'''
        position = e.GetPosition()
        for node in self._nodes:
            node.set_dragging(False)
            if node.is_input_port_selected(position):
                connection = Connection()
                connection.set_target(node.get_selected_port())
                self._connections.append(connection)
                break
            if node.is_output_port_selected(position):
                connection = Connection()
                connection.set_source(node.get_selected_port())
                self._connections.append(connection)
                break
            if node.contains(position):
                node.set_dragging(True)
                break

    def _stop_dragging(self, e):
        '''Left-up: finish a pending connection on a port, drop danglers.'''
        position = e.GetPosition()
        for node in self._nodes:
            node.set_dragging(False)
            if node.is_input_port_selected(position):
                for connection in self._connections:
                    if not connection.has_target():
                        connection.set_target(node.get_selected_port())
                        break
                continue
            if node.is_output_port_selected(position):
                for connection in self._connections:
                    if not connection.has_source():
                        connection.set_source(node.get_selected_port())
                        break
                continue
        # BUGFIX: filter instead of removing while iterating (the old
        # loop could skip a dangling connection right after a removal).
        # Keep only connections that have both endpoints.
        self._connections = [
            c for c in self._connections
            if c.has_source() and c.has_target()]
        self.Refresh()

    def _move(self, e):
        '''Mouse move: drag nodes and track the loose connection end.'''
        position = e.GetPosition()
        for node in self._nodes:
            if node.is_dragging():
                node.set_position(position)
        for connection in self._connections:
            if connection.has_source() and connection.has_target():
                continue
            if not connection.has_source():
                connection.set_source_position(position)
                continue
            if not connection.has_target():
                connection.set_target_position(position)
                continue
        self.Refresh()

    def _show_dialog(self, e):
        '''Double-click: open the configuration dialog of the hit node.'''
        position = e.GetPosition()
        for node in self._nodes:
            if node.contains(position):
                node.get_widget().show()
                break
class CanvasMenu(wx.Menu):
    '''Context menu shown on a right-click over empty canvas space.'''

    def __init__(self, canvas, widgets):
        super(CanvasMenu, self).__init__()
        self._canvas = canvas
        self._widget_ids = {}
        submenu = wx.Menu()
        for widget in widgets:
            entry = wx.MenuItem(self, wx.NewId(), widget.get_name())
            submenu.AppendItem(entry)
            submenu.Bind(wx.EVT_MENU, self._create_node, entry)
            # remember which widget belongs to which menu id
            self._widget_ids[entry.GetId()] = widget
        self.AppendSubMenu(submenu, 'New Node')

    def _create_node(self, e):
        '''Create a canvas node for the widget of the chosen entry.'''
        chosen = self._widget_ids[e.GetId()]
        self._canvas.create_node(chosen)
class Port(object):
    '''Visual 10x10 handle for a node's input or output port.'''

    def __init__(self, port):
        super(Port, self).__init__()
        self._port = port
        self._width = 10
        self._height = 10
        self._position = (0, 0)

    def render(self, device):
        '''Draw the port as a blue rectangle.'''
        device.SetPen(wx.Pen(wx.BLACK))
        device.SetBrush(wx.Brush(wx.BLUE))
        x, y = self._position
        device.DrawRectangle(x, y, self._width, self._height)

    def get_position(self):
        '''Return the top-left corner of the port rectangle.'''
        return self._position

    def set_position(self, position):
        '''Move the port to a new top-left corner.'''
        self._position = position

    def get_port(self):
        '''Return the wrapped network port.'''
        return self._port

    def get_node(self):
        '''Return the network node the wrapped port belongs to.'''
        return self.get_port().get_node()

    def get_name(self):
        '''Return the name of the wrapped port.'''
        return self.get_port().get_name()

    def contains(self, position):
        '''True when *position* lies strictly inside the rectangle.'''
        px, py = self._position
        x, y = position
        return px < x < px + self._width and py < y < py + self._height
class Node(object):
    """Visual 50x50 box for a widget on the canvas, with its input/output ports."""
    def __init__(self, widget, position):
        super(Node, self).__init__()
        self._position = position
        self._widget = widget
        self._width = 50
        self._height = 50
        self._dragging = False
        self._selected_port = None
        self._input_ports = [Port(p) for p in self.get_node().get_input_ports()]
        self._output_ports = [Port(p) for p in self.get_node().get_output_ports()]
    def render(self, device):
        """Draw the node box, its label, and both port columns."""
        device.SetPen(wx.Pen(wx.BLACK))
        device.SetBrush(wx.Brush(wx.RED))
        x = self._position[0]
        y = self._position[1]
        device.DrawRectangle(x, y, self._width, self._height)
        device.DrawText(self._widget.get_name(), x, y + self._height + 5)
        # Inputs hang off the left edge, outputs off the right edge.
        for i, port in enumerate(self._input_ports):
            port.set_position((x - 10, y + i * 20))
            port.render(device)
        for i, port in enumerate(self._output_ports):
            port.set_position((x + self._width - 1, y + i * 20))
            port.render(device)
    def get_node(self):
        """Return the underlying model node."""
        return self.get_widget().get_node()
    def get_widget(self):
        return self._widget
    def get_position(self):
        return self._position
    def set_position(self, position):
        """Center the node box on *position*."""
        self._position = (position[0] - self._width / 2,
                          position[1] - self._height / 2)
    def is_input_port_selected(self, position):
        """Select and report the input port under *position*, if any."""
        for port in self._input_ports:
            if port.contains(position):
                self._selected_port = port
                return True
        return False
    def is_output_port_selected(self, position):
        """Select and report the output port under *position*, if any."""
        for port in self._output_ports:
            if port.contains(position):
                self._selected_port = port
                return True
        return False
    def get_selected_port(self):
        return self._selected_port
    def contains(self, position):
        """Return True when *position* lies strictly inside the node box."""
        px, py = self._position
        return (px < position[0] < px + self._width
                and py < position[1] < py + self._height)
    def is_dragging(self):
        return self._dragging
    def set_dragging(self, dragging):
        self._dragging = dragging
    def is_executable(self):
        """A node with no inputs is a source and can be executed directly."""
        return len(self._input_ports) == 0
    def execute(self):
        if self.is_executable():
            self.get_node().execute()
class NodeMenu(wx.Menu):
    """Per-node context menu: always 'Delete Node', plus 'Execute' for source nodes."""
    def __init__(self, canvas, node):
        super(NodeMenu, self).__init__()
        self._canvas = canvas
        self._node = node
        delete_item = wx.MenuItem(self, wx.NewId(), 'Delete Node')
        self.AppendItem(delete_item)
        self.Bind(wx.EVT_MENU, self._delete_node, delete_item)
        # Only input-free nodes can be executed directly.
        if self._node.is_executable():
            execute_item = wx.MenuItem(self, wx.NewId(), 'Execute')
            self.AppendItem(execute_item)
            self.Bind(wx.EVT_MENU, self._execute_node, execute_item)
    def _delete_node(self, e):
        self._canvas.delete_node(self._node)
    def _execute_node(self, e):
        self._canvas.execute_node(self._node)
class Connection(object):
    """A link between two ports; either end may be temporarily unattached."""
    def __init__(self):
        super(Connection, self).__init__()
        self._source = None
        self._target = None
        self._source_position = (0, 0)
        self._target_position = (0, 0)
        # Underlying model connection, created once both ends exist.
        self._connection = None
    def render(self, device):
        """Draw the connection as a line; loose ends use their free positions."""
        device.SetPen(wx.Pen(wx.BLACK))
        start = self.get_source().get_position() if self.has_source() else self._source_position
        end = self.get_target().get_position() if self.has_target() else self._target_position
        device.DrawLine(start[0], start[1], end[0], end[1])
    def _link(self):
        # Materialize the model-level connection between the two ports.
        self._connection = network.base.Connection(self._source.get_port(), self._target.get_port())
    def get_source(self):
        return self._source
    def set_source(self, source):
        """Attach the source port; complete the model connection if target exists."""
        self._source = source
        self._source_position = source.get_position()
        if self.has_target():
            self._link()
    def set_source_position(self, position):
        """Move the free source end; ignored once a source port is attached."""
        if self._source is None:
            self._source_position = position
    def has_source(self):
        return self.get_source() is not None
    def get_target(self):
        return self._target
    def set_target(self, target):
        """Attach the target port; complete the model connection if source exists."""
        self._target = target
        self._target_position = target.get_position()
        if self.has_source():
            self._link()
    def set_target_position(self, position):
        """Move the free target end; ignored once a target port is attached."""
        if self._target is None:
            self._target_position = position
    def has_target(self):
        return self.get_target() is not None
    def has_node(self, node):
        """Return True if either attached end belongs to *node*'s model node."""
        return ((self.has_source() and self.get_source().get_node() is node.get_node())
                or (self.has_target() and self.get_target().get_node() is node.get_node()))
| |
# Functions that create and check the elements contained in the state and
# bundle of functions
import inspect
from Optizelle.Functions import *
import pdb
# Module docstring, assigned explicitly instead of via a leading string literal.
__doc__ = "Optizelle support functions"
def checkFloat(name,value):
    """Raise TypeError unless *value* (the member called *name*) is a float."""
    if isinstance(value,float):
        return
    raise TypeError("%s member must be a floating point" % name)
def checkNatural(name,value):
    """Raise TypeError unless *value* is a non-negative int."""
    valid = isinstance(value,int) and value >= 0
    if not valid:
        raise TypeError("%s member must be a natural number" % name)
def checkEnum(name,value):
    """Raise TypeError unless *value* is an enum tag encoded as a natural number."""
    valid = isinstance(value,int) and value >= 0
    if not valid:
        raise TypeError("%s member must be an enumerated type (natural.)"
            % name)
def checkEnumRange(name,enum,value):
    """Check that an input lies within a valid enumerated range.

    Args:
        name: member name, used in the error message.
        enum: a class whose attribute values define the valid range.
        value: the candidate enum value.

    Raises:
        TypeError: if value is not among enum's attribute values.
    """
    # Fixed the misspelled error message ("enumated" -> "enumerated") and
    # use the idiomatic `not in` test.
    if value not in enum.__dict__.values():
        raise TypeError("%s member is outside the valid enumerated range"
            % name)
def checkVectorList(name,value):
    """Raise TypeError unless *value* is a list (of vectors)."""
    if isinstance(value,list):
        return
    raise TypeError("%s member must be a list of vectors" % name)
def checkFunction(name,value):
    """Raise TypeError unless *value* is a plain Python function."""
    if inspect.isfunction(value):
        return
    raise TypeError("%s member must be a function" % name)
def checkDelete(name):
    """Unconditionally refuse deletion of the member called *name*."""
    msg = "Cannot delete the %s member" % name
    raise TypeError(msg)
def checkScalarValuedFunction(name,value):
    """Raise TypeError unless *value* is a ScalarValuedFunction instance."""
    if issubclass(type(value),ScalarValuedFunction):
        return
    raise TypeError("%s member must be a ScalarValuedFunction" % name)
def checkVectorValuedFunction(name,value):
    """Raise TypeError unless *value* is a VectorValuedFunction instance."""
    if issubclass(type(value),VectorValuedFunction):
        return
    raise TypeError("%s member must be a VectorValuedFunction" % name)
def checkOperator(name,value):
    """Raise TypeError unless *value* is an Operator instance."""
    if issubclass(type(value),Operator):
        return
    raise TypeError("%s member must be an Operator" % name)
def checkStaticMethod(vsname,name,value):
    """Check that *name* is defined as a staticmethod on vector space *vsname*.

    Args:
        vsname: vector-space name, for the error message.
        name: required member name.
        value: the vector-space class to inspect.

    Raises:
        TypeError: if the member is missing or not a staticmethod.
    """
    if not (
        hasattr(value,name) and isinstance(value.__dict__[name],staticmethod)):
        # BUG FIX: the original joined the two message halves with '^'
        # (a TypeError on str at raise time) and, due to precedence, applied
        # '%' to only the second half.  Concatenate first, then format.
        raise TypeError(
            ("%s member is required as a static member in "
             "the vector space %s") % (name,vsname))
def checkVectorSpace(vsname,value):
    """Check that we have a valid vector space.

    Raises:
        TypeError: if any required operation is missing or not a staticmethod.
    """
    # The operations every vector space must provide
    fns=["init","copy","scal","zero","axpy","innr","rand"]
    # BUG FIX: map() is lazy on Python 3, so the original map-based form
    # silently skipped every check; iterate explicitly instead.
    for name in fns:
        checkStaticMethod(vsname,name,value)
def checkEuclidean(vsname,value):
    """Check that we have a valid Euclidean-Jordan algebra.

    Raises:
        TypeError: if the vector-space or algebra operations are missing.
    """
    # A Euclidean-Jordan algebra is first of all a vector space
    checkVectorSpace(vsname,value)
    # The additional algebra operations
    fns=["prod","id","linv","barr","srch","symm"]
    # BUG FIX: map() is lazy on Python 3, so the original form never ran
    # these checks; iterate explicitly instead.
    for name in fns:
        checkStaticMethod(vsname,name,value)
def checkMessaging(name,value):
    """Raise TypeError unless *value* is a messaging function."""
    if inspect.isfunction(value):
        return
    raise TypeError("%s argument must be a messaging function" %(name))
def checkString(name,value):
    """Raise TypeError unless *value* is a string."""
    if isinstance(value,str):
        return
    raise TypeError("%s argument must be a string" % (name))
def checkStateManipulator(name,value):
    """Raise TypeError unless *value* is a StateManipulator instance."""
    if issubclass(type(value),StateManipulator):
        return
    raise TypeError("%s argument must be a StateManipulator object"
        % (name))
def checkType(name,value):
    """Raise TypeError unless *value* is itself a type (built with `type`)."""
    # Matches the original exactly: only direct instances of `type` pass,
    # not instances of metaclass subclasses.
    if type(value) is not type:
        raise TypeError("%s argument must be a type" % (name))
def checkVectors(name,value):
    """Check that we have a list of restart vectors.

    Each element is expected to be an indexable pair whose element [0]
    is the vector's string label.

    Raises:
        TypeError: if value is not a list or a label is not a string.
    """
    if not issubclass(type(value),list):
        raise TypeError("%s argument must be a list" % (name))
    # BUG FIX: map() is lazy on Python 3, so the original map-based form
    # never actually ran the per-element checks; iterate explicitly.
    for i,x in enumerate(value):
        checkString("%s[%d][0]" % (name,i),x[0])
def checkReals(name,value):
    """Check that we have a list of restart reals.

    Each element is expected to be an indexable pair; elements [0] and [1]
    are both validated with checkString, mirroring the original checks.
    NOTE(review): validating element [1] as a string looks copy-pasted from
    the label check -- confirm restart reals really carry string values.

    Raises:
        TypeError: if value is not a list or an element fails validation.
    """
    if not issubclass(type(value),list):
        raise TypeError("%s argument must be a list" % (name))
    # BUG FIX: map() is lazy on Python 3, so the original form never ran
    # these checks; iterate explicitly, preserving the original ordering
    # (all [0] checks before all [1] checks).
    for i,x in enumerate(value):
        checkString("%s[%d][0]" % (name,i),x[0])
    for i,x in enumerate(value):
        checkString("%s[%d][1]" % (name,i),x[1])
def checkNaturals(name,value):
    """Check that we have a list of restart naturals.

    Each element is expected to be an indexable pair; elements [0] and [1]
    are both validated with checkString, mirroring the original checks.
    NOTE(review): validating element [1] as a string looks copy-pasted from
    the label check -- confirm restart naturals really carry string values.

    Raises:
        TypeError: if value is not a list or an element fails validation.
    """
    if not issubclass(type(value),list):
        raise TypeError("%s argument must be a list" % (name))
    # BUG FIX: map() is lazy on Python 3, so the original form never ran
    # these checks; iterate explicitly, preserving the original ordering.
    for i,x in enumerate(value):
        checkString("%s[%d][0]" % (name,i),x[0])
    for i,x in enumerate(value):
        checkString("%s[%d][1]" % (name,i),x[1])
def checkParams(name,value):
    """Check that we have a list of restart parameters.

    Each element is expected to be an indexable pair of strings
    (parameter label, parameter value).

    Raises:
        TypeError: if value is not a list or an element fails validation.
    """
    if not issubclass(type(value),list):
        raise TypeError("%s argument must be a list" % (name))
    # BUG FIX: map() is lazy on Python 3, so the original form never ran
    # these checks; iterate explicitly, preserving the original ordering.
    for i,x in enumerate(value):
        checkString("%s[%d][0]" % (name,i),x[0])
    for i,x in enumerate(value):
        checkString("%s[%d][1]" % (name,i),x[1])
def createFloatProperty(name,desc):
    """Build a property storing a validated float under the attribute '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkFloat(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createNatProperty(name,desc):
    """Build a property storing a validated natural number under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkNatural(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createEnumProperty(name,enum,desc):
    """Build a property storing a validated member of *enum* under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkEnum(name, value)
        checkEnumRange(name, enum, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createFunctionProperty(name,desc):
    """Build a property storing a validated function under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkFunction(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createVectorProperty(name,desc):
    """Build a property storing an (unvalidated) vector under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        # Vectors are opaque to Optizelle, so no validation is performed.
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createVectorListProperty(name,desc):
    """Build a property storing a validated list of vectors under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkVectorList(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createScalarValuedFunctionProperty(name,desc):
    """Build a property storing a validated ScalarValuedFunction under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkScalarValuedFunction(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createVectorValuedFunctionProperty(name,desc):
    """Build a property storing a validated VectorValuedFunction under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkVectorValuedFunction(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
def createOperatorProperty(name,desc):
    """Build a property storing a validated Operator under '_<name>'."""
    key = "_" + name
    def getter(self):
        return self.__dict__[key]
    def setter(self, value):
        checkOperator(name, value)
        self.__dict__[key] = value
    def deleter(self):
        checkDelete(name)
    return property(getter, setter, deleter, desc)
| |
import copy
import time
import unittest
import numpy as np
from fate_arch.session import computing_session as session
from sklearn.preprocessing import StandardScaler as SSL
from federatedml.feature.feature_scale.standard_scale import StandardScale
from federatedml.feature.instance import Instance
from federatedml.param.scale_param import ScaleParam
from federatedml.util.param_extract import ParamExtract
class TestStandardScaler(unittest.TestCase):
    """Tests federatedml's StandardScale against sklearn's StandardScaler.

    Each test fits both scalers on the same small table and compares the
    transformed features (rounded to 4 decimals) plus the fitted mean/std.
    Several tests mutate self.test_data in place to emulate feature capping;
    setUp rebuilds the data for every test, so this is safe.
    """
    def setUp(self):
        """Build a 10x6 feature table, start a computing session, and wrap rows as Instances."""
        self.test_data = [
            [0, 1.0, 10, 2, 3, 1],
            [1.0, 2, 9, 2, 4, 2],
            [0, 3.0, 8, 3, 3, 3],
            [1.0, 4, 7, 4, 4, 4],
            [1.0, 5, 6, 5, 5, 5],
            [1.0, 6, 5, 6, 6, -100],
            [0, 7.0, 4, 7, 7, 7],
            [0, 8, 3.0, 8, 6, 8],
            [0, 9, 2, 9.0, 9, 9],
            [0, 10, 1, 10.0, 10, 10]
        ]
        # Session id is just a timestamp so parallel runs don't collide.
        str_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
        session.init(str_time)
        self.test_instance = []
        for td in self.test_data:
            self.test_instance.append(Instance(features=np.array(td)))
        self.table_instance = self.data_to_table(self.test_instance)
        self.table_instance.schema['header'] = ["fid" + str(i) for i in range(len(self.test_data[0]))]
    def print_table(self, table):
        """Debug helper: print the feature vector of every row in *table*."""
        for v in (list(table.collect())):
            print(v[1].features)
    def data_to_table(self, data, partition=10):
        """Parallelize *data* into a session table with auto-generated keys."""
        data_table = session.parallelize(data, include_key=False, partition=partition)
        return data_table
    def get_table_instance_feature(self, table_instance):
        """Collect the table's features as plain lists, rounded to 4 decimals."""
        res_list = []
        for k, v in list(table_instance.collect()):
            res_list.append(list(np.around(v.features, 4)))
        return res_list
    def get_scale_param(self):
        """Build a default standard-scale ScaleParam (mean and std enabled)."""
        component_param = {
            "method": "standard_scale",
            "mode": "normal",
            "scale_col_indexes": [],
            "with_mean": True,
            "with_std": True,
        }
        scale_param = ScaleParam()
        param_extracter = ParamExtract()
        param_extracter.parse_param_from_config(scale_param, component_param)
        return scale_param
    # test with (with_mean=True, with_std=True):
    def test_fit1(self):
        scale_param = self.get_scale_param()
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        scaler = SSL()
        scaler.fit(self.test_data)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(scaler.transform(self.test_data), 4).tolist())
        self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4)))
        self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4)))
    # test with (with_mean=False, with_std=True):
    def test_fit2(self):
        scale_param = self.get_scale_param()
        scale_param.with_mean = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        scaler = SSL(with_mean=False)
        scaler.fit(self.test_data)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(scaler.transform(self.test_data), 4).tolist())
        # With centering disabled the reported means should be all zero.
        self.assertListEqual(list(np.around(mean, 4)), [0 for _ in mean])
        self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4)))
    # test with (with_mean=True, with_std=False):
    def test_fit3(self):
        scale_param = self.get_scale_param()
        scale_param.with_std = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        scaler = SSL(with_std=False)
        scaler.fit(self.test_data)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(scaler.transform(self.test_data), 4).tolist())
        self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4)))
        # With scaling disabled the reported stds should be all one.
        self.assertListEqual(list(np.around(std, 4)), [1 for _ in std])
    # test with (with_mean=False, with_std=False):
    def test_fit4(self):
        scale_param = self.get_scale_param()
        scale_param.with_std = False
        scale_param.with_mean = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        scaler = SSL(with_mean=False, with_std=False)
        scaler.fit(self.test_data)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(scaler.transform(self.test_data), 4).tolist())
        self.assertEqual(mean, [0 for _ in range(len(self.test_data[0]))])
        self.assertEqual(std, [1 for _ in range(len(self.test_data[0]))])
    # test with (area="all", scale_column_idx=[], with_mean=True, with_std=True):
    def test_fit5(self):
        scale_param = self.get_scale_param()
        scale_param.scale_column_idx = []
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        scaler = SSL()
        scaler.fit(self.test_data)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(scaler.transform(self.test_data), 4).tolist())
        self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4)))
        self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4)))
    # test with (area="col", scale_column_idx=[], with_mean=True, with_std=True):
    def test_fit6(self):
        scale_param = self.get_scale_param()
        scale_param.scale_col_indexes = []
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        scaler = SSL()
        scaler.fit(self.test_data)
        # NOTE(review): this asserts the features are UNchanged (compared to
        # raw test_data) while test_fit5 with the same empty index list
        # expects full scaling -- confirm the intended semantics of
        # scale_col_indexes=[] versus scale_column_idx=[].
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(self.test_data, 4).tolist())
        self.assertListEqual(list(np.around(mean, 4)), list(np.around(scaler.mean_, 4)))
        self.assertListEqual(list(np.around(std, 4)), list(np.around(scaler.scale_, 4)))
    # test with (area="all", upper=2, lower=1, with_mean=False, with_std=False):
    def test_fit7(self):
        scale_param = self.get_scale_param()
        scale_param.scale_column_idx = []
        scale_param.feat_upper = 2
        scale_param.feat_lower = 1
        scale_param.with_mean = False
        scale_param.with_std = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        column_max_value = standard_scaler.column_max_value
        column_min_value = standard_scaler.column_min_value
        # Emulate the expected clipping to [1, 2] on the local copy.
        for i, line in enumerate(self.test_data):
            for j, value in enumerate(line):
                if value > 2:
                    self.test_data[i][j] = 2
                elif value < 1:
                    self.test_data[i][j] = 1
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(self.test_data, 4).tolist())
        self.assertEqual(mean, [0 for _ in range(len(self.test_data[0]))])
        self.assertEqual(std, [1 for _ in range(len(self.test_data[0]))])
        self.assertEqual(column_max_value, [1, 2, 2, 2, 2, 2])
        self.assertEqual(column_min_value, [1, 1, 1, 2, 2, 1])
    # test with (area="all", upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1], with_mean=False, with_std=False):
    def test_fit8(self):
        scale_param = self.get_scale_param()
        scale_param.scale_column_idx = []
        scale_param.feat_upper = [2, 2, 2, 2, 2, 2]
        scale_param.feat_lower = [1, 1, 1, 1, 1, 1]
        scale_param.with_mean = False
        scale_param.with_std = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        column_max_value = standard_scaler.column_max_value
        column_min_value = standard_scaler.column_min_value
        # Emulate the expected per-column clipping on the local copy.
        for i, line in enumerate(self.test_data):
            for j, value in enumerate(line):
                if value > 2:
                    self.test_data[i][j] = 2
                elif value < 1:
                    self.test_data[i][j] = 1
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             np.around(self.test_data, 4).tolist())
        self.assertEqual(mean, [0 for _ in range(len(self.test_data[0]))])
        self.assertEqual(std, [1 for _ in range(len(self.test_data[0]))])
        self.assertEqual(column_max_value, [1, 2, 2, 2, 2, 2])
        self.assertEqual(column_min_value, [1, 1, 1, 2, 2, 1])
    # test with (area="col", upper=[2,2,2,2,2,2], lower=[1,1,1,1,1,1], scale_column_idx=[1,2,4], with_mean=True, with_std=True):
    def test_fit9(self):
        scale_column_idx = [1, 2, 4]
        scale_param = self.get_scale_param()
        scale_param.feat_upper = [2, 2, 2, 2, 2, 2]
        scale_param.feat_lower = [1, 1, 1, 1, 1, 1]
        scale_param.with_mean = True
        scale_param.with_std = True
        scale_param.scale_col_indexes = scale_column_idx
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        column_max_value = standard_scaler.column_max_value
        column_min_value = standard_scaler.column_min_value
        # Clip only the selected columns locally, keep the originals for the
        # columns that must pass through untouched.
        raw_data = copy.deepcopy(self.test_data)
        for i, line in enumerate(self.test_data):
            for j, value in enumerate(line):
                if j in scale_column_idx:
                    if value > 2:
                        self.test_data[i][j] = 2
                    elif value < 1:
                        self.test_data[i][j] = 1
        scaler = SSL(with_mean=True, with_std=True)
        scaler.fit(self.test_data)
        transform_data = np.around(scaler.transform(self.test_data), 4).tolist()
        # Restore unselected columns to their raw values.
        for i, line in enumerate(transform_data):
            for j, cols in enumerate(line):
                if j not in scale_column_idx:
                    transform_data[i][j] = raw_data[i][j]
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             transform_data)
        self.assertListEqual(list(np.around(mean, 6)), list(np.around(scaler.mean_, 6)))
        self.assertListEqual(list(np.around(std, 6)), list(np.around(scaler.scale_, 6)))
        self.assertEqual(column_max_value, [1, 2, 2, 10, 2, 10])
        self.assertEqual(column_min_value, [0, 1, 1, 2, 2, -100])
        # transform() on the raw table must reproduce the fitted output.
        raw_data_transform = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             self.get_table_instance_feature(raw_data_transform))
    # test with (mode="cap", area="col", upper=0.8, lower=0.2, scale_column_idx=[1,2,4], with_mean=True, with_std=True):
    def test_fit10(self):
        scale_column_idx = [1, 2, 4]
        scale_param = self.get_scale_param()
        scale_param.scale_col_indexes = []
        scale_param.feat_upper = 0.8
        scale_param.feat_lower = 0.2
        scale_param.with_mean = True
        scale_param.with_std = True
        scale_param.mode = "cap"
        # NOTE: overrides the scale_col_indexes=[] set a few lines above.
        scale_param.scale_col_indexes = scale_column_idx
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        mean = standard_scaler.mean
        std = standard_scaler.std
        column_max_value = standard_scaler.column_max_value
        column_min_value = standard_scaler.column_min_value
        # Expected 20th/80th-percentile caps per column for this fixture.
        gt_cap_lower_list = [0, 2, 2, 2, 3, 1]
        gt_cap_upper_list = [1, 8, 8, 8, 7, 8]
        raw_data = copy.deepcopy(self.test_data)
        for i, line in enumerate(self.test_data):
            for j, value in enumerate(line):
                if j in scale_column_idx:
                    if value > gt_cap_upper_list[j]:
                        self.test_data[i][j] = gt_cap_upper_list[j]
                    elif value < gt_cap_lower_list[j]:
                        self.test_data[i][j] = gt_cap_lower_list[j]
        scaler = SSL(with_mean=True, with_std=True)
        scaler.fit(self.test_data)
        transform_data = np.around(scaler.transform(self.test_data), 4).tolist()
        for i, line in enumerate(transform_data):
            for j, cols in enumerate(line):
                if j not in scale_column_idx:
                    transform_data[i][j] = raw_data[i][j]
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             transform_data)
        self.assertEqual(column_max_value, gt_cap_upper_list)
        self.assertEqual(column_min_value, gt_cap_lower_list)
        self.assertListEqual(list(np.around(mean, 6)), list(np.around(scaler.mean_, 6)))
        self.assertListEqual(list(np.around(std, 6)), list(np.around(scaler.scale_, 6)))
        raw_data_transform = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(fit_instance),
                             self.get_table_instance_feature(raw_data_transform))
    # transform() after fit() must reproduce fit()'s output for every flag combination:
    # test with (with_mean=True, with_std=True):
    def test_transform1(self):
        scale_param = self.get_scale_param()
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(transform_data),
                             self.get_table_instance_feature(fit_instance))
    # test with (with_mean=True, with_std=False):
    def test_transform2(self):
        scale_param = self.get_scale_param()
        scale_param.with_std = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(transform_data),
                             self.get_table_instance_feature(fit_instance))
    # test with (with_mean=False, with_std=True):
    def test_transform3(self):
        scale_param = self.get_scale_param()
        scale_param.with_mean = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(transform_data),
                             self.get_table_instance_feature(fit_instance))
    # test with (with_mean=False, with_std=False):
    def test_transform4(self):
        scale_param = self.get_scale_param()
        scale_param.with_mean = False
        scale_param.with_std = False
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(transform_data),
                             self.get_table_instance_feature(fit_instance))
    # test with (area='all', scale_column_idx=[], with_mean=False, with_std=False):
    def test_transform5(self):
        scale_param = self.get_scale_param()
        scale_param.with_mean = False
        scale_param.with_std = False
        scale_param.scale_column_idx = []
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(transform_data),
                             self.get_table_instance_feature(fit_instance))
    # test with (area='col', with_mean=False, with_std=False):
    def test_transform6(self):
        scale_param = self.get_scale_param()
        scale_param.with_mean = False
        scale_param.with_std = False
        scale_param.scale_column_idx = []
        standard_scaler = StandardScale(scale_param)
        fit_instance = standard_scaler.fit(self.table_instance)
        transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(transform_data),
                             self.get_table_instance_feature(fit_instance))
    def test_cols_select_fit_and_transform(self):
        """Only the selected columns [1, 2, 4] are scaled; others pass through."""
        scale_param = self.get_scale_param()
        scale_param.scale_column_idx = [1, 2, 4]
        standard_scaler = StandardScale(scale_param)
        fit_data = standard_scaler.fit(self.table_instance)
        scale_column_idx = standard_scaler.scale_column_idx
        scaler = SSL(with_mean=True, with_std=True)
        scaler.fit(self.test_data)
        transform_data = np.around(scaler.transform(self.test_data), 4).tolist()
        for i, line in enumerate(transform_data):
            for j, cols in enumerate(line):
                if j not in scale_column_idx:
                    transform_data[i][j] = self.test_data[i][j]
        self.assertListEqual(self.get_table_instance_feature(fit_data),
                             transform_data)
        std_scale_transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(std_scale_transform_data),
                             transform_data)
    def test_cols_select_fit_and_transform_repeat(self):
        """Duplicate column indexes must behave the same as the de-duplicated set."""
        scale_param = self.get_scale_param()
        scale_param.scale_column_idx = [1, 1, 2, 2, 4, 5, 5]
        standard_scaler = StandardScale(scale_param)
        fit_data = standard_scaler.fit(self.table_instance)
        scale_column_idx = standard_scaler.scale_column_idx
        scaler = SSL(with_mean=True, with_std=True)
        scaler.fit(self.test_data)
        transform_data = np.around(scaler.transform(self.test_data), 4).tolist()
        for i, line in enumerate(transform_data):
            for j, cols in enumerate(line):
                if j not in scale_column_idx:
                    transform_data[i][j] = self.test_data[i][j]
        self.assertListEqual(self.get_table_instance_feature(fit_data),
                             transform_data)
        std_scale_transform_data = standard_scaler.transform(self.table_instance)
        self.assertListEqual(self.get_table_instance_feature(std_scale_transform_data),
                             transform_data)
    def tearDown(self):
        """Shut down the computing session started in setUp."""
        session.stop()
if __name__ == "__main__":
unittest.main()
| |
# Copyright (c) 2013-2014
# Harvard FAS Research Computing
# All rights reserved.
"""general utilities"""
import os, select, subprocess, socket
#--- LazyDict
class LazyDict(dict):
    """A dict-like structure for lazily pulling in data only when needed.

    A value of None means this has tried to get the data and it is not
    available or applicable.  A KeyError means data retrieval has been
    blocked by LAZINESS_LOCKED or it's an invalid key.
    """
    #no further lookups are issued; missing keys raise KeyError immediately
    LAZINESS_LOCKED = 0
    #data size is minimized, by not pre-fetching or loading extra attributes
    LAZINESS_DATA_OPTIMIZED = 1
    #queries are minimized, by pre-fetching as many attributes as possible
    LAZINESS_QUERY_OPTIMIZED = 2
    #subclasses should override these
    keys = []
    primary_key = None
    def __init__(self, *arg, **kw):
        dict.__init__(self, *arg, **kw)
        self._laziness = self.LAZINESS_QUERY_OPTIMIZED
    def __str__(self):
        try:
            label = self[self.primary_key]
        except KeyError:
            label = '(empty)'
        return '<%s %s>' % (self.primary_key, label)
    def __getitem__(self, key):
        """Return the value for *key*, lazily loading it on a miss.

        Optimized for the LAZINESS_QUERY_OPTIMIZED case: try/except assumes
        hits are more frequent than misses.
        """
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            if self._laziness == self.LAZINESS_LOCKED:
                raise
            self.load_data(keys=[key,])
            #a KeyError at this point really means it's not available
            return dict.__getitem__(self, key)
    def set_laziness(self, laziness):
        """Set this instance's laziness."""
        if laziness == self.LAZINESS_DATA_OPTIMIZED:
            raise NotImplementedError("LAZINESS_DATA_OPTIMIZED is not yet implemented")
        self._laziness = laziness
    def load_data(self, keys=[]):
        """Load data; with keys==[] load as much as possible.

        Subclasses must override, honoring the laziness constraints.
        """
        raise NotImplementedError()
#--- basic resource utilization
def get_hostname():
    """Return the short (unqualified) hostname of the current host."""
    fqdn = socket.gethostname()
    return fqdn.partition('.')[0]
def get_cpu():
    """Return the CPU capacity and usage on this host.

    Returns a two-item tuple:
    (total number of cores (int), number of running tasks (int))

    This is a plain resource computation, independent of Slurm.  The number
    of running tasks comes from the 4th column of /proc/loadavg, decremented
    by one to account for this process asking for it (floored at zero).
    """
    with open('/proc/loadavg','r') as f:
        #e.g. "52.10 52.07 52.04 53/2016 54847" -> 53-1 = 52
        running = int(f.read().split()[3].split('/')[0])
    used = max(running - 1, 0)
    total = 0
    with open('/proc/cpuinfo','r') as f:
        for line in f:
            if line.startswith('processor'):
                total += 1
    return total, used
def get_mem():
    """Return the memory capacity and usage on this host, in kB.

    Returns a two-item tuple:
    (total memory in kB (int), used memory in kB (int))

    This is a plain resource computation, independent of Slurm.  Used memory
    does not count Buffers, Cached, and SwapCached pages.
    """
    with open('/proc/meminfo','r') as f:
        total = 0
        free = 0
        for line in f.readlines():
            fields = line.split()
            if fields[0]=='MemTotal:':
                total = int(fields[1])
            # BUG FIX: 'SwapCached' was missing its trailing colon, so the
            # /proc/meminfo 'SwapCached:' line never matched and swap-cached
            # pages were wrongly counted as used.
            if fields[0] in ('MemFree:', 'Buffers:', 'Cached:', 'SwapCached:'):
                free += int(fields[1])
        used = total - free
        return total, used
#--- subprocess handling
def shquote(text):
    """Return the given text as a single, safe string in sh code.

    Literal newlines are left alone; sh and bash are fine with that, but
    other tools may require special handling.
    """
    escaped = text.replace("'", "'\\''")
    return "'" + escaped + "'"
def sherrcheck(sh=None, stderr=None, returncode=None, verbose=True):
    """Raise an Exception if the parameters indicate a shell error.

    Non-empty stderr raises even when returncode is zero.  Set verbose to
    False to keep sh and stderr out of the Exception message.
    """
    failed = returncode is not None and returncode != 0
    noisy = stderr is not None and stderr != ''
    if not (failed or noisy):
        return
    msg = "shell code"
    if verbose:
        msg += " [%s]" % repr(sh)
    if returncode is not None:
        if returncode >= 0:
            msg += " failed with exit status [%d]" % returncode
        else:
            # Negative returncodes from subprocess mean death by signal.
            msg += " killed by signal [%d]" % -returncode
    if stderr is not None and verbose:
        msg += ", stderr is [%s]" % repr(stderr)
    raise Exception(msg)
def runsh(sh):
    """Run shell code and return stdout.

    `sh` may be a string (parsed by the shell) or a sequence of argv
    words (executed directly).  This raises an Exception if exit status
    is non-zero or stderr is non-empty.
    """
    # A plain string means "let the shell parse it"; a sequence is exec'd
    # directly without a shell.
    shell = isinstance(sh, str)
    p = subprocess.Popen(
        sh,
        shell=shell,
        stdin=open(os.devnull, 'r'),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        # Decode pipe output to text.  Without this, Python 3's
        # communicate() returns bytes and the stderr!='' check in
        # sherrcheck is always true (b'' != ''), so every call would
        # raise.  universal_newlines is accepted by Python 2 as well.
        universal_newlines=True
    )
    stdout, stderr = p.communicate()
    sherrcheck(sh, stderr, p.returncode)
    return stdout
def runsh_i(sh):
    """Run shell code and yield stdout lines.
    This raises an Exception if exit status is non-zero or stderr is non-empty.
    Be sure to fully iterate this or you will probably leave orphans.
    """
    # Read the pipes in chunks of at most this many bytes per select() wakeup.
    BLOCK_SIZE = 4096
    # A plain string is handed to the shell; any other value (e.g. an argv
    # list) is executed directly.
    if type(sh)==type(''):
        shell=True
    else:
        shell=False
    p = subprocess.Popen(
        sh,
        shell=shell,
        stdin=open('/dev/null', 'r'),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    # Multiplex stdout/stderr with select() so neither pipe fills up and
    # deadlocks the child: stdout is yielded line-by-line as data arrives,
    # stderr is accumulated and only checked after the child exits.
    stdoutDone, stderrDone = False, False
    stdout = ''
    stderr = ''
    while not (stdoutDone and stderrDone):
        rfds, ignored, ignored2 = select.select([p.stdout.fileno(), p.stderr.fileno()], [], [])
        if p.stdout.fileno() in rfds:
            s = os.read(p.stdout.fileno(), BLOCK_SIZE)
            if s=='':
                # Empty read means EOF on stdout.
                # NOTE(review): on Python 3 os.read() returns bytes, which
                # never compare equal to '' -- this module looks like
                # Python 2 code; confirm before running under Python 3.
                stdoutDone = True
            else:
                # Yield each complete line in this chunk; keep a trailing
                # partial line buffered in `stdout` for the next read.
                i = 0
                j = s.find('\n')
                while j!=-1:
                    yield stdout + s[i:j+1]
                    stdout = ''
                    i = j+1
                    j = s.find('\n',i)
                stdout += s[i:]
        if p.stderr.fileno() in rfds:
            s = os.read(p.stderr.fileno(), BLOCK_SIZE)
            if s=='':
                stderrDone = True
            else:
                stderr += s
    # Flush a final line that did not end with a newline.
    if stdout!='':
        yield stdout
    # Reap the child and raise if it failed or wrote to stderr.
    sherrcheck(sh, stderr, p.wait())
if __name__=='__main__':
    # Library-only module: importing is the supported use; there is no
    # command-line behavior.
    pass
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.asset_v1p5beta1.services.asset_service import AssetServiceAsyncClient
from google.cloud.asset_v1p5beta1.services.asset_service import AssetServiceClient
from google.cloud.asset_v1p5beta1.services.asset_service import pagers
from google.cloud.asset_v1p5beta1.services.asset_service import transports
from google.cloud.asset_v1p5beta1.types import asset_service
from google.cloud.asset_v1p5beta1.types import assets
from google.oauth2 import service_account
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-cert callback returning a static (cert, key) pair."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a stand-in endpoint when *client* defaults to localhost.

    Used to make the client produce a distinct mTLS endpoint for testing.
    """
    default = client.DEFAULT_ENDPOINT
    if "localhost" in default:
        return "foo.googleapis.com"
    return default
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to mTLS hosts."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    # Each (input, expected) pair exercises one branch of the helper:
    # None passthrough, plain -> mtls, mtls idempotence, sandbox variants,
    # and non-Google hosts left untouched.
    expectations = [
        (None, None),
        (api_endpoint, api_mtls_endpoint),
        (api_mtls_endpoint, api_mtls_endpoint),
        (sandbox_endpoint, sandbox_mtls_endpoint),
        (sandbox_mtls_endpoint, sandbox_mtls_endpoint),
        (non_googleapi, non_googleapi),
    ]
    for given, expected in expectations:
        assert AssetServiceClient._get_default_mtls_endpoint(given) == expected
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient,])
def test_asset_service_client_from_service_account_info(client_class):
    """from_service_account_info builds a client from an in-memory info dict."""
    creds = ga_credentials.AnonymousCredentials()
    # Patch the credentials factory so no real key material is needed.
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "cloudasset.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.AssetServiceGrpcTransport, "grpc"),
        (transports.AssetServiceGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_asset_service_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Self-signed JWT access is requested iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient,])
def test_asset_service_client_from_service_account_file(client_class):
    """from_service_account_file/json both build clients from a key file path."""
    creds = ga_credentials.AnonymousCredentials()
    # Patch the credentials factory so the dummy path is never opened.
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == "cloudasset.googleapis.com:443"
def test_asset_service_client_get_transport_class():
    """get_transport_class returns a registered transport, defaulting to gRPC."""
    # With no argument, the default must be one of the registered transports.
    default_transport = AssetServiceClient.get_transport_class()
    assert default_transport in [
        transports.AssetServiceGrpcTransport,
    ]
    # Asking for "grpc" by name must yield the gRPC transport class.
    named_transport = AssetServiceClient.get_transport_class("grpc")
    assert named_transport == transports.AssetServiceGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
def test_asset_service_client_client_options(
    client_class, transport_class, transport_name
):
    """ClientOptions and mTLS env vars select the transport's host/settings."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(AssetServiceClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(AssetServiceClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc", "true"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "true",
        ),
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc", "false"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            "false",
        ),
    ],
)
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_asset_service_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With MTLS_ENDPOINT=auto, endpoint/cert choice follows cert availability."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [AssetServiceClient, AssetServiceAsyncClient])
@mock.patch.object(
    AssetServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AssetServiceClient)
)
@mock.patch.object(
    AssetServiceAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(AssetServiceAsyncClient),
)
def test_asset_service_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source honors options and both env vars."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport, "grpc"),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
def test_asset_service_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """ClientOptions.scopes is forwarded to the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            AssetServiceClient,
            transports.AssetServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_asset_service_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """ClientOptions.credentials_file is forwarded to the transport constructor."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
def test_asset_service_client_client_options_from_dict():
    """client_options may be passed as a plain dict instead of ClientOptions."""
    with mock.patch(
        "google.cloud.asset_v1p5beta1.services.asset_service.transports.AssetServiceGrpcTransport.__init__"
    ) as grpc_transport:
        grpc_transport.return_value = None
        client = AssetServiceClient(client_options={"api_endpoint": "squid.clam.whelk"})
        grpc_transport.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            AssetServiceClient,
            transports.AssetServiceGrpcTransport,
            "grpc",
            grpc_helpers,
        ),
        (
            AssetServiceAsyncClient,
            transports.AssetServiceGrpcAsyncIOTransport,
            "grpc_asyncio",
            grpc_helpers_async,
        ),
    ],
)
def test_asset_service_client_create_channel_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """Credentials loaded from file are the ones used to create the channel."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # test that the credentials from file are saved and used as the credentials.
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel"
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        file_creds = ga_credentials.AnonymousCredentials()
        load_creds.return_value = (file_creds, None)
        adc.return_value = (creds, None)
        client = client_class(client_options=options, transport=transport_name)
        create_channel.assert_called_with(
            "cloudasset.googleapis.com:443",
            credentials=file_creds,
            credentials_file=None,
            quota_project_id=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=None,
            default_host="cloudasset.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize("request_type", [asset_service.ListAssetsRequest, dict,])
def test_list_assets(request_type, transport: str = "grpc"):
    """list_assets issues a ListAssetsRequest and wraps the reply in a pager."""
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = asset_service.ListAssetsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_assets(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListAssetsRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.ListAssetsPager)
        assert response.next_page_token == "next_page_token_value"
def test_list_assets_empty_call():
    """Calling list_assets() with no args still sends a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        client.list_assets()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListAssetsRequest()
@pytest.mark.asyncio
async def test_list_assets_async(
    transport: str = "grpc_asyncio", request_type=asset_service.ListAssetsRequest
):
    """Async list_assets issues the request and returns an async pager."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListAssetsResponse(next_page_token="next_page_token_value",)
        )
        response = await client.list_assets(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == asset_service.ListAssetsRequest()
        # Establish that the response is the type that we expect.
        assert isinstance(response, pagers.ListAssetsAsyncPager)
        assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_assets_async_from_dict():
    """Re-run the async list_assets test with a dict-typed request."""
    await test_list_assets_async(request_type=dict)
def test_list_assets_field_headers():
    """The x-goog-request-params routing header carries request.parent."""
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ListAssetsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        call.return_value = asset_service.ListAssetsResponse()
        client.list_assets(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_assets_field_headers_async():
    """Async variant: routing header x-goog-request-params carries parent."""
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials(),)
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = asset_service.ListAssetsRequest()
    request.parent = "parent/value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            asset_service.ListAssetsResponse()
        )
        await client.list_assets(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_assets_pager(transport_name: str = "grpc"):
    """The sync pager iterates assets across all mocked response pages."""
    # NOTE(review): ga_credentials.AnonymousCredentials is passed as the
    # class, not an instance (missing "()") -- other tests in this file
    # instantiate it; confirm this is intentional.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_assets(request={})
        assert pager._metadata == metadata
        results = [i for i in pager]
        # 3 + 0 + 1 + 2 assets across the four pages.
        assert len(results) == 6
        assert all(isinstance(i, assets.Asset) for i in results)
def test_list_assets_pages(transport_name: str = "grpc"):
    """pager.pages yields raw pages with the expected next_page_token values."""
    # NOTE(review): ga_credentials.AnonymousCredentials is passed as the
    # class, not an instance (missing "()") -- confirm this is intentional.
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials, transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_assets), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        pages = list(client.list_assets(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_assets_async_pager():
    """The async pager iterates assets across all mocked response pages."""
    # NOTE(review): ga_credentials.AnonymousCredentials is passed as the
    # class, not an instance (missing "()") -- confirm this is intentional.
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        async_pager = await client.list_assets(request={},)
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:
            responses.append(response)
        # 3 + 0 + 1 + 2 assets across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, assets.Asset) for i in responses)
@pytest.mark.asyncio
async def test_list_assets_async_pages():
    """Async pager.pages yields raw pages with expected next_page_token values."""
    # NOTE(review): ga_credentials.AnonymousCredentials is passed as the
    # class, not an instance (missing "()") -- confirm this is intentional.
    client = AssetServiceAsyncClient(credentials=ga_credentials.AnonymousCredentials,)
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_assets), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(), assets.Asset(), assets.Asset(),],
                next_page_token="abc",
            ),
            asset_service.ListAssetsResponse(assets=[], next_page_token="def",),
            asset_service.ListAssetsResponse(
                assets=[assets.Asset(),], next_page_token="ghi",
            ),
            asset_service.ListAssetsResponse(assets=[assets.Asset(), assets.Asset(),],),
            RuntimeError,
        )
        pages = []
        async for page_ in (await client.list_assets(request={})).pages:
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
    """Mutually exclusive client arguments must raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = AssetServiceClient(client_options=options, transport=transport,)
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = AssetServiceClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A client accepts a pre-built transport instance and uses it as-is."""
    custom_transport = transports.AssetServiceGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The exact object handed in must be the one the client exposes.
    assert AssetServiceClient(transport=custom_transport).transport is custom_transport
def test_transport_get_channel():
    """Both the sync and async gRPC transports should expose a usable channel."""
    for transport_cls in (
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        # A freshly constructed transport lazily creates a truthy channel.
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Constructing a transport without credentials must fall back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_transport_grpc_default():
    """With no transport argument, the client must default to the gRPC transport."""
    # A client should use the gRPC transport by default.
    client = AssetServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
    assert isinstance(client.transport, transports.AssetServiceGrpcTransport,)
def test_asset_service_base_transport_error():
    """Supplying both a credentials object and a credentials file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.AssetServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_asset_service_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError.

    The base transport is an interface; concrete gRPC transports override
    each method, so the base implementations (and close()) must all raise.
    """
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.asset_v1p5beta1.services.asset_service.transports.AssetServiceTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.AssetServiceTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = ("list_assets",)
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
def test_asset_service_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.asset_v1p5beta1.services.asset_service.transports.AssetServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AssetServiceTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_asset_service_base_transport_with_adc():
    """ADC is consulted when neither credentials nor a credentials file are given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.asset_v1p5beta1.services.asset_service.transports.AssetServiceTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AssetServiceTransport()
        adc.assert_called_once()
def test_asset_service_auth_adc():
    """The client resolves ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        AssetServiceClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.AssetServiceGrpcTransport,
        transports.AssetServiceGrpcAsyncIOTransport,
    ],
)
def test_asset_service_transport_auth_adc(transport_class):
    """Explicit scopes and quota project are forwarded into the ADC lookup."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.AssetServiceGrpcTransport, grpc_helpers),
        (transports.AssetServiceGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_asset_service_transport_create_channel(transport_class, grpc_helpers):
    """create_channel receives the resolved credentials, host, scopes and options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "cloudasset.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="cloudasset.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS setup: explicit ssl_channel_credentials wins; otherwise the cert
    source callback feeds grpc.ssl_channel_credentials."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
def test_asset_service_host_no_port():
    """An api_endpoint with no explicit port must resolve to the default :443."""
    opts = client_options.ClientOptions(api_endpoint="cloudasset.googleapis.com")
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "cloudasset.googleapis.com:443"
def test_asset_service_host_with_port():
    """An api_endpoint carrying its own port must be used verbatim."""
    opts = client_options.ClientOptions(api_endpoint="cloudasset.googleapis.com:8000")
    client = AssetServiceClient(
        credentials=ga_credentials.AnonymousCredentials(), client_options=opts,
    )
    assert client.transport._host == "cloudasset.googleapis.com:8000"
def test_asset_service_grpc_transport_channel():
    """A caller-supplied gRPC channel is adopted verbatim by the sync transport.

    With a pre-built channel the transport records no ssl credentials, and the
    host keeps the default :443 suffix.
    """
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.AssetServiceGrpcTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` is the correct identity check; `== None` is flagged by PEP 8 / E711.
    assert transport._ssl_channel_credentials is None
def test_asset_service_grpc_asyncio_transport_channel():
    """A caller-supplied aio channel is adopted verbatim by the async transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.AssetServiceGrpcAsyncIOTransport(
        host="squid.clam.whelk", channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # `is None` is the correct identity check; `== None` is flagged by PEP 8 / E711.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated api_mtls_endpoint + client_cert_source still build an mTLS channel.

    Using the deprecated kwargs must emit a DeprecationWarning, feed the
    callback's cert/key into grpc.ssl_channel_credentials, and dial the
    mTLS endpoint with the resulting ssl credentials.
    """
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.AssetServiceGrpcTransport, transports.AssetServiceGrpcAsyncIOTransport],
)
def test_asset_service_transport_channel_mtls_with_adc(transport_class):
    """Deprecated api_mtls_endpoint with no cert source falls back to ADC SslCredentials."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_access_level_path():
    """access_level_path assembles the canonical access-level resource name."""
    policy, level = "squid", "clam"
    want = "accessPolicies/{access_policy}/accessLevels/{access_level}".format(
        access_policy=policy, access_level=level,
    )
    assert AssetServiceClient.access_level_path(policy, level) == want
def test_parse_access_level_path():
    """parse_access_level_path inverts access_level_path."""
    want = {
        "access_policy": "whelk",
        "access_level": "octopus",
    }
    built = AssetServiceClient.access_level_path(**want)
    # Round-tripping through parse must recover the original components.
    assert AssetServiceClient.parse_access_level_path(built) == want
def test_access_policy_path():
    """access_policy_path prefixes the id with accessPolicies/."""
    policy = "oyster"
    want = "accessPolicies/{access_policy}".format(access_policy=policy,)
    assert AssetServiceClient.access_policy_path(policy) == want
def test_parse_access_policy_path():
    """parse_access_policy_path inverts access_policy_path."""
    want = {
        "access_policy": "nudibranch",
    }
    built = AssetServiceClient.access_policy_path(**want)
    # Round-tripping through parse must recover the original components.
    assert AssetServiceClient.parse_access_policy_path(built) == want
def test_asset_path():
    """asset_path returns the wildcard asset resource name."""
    # The generated template has no placeholders, so the path is the literal "*".
    want = "*".format()
    assert AssetServiceClient.asset_path() == want
def test_service_perimeter_path():
    """service_perimeter_path assembles the canonical resource name."""
    policy, perimeter = "cuttlefish", "mussel"
    want = "accessPolicies/{access_policy}/servicePerimeters/{service_perimeter}".format(
        access_policy=policy, service_perimeter=perimeter,
    )
    assert AssetServiceClient.service_perimeter_path(policy, perimeter) == want
def test_parse_service_perimeter_path():
    """parse_service_perimeter_path inverts service_perimeter_path."""
    want = {
        "access_policy": "winkle",
        "service_perimeter": "nautilus",
    }
    built = AssetServiceClient.service_perimeter_path(**want)
    # Round-tripping through parse must recover the original components.
    assert AssetServiceClient.parse_service_perimeter_path(built) == want
def test_common_billing_account_path():
    """common_billing_account_path prefixes the id with billingAccounts/."""
    account_id = "scallop"
    want = "billingAccounts/{billing_account}".format(
        billing_account=account_id,
    )
    assert AssetServiceClient.common_billing_account_path(account_id) == want
def test_parse_common_billing_account_path():
    """Round trip of the billing-account path helpers."""
    want = {
        "billing_account": "abalone",
    }
    built = AssetServiceClient.common_billing_account_path(**want)
    assert AssetServiceClient.parse_common_billing_account_path(built) == want
def test_common_folder_path():
    """common_folder_path prefixes the id with folders/."""
    folder_id = "squid"
    want = "folders/{folder}".format(folder=folder_id,)
    assert AssetServiceClient.common_folder_path(folder_id) == want
def test_parse_common_folder_path():
    """Round trip of the folder path helpers."""
    want = {
        "folder": "clam",
    }
    built = AssetServiceClient.common_folder_path(**want)
    assert AssetServiceClient.parse_common_folder_path(built) == want
def test_common_organization_path():
    """common_organization_path prefixes the id with organizations/."""
    organization_id = "whelk"
    want = "organizations/{organization}".format(organization=organization_id,)
    assert AssetServiceClient.common_organization_path(organization_id) == want
def test_parse_common_organization_path():
    """Round trip of the organization path helpers."""
    want = {
        "organization": "octopus",
    }
    built = AssetServiceClient.common_organization_path(**want)
    assert AssetServiceClient.parse_common_organization_path(built) == want
def test_common_project_path():
    """common_project_path prefixes the id with projects/."""
    project_id = "oyster"
    want = "projects/{project}".format(project=project_id,)
    assert AssetServiceClient.common_project_path(project_id) == want
def test_parse_common_project_path():
    """Round trip of the project path helpers."""
    want = {
        "project": "nudibranch",
    }
    built = AssetServiceClient.common_project_path(**want)
    assert AssetServiceClient.parse_common_project_path(built) == want
def test_common_location_path():
    """common_location_path joins the project and location segments."""
    project_id, location_id = "cuttlefish", "mussel"
    want = "projects/{project}/locations/{location}".format(
        project=project_id, location=location_id,
    )
    assert AssetServiceClient.common_location_path(project_id, location_id) == want
def test_parse_common_location_path():
    """Round trip of the location path helpers."""
    want = {
        "project": "winkle",
        "location": "nautilus",
    }
    built = AssetServiceClient.common_location_path(**want)
    assert AssetServiceClient.parse_common_location_path(built) == want
def test_client_with_default_client_info():
    """client_info reaches _prep_wrapped_messages for both client and transport paths."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.AssetServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.AssetServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = AssetServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """Leaving the async client context closes the grpc channel exactly once."""
    client = AssetServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """Leaving the sync client context closes the underlying channel exactly once."""
    # Renamed from `transports`: the original local dict shadowed the
    # module-level `transports` import used throughout this file.
    close_attr_by_transport = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_attr_by_transport.items():
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """Using the client as a context manager must close its transport on exit."""
    # Renamed from `transports`: the original local list shadowed the
    # module-level `transports` import used throughout this file.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = AssetServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (AssetServiceClient, transports.AssetServiceGrpcTransport),
        (AssetServiceAsyncClient, transports.AssetServiceGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key in ClientOptions is exchanged for credentials passed to the transport."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| |
"""
Provides tests for the syncing functionalities in django entity.
"""
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.test.utils import override_settings
from django_dynamic_fixture import G, F
from entity.config import EntityRegistry
from entity.models import Entity, EntityRelationship, EntityKind
from entity.sync import EntitySyncer, sync_entities
from entity.signal_handlers import turn_on_syncing, turn_off_syncing
from mock import patch
from entity.tests.models import (
Account, Team, EntityPointer, DummyModel, MultiInheritEntity, AccountConfig, TeamConfig, TeamGroup,
M2mEntity, PointsToM2mEntity, PointsToAccount, Competitor
)
from entity.tests.utils import EntityTestCase
class TestTurnOnOffSyncing(EntityTestCase):
    """
    Tests turning on and off entity syncing.
    """
    @patch('entity.signal_handlers.post_save', spec_set=True)
    @patch('entity.signal_handlers.post_delete', spec_set=True)
    @patch('entity.signal_handlers.m2m_changed', spec_set=True)
    @patch('entity.signal_handlers.post_bulk_operation', spec_set=True)
    def test_turn_on_syncing_all_handlers_true(
            self, post_bulk_operation_mock, m2m_changed_mock, post_delete_mock, post_save_mock):
        """
        Tests that turn_on_syncing connects every signal handler when all flags are True.
        """
        turn_on_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=True)
        self.assertTrue(post_save_mock.connect.called)
        self.assertTrue(post_delete_mock.connect.called)
        self.assertTrue(m2m_changed_mock.connect.called)
        self.assertTrue(post_bulk_operation_mock.connect.called)
    @patch('entity.signal_handlers.post_save', spec_set=True)
    @patch('entity.signal_handlers.post_delete', spec_set=True)
    @patch('entity.signal_handlers.m2m_changed', spec_set=True)
    @patch('entity.signal_handlers.post_bulk_operation', spec_set=True)
    def test_turn_on_syncing_all_handlers_false(
            self, post_bulk_operation_mock, m2m_changed_mock, post_delete_mock, post_save_mock):
        """
        Tests that turn_on_syncing connects nothing when all flags are False.
        """
        turn_on_syncing(
            for_post_save=False, for_post_delete=False, for_m2m_changed=False, for_post_bulk_operation=False)
        self.assertFalse(post_save_mock.connect.called)
        self.assertFalse(post_delete_mock.connect.called)
        self.assertFalse(m2m_changed_mock.connect.called)
        self.assertFalse(post_bulk_operation_mock.connect.called)
    @patch('entity.signal_handlers.post_save', spec_set=True)
    @patch('entity.signal_handlers.post_delete', spec_set=True)
    @patch('entity.signal_handlers.m2m_changed', spec_set=True)
    @patch('entity.signal_handlers.post_bulk_operation', spec_set=True)
    def test_turn_off_syncing_all_handlers_true(
            self, post_bulk_operation_mock, m2m_changed_mock, post_delete_mock, post_save_mock):
        """
        Tests that turn_off_syncing disconnects every signal handler when all flags are True.
        """
        turn_off_syncing(for_post_save=True, for_post_delete=True, for_m2m_changed=True, for_post_bulk_operation=True)
        self.assertTrue(post_save_mock.disconnect.called)
        self.assertTrue(post_delete_mock.disconnect.called)
        self.assertTrue(m2m_changed_mock.disconnect.called)
        self.assertTrue(post_bulk_operation_mock.disconnect.called)
    @patch('entity.signal_handlers.post_save', spec_set=True)
    @patch('entity.signal_handlers.post_delete', spec_set=True)
    @patch('entity.signal_handlers.m2m_changed', spec_set=True)
    @patch('entity.signal_handlers.post_bulk_operation', spec_set=True)
    def test_turn_off_syncing_all_handlers_false(
            self, post_bulk_operation_mock, m2m_changed_mock, post_delete_mock, post_save_mock):
        """
        Tests that turn_off_syncing disconnects nothing when all flags are False.
        """
        turn_off_syncing(
            for_post_save=False, for_post_delete=False, for_m2m_changed=False, for_post_bulk_operation=False)
        self.assertFalse(post_save_mock.disconnect.called)
        self.assertFalse(post_delete_mock.disconnect.called)
        self.assertFalse(m2m_changed_mock.disconnect.called)
        self.assertFalse(post_bulk_operation_mock.disconnect.called)
    def test_post_save_turned_on_by_default(self):
        """
        Tests that save signals are connected by default.
        """
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            Account.objects.create()
            self.assertTrue(mock_handler.called)
    def test_post_delete_turned_on_by_default(self):
        """
        Tests that delete signals are connected by default.
        """
        a = Account.objects.create()
        with patch('entity.models.Entity.all_objects.delete_for_obj') as mock_handler:
            # Delete the object. The signal should be called
            a.delete()
            self.assertEquals(mock_handler.call_count, 1)
    def test_bulk_operation_turned_off_by_default(self):
        """
        Tests that bulk operations are turned off by default.
        """
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            Account.objects.bulk_create([Account() for i in range(5)])
            self.assertFalse(mock_handler.called)
    def test_turn_off_save(self):
        """
        Tests turning off syncing for the save signal.
        """
        turn_off_syncing()
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            Account.objects.create()
            self.assertFalse(mock_handler.called)
    def test_turn_off_delete(self):
        """
        Tests turning off syncing for the delete signal.
        """
        turn_off_syncing()
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            a = Account.objects.create()
            self.assertFalse(mock_handler.called)
            a.delete()
            self.assertFalse(mock_handler.called)
    def test_turn_off_bulk(self):
        """
        Tests turning off syncing for bulk operations.
        """
        turn_off_syncing()
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            Account.objects.bulk_create([Account() for i in range(5)])
            self.assertFalse(mock_handler.called)
    def test_turn_on_save(self):
        """
        Tests turning on syncing for the save signal.
        """
        turn_off_syncing()
        turn_on_syncing()
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            Account.objects.create()
            self.assertTrue(mock_handler.called)
    def test_turn_on_delete(self):
        """
        Tests turning on syncing for the delete signal.
        """
        turn_off_syncing()
        turn_on_syncing()
        with patch('entity.models.Entity.all_objects.delete_for_obj') as mock_handler:
            a = Account.objects.create()
            a.delete()
            self.assertEquals(mock_handler.call_count, 1)
    def test_turn_on_bulk(self):
        """
        Tests turning on syncing for bulk operations.
        """
        turn_off_syncing()
        turn_on_syncing(for_post_bulk_operation=True)
        with patch('entity.signal_handlers.sync_entities') as mock_handler:
            Account.objects.bulk_create([Account() for i in range(5)])
            self.assertTrue(mock_handler.called)
class SyncAllEntitiesTest(EntityTestCase):
    """
    Tests that all entities can be synced at once and tests the management command to
    sync all entities.
    """
    def test_sync_entities_management_command(self):
        """
        Tests that the management command for syncing entities works properly.
        """
        # Create five test accounts with signal syncing off so the command
        # itself has to do the work.
        turn_off_syncing()
        for i in range(5):
            Account.objects.create()
        turn_on_syncing()
        # Test that the management command syncs all five entities
        self.assertEqual(Entity.objects.all().count(), 0)
        call_command('sync_entities')
        self.assertEqual(Entity.objects.all().count(), 5)

    @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
    def test_async_sync_entities_management_command(self):
        """
        Tests that the sync_entities command works with the asynchronous option.
        """
        # Create five test accounts without syncing on
        turn_off_syncing()
        for i in range(5):
            Account.objects.create()
        turn_on_syncing()
        # Test that the management command syncs all five entities
        self.assertEqual(Entity.objects.all().count(), 0)
        # 'async' became a reserved keyword in Python 3.7, so it cannot be
        # written as a literal keyword argument (`async=True` is a
        # SyntaxError); pass the option through a kwargs dict instead.
        call_command('sync_entities', **{'async': True})
        self.assertEqual(Entity.objects.all().count(), 5)

    def test_sync_dummy_data(self):
        """
        Tests that dummy data (i.e data that does not inherit EntityModelMixin) doesn't
        get synced.
        """
        # Create dummy data
        DummyModel.objects.create()
        # Sync all entities and verify that none were created
        sync_entities()
        self.assertEqual(Entity.objects.all().count(), 0)

    def test_sync_multi_inherited_data(self):
        """
        Test when models are synced that don't directly inherit EntityModelMixin.
        """
        # Create an entity that does not directly inherit EntityModelMixin
        MultiInheritEntity.objects.create()
        # Sync all entities and verify that one was created
        sync_entities()
        self.assertEqual(Entity.objects.all().count(), 1)

    def test_sync_all_account_no_teams(self):
        """
        Tests syncing all accounts with no super entities.
        """
        turn_off_syncing()
        # Create five test accounts
        accounts = [Account.objects.create() for i in range(5)]
        turn_on_syncing()
        # Sync all of the entities and verify that five Entity models were created for the Account model
        self.assertEqual(Entity.objects.all().count(), 0)
        sync_entities()
        self.assertEqual(Entity.objects.all().count(), 5)
        # Delete an account. When all entities are synced again,
        # there should only be four accounts
        turn_off_syncing()
        accounts[0].delete()
        turn_on_syncing()
        self.assertEqual(Entity.objects.all().count(), 5)
        sync_entities()
        self.assertEqual(Entity.objects.all().count(), 4)

    def test_sync_all_accounts_teams(self):
        """
        Tests syncing of all accounts when they have super entities.
        """
        # Create five test accounts
        accounts = [Account.objects.create() for i in range(5)]
        # Create two teams to assign to some of the accounts
        teams = [Team.objects.create() for i in range(2)]
        accounts[0].team = teams[0]
        accounts[0].save()
        accounts[1].team = teams[0]
        accounts[1].save()
        accounts[2].team = teams[1]
        accounts[2].save()
        accounts[3].team = teams[1]
        accounts[3].save()
        # Sync all the entities. There should be 7 (5 accounts 2 teams)
        sync_entities()
        self.assertEqual(Entity.objects.filter(entity_type=ContentType.objects.get_for_model(Account)).count(), 5)
        self.assertEqual(Entity.objects.filter(entity_type=ContentType.objects.get_for_model(Team)).count(), 2)
        self.assertEqual(Entity.objects.all().count(), 7)
        # There should be four entity relationships since four accounts have teams
        self.assertEqual(EntityRelationship.objects.all().count(), 4)

    def test_sync_all_accounts_teams_inactive_entity_kind(self):
        """
        Tests syncing of all accounts when they have super entities and the entity kind is inactive.
        """
        # Create five test accounts
        accounts = [Account.objects.create() for i in range(5)]
        # Create two teams to assign to some of the accounts
        teams = [Team.objects.create() for i in range(2)]
        accounts[0].team = teams[0]
        accounts[0].save()
        accounts[1].team = teams[0]
        accounts[1].save()
        accounts[2].team = teams[1]
        accounts[2].save()
        accounts[3].team = teams[1]
        accounts[3].save()
        # Deactivate the team entity kind; syncing must still succeed.
        team_ek = EntityKind.objects.get(name='tests.team')
        team_ek.delete()
        # Sync all the entities. There should be 7 (5 accounts 2 teams)
        sync_entities()
        self.assertEqual(Entity.objects.filter(entity_type=ContentType.objects.get_for_model(Account)).count(), 5)
        self.assertEqual(Entity.objects.filter(entity_type=ContentType.objects.get_for_model(Team)).count(), 2)
        self.assertEqual(Entity.objects.all().count(), 7)
        # There should be four entity relationships since four accounts have teams
        self.assertEqual(EntityRelationship.objects.all().count(), 4)
class TestEntityBulkSignalSync(EntityTestCase):
    """
    Tests syncing when bulk operations happen.
    """
    def setUp(self):
        """Enable syncing on bulk operations for every test in this case."""
        super(TestEntityBulkSignalSync, self).setUp()
        turn_on_syncing(for_post_bulk_operation=True)

    def test_post_bulk_create(self):
        """
        Tests that entities can have bulk creates applied to them and still be synced.
        """
        # Bulk create five accounts
        accounts = [Account() for i in range(5)]
        Account.objects.bulk_create(accounts)
        # Verify that there are 5 entities.
        # assertEqual is the non-deprecated spelling; the assertEquals alias
        # was removed in Python 3.12.
        self.assertEqual(Entity.objects.all().count(), 5)

    def test_post_bulk_update(self):
        """
        Calls a bulk update on a list of entities. Verifies that the models are appropriately
        synced.
        """
        # Create five accounts
        for i in range(5):
            Account.objects.create(email='test1@test.com')
        # Verify that there are five entities all with the 'test1@test.com' email
        for entity in Entity.objects.all():
            self.assertEqual(entity.entity_meta['email'], 'test1@test.com')
        self.assertEqual(Entity.objects.all().count(), 5)
        # Bulk update the account emails to a different one
        Account.objects.all().update(email='test2@test.com')
        # Verify that the email was updated properly in all entities
        for entity in Entity.objects.all():
            self.assertEqual(entity.entity_meta['email'], 'test2@test.com')
        self.assertEqual(Entity.objects.all().count(), 5)

    def test_invalid_entity_model(self):
        """
        Tests that an invalid entity model is not synced on bulk update.
        """
        DummyModel.objects.bulk_create([DummyModel()])
        self.assertFalse(Entity.objects.exists())

    def test_post_bulk_update_dummy(self):
        """
        Tests that even if the dummy model is using the special model manager for bulk
        updates, it still does not get synced since it doesn't inherit EntityModelMixin.
        """
        # Create five dummy models with a bulk update
        DummyModel.objects.bulk_create([DummyModel() for i in range(5)])
        # There should be no synced entities
        self.assertEqual(Entity.objects.all().count(), 0)
class TestWatching(EntityTestCase):
    """
    Tests when an entity is watching another model for changes.
    """
    def test_m2m_changed_of_another_model(self):
        """
        Tests when an entity model is listening for a change of an m2m of another model.
        """
        m2m_entity = G(M2mEntity)
        team = G(Team)
        points_to_m2m_entity = G(PointsToM2mEntity, m2m_entity=m2m_entity)
        # Three entities should be synced and there should not yet be any relationships
        self.assertEquals(Entity.objects.count(), 3)
        self.assertFalse(EntityRelationship.objects.exists())
        # When a team is added to the m2m entity, it should be a super entity to the points_to_m2m_entity and
        # of m2m_entity
        m2m_entity.teams.add(team)
        self.assertEquals(Entity.objects.count(), 3)
        self.assertEquals(EntityRelationship.objects.count(), 2)
        # NOTE: the names below are rebound from model instances to their Entity rows.
        points_to_m2m_entity = Entity.objects.get_for_obj(points_to_m2m_entity)
        team_entity = Entity.objects.get_for_obj(team)
        m2m_entity = Entity.objects.get_for_obj(m2m_entity)
        self.assertTrue(EntityRelationship.objects.filter(
            sub_entity=points_to_m2m_entity, super_entity=team_entity).exists())
        self.assertTrue(EntityRelationship.objects.filter(sub_entity=m2m_entity, super_entity=team_entity).exists())
    def test_points_to_account_config_competitor_updated(self):
        """
        Tests that a PointsToAccount model is updated when the competitor of its account is updated.
        """
        account = G(Account)
        pta = G(PointsToAccount, account=account)
        pta_entity = Entity.objects.get_for_obj(pta)
        self.assertEquals(pta_entity.entity_meta, {
            'team_name': 'None',
            'competitor_name': 'None',
        })
        team = G(Team, name='team1')
        competitor = G(Competitor, name='competitor1')
        account.team = team
        account.competitor = competitor
        account.save()
        # Nothing should have been updated on the entity. This is because it is watching the competitor
        # and team models for changes. Since these models were changed before they were linked to the
        # account, the changes are not propagated.
        pta_entity = Entity.objects.get_for_obj(pta)
        self.assertEquals(pta_entity.entity_meta, {
            'team_name': 'None',
            'competitor_name': 'None',
        })
        # Now change names of the competitors and teams. Things will be propagated.
        team.name = 'team2'
        team.save()
        pta_entity = Entity.objects.get_for_obj(pta)
        self.assertEquals(pta_entity.entity_meta, {
            'team_name': 'team2',
            'competitor_name': 'competitor1',
        })
        competitor.name = 'competitor2'
        competitor.save()
        pta_entity = Entity.objects.get_for_obj(pta)
        self.assertEquals(pta_entity.entity_meta, {
            'team_name': 'team2',
            'competitor_name': 'competitor2',
        })
# The power of django entity compels you...
class TestEntityM2mChangedSignalSync(EntityTestCase):
    """
    Tests when an m2m changes on a synced entity.

    Uses assertEqual throughout (assertEquals is a deprecated alias).
    """
    def test_save_model_with_m2m(self):
        """
        Verifies that the m2m test entity is synced properly upon save.
        """
        # Build the object and its m2m while syncing is off, then verify a
        # single save picks everything up.
        turn_off_syncing()
        m = G(M2mEntity)
        m.teams.add(G(Team))
        turn_on_syncing()
        m.save()
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 1)

    def test_sync_when_m2m_add(self):
        """
        Verifies an entity is synced properly when an m2m field is added.
        """
        m = G(M2mEntity)
        self.assertEqual(Entity.objects.count(), 1)
        self.assertEqual(EntityRelationship.objects.count(), 0)
        m.teams.add(G(Team))
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 1)

    def test_sync_when_m2m_delete(self):
        """
        Verifies an entity is synced properly when an m2m field is deleted.
        """
        m = G(M2mEntity)
        team = G(Team)
        m.teams.add(team)
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 1)
        m.teams.remove(team)
        # Entities remain; only the relationship goes away
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 0)

    def test_sync_when_m2m_clear(self):
        """
        Verifies an entity is synced properly when an m2m field is cleared.
        """
        m = G(M2mEntity)
        team = G(Team)
        m.teams.add(team)
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 1)
        m.teams.clear()
        # Entities remain; only the relationship goes away
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 0)
class TestEntityPostSavePostDeleteSignalSync(EntityTestCase):
    """
    Tests that entities (from the test models) are properly synced upon post_save
    and post_delete signals.

    Uses assertEqual throughout (assertEquals is a deprecated alias).
    """
    def test_going_from_inactive_to_active(self):
        """
        Tests that an inactive entity can be activated and that its active attributes
        are synced properly.
        """
        a = Account.objects.create(email='test_email', is_active=False)
        a.is_active = True
        a.save()
        e = Entity.all_objects.get_for_obj(a)
        self.assertTrue(e.is_active)

    def test_inactive_syncing(self):
        """
        Tests that an inactive entity's activatable properties are synced properly.
        """
        a = Account.objects.create(email='test_email', is_active=False)
        e = Entity.all_objects.get_for_obj(a)
        self.assertFalse(e.is_active)

    def test_display_name_mirrored_default(self):
        """
        Tests that the display name is mirrored to the __unicode__ of the models. This
        is the default behavior.
        """
        a = Account.objects.create(email='test_email')
        e = Entity.objects.get_for_obj(a)
        self.assertEqual(e.display_name, 'test_email')

    def test_display_name_mirrored_custom(self):
        """
        Tests that the display name is mirrored properly when a custom get_display_name
        function is defined. In this case, the function for Teams returns 'team'
        """
        t = G(Team)
        e = Entity.objects.get_for_obj(t)
        self.assertEqual(e.display_name, 'team')

    def test_post_save_dummy_data(self):
        """
        Tests that dummy data that does not inherit from EntityModelMixin is not synced
        when saved.
        """
        DummyModel.objects.create()
        # Verify that no entities were created
        self.assertEqual(Entity.objects.all().count(), 0)

    def test_post_save_multi_inherit_model(self):
        """
        Tests that a model that does not directly inherit EntityModelMixin is still synced.
        """
        MultiInheritEntity.objects.create()
        # Verify that one entity was synced
        self.assertEqual(Entity.objects.all().count(), 1)

    def test_post_delete_inactive_entity(self):
        """
        Tests deleting an entity that was already inactive.
        """
        account = Account.objects.create(is_active=False)
        account.delete()
        self.assertEqual(Entity.all_objects.all().count(), 0)

    def test_post_delete_no_entity(self):
        """
        Tests a post_delete on an account that has no current mirrored entity.
        """
        # Create an account
        account = Account.objects.create()
        # Clear out the Entity table since post_save will create an entry for it
        Entity.objects.all().delete()
        # Delete the created model. No errors should occur and nothing should
        # be in the entity table
        account.delete()
        self.assertEqual(Entity.objects.all().count(), 0)

    def test_post_delete_account(self):
        """
        Tests a post_delete on an account that has a current mirrored entity.
        """
        # Create accounts for the test
        main_account = Account.objects.create()
        other_account = Account.objects.create()
        # Clear out the Entity table since post_save will create an entry for it
        Entity.objects.all().delete(force=True)
        # Create entity entries for the account object and for another account
        self.create_entity(main_account)
        self.create_entity(other_account)
        # Delete the created model. No errors should occur and the other account
        # should still be an entity in the Entity table.
        main_account.delete()
        self.assertEqual(Entity.objects.all().count(), 1)
        self.assertEqual(Entity.objects.filter(entity_id=other_account.id).count(), 1)

    def test_post_delete_account_under_team(self):
        """
        Tests the deletion of an account that had a relationship with a team.
        """
        # Create a team
        team = Team.objects.create(name='Team')
        # Create an account under that team
        account = Account.objects.create(email='test@test.com', team=team)
        # There should be two entities and a relationship between them.
        self.assertEqual(Entity.objects.all().count(), 2)
        self.assertEqual(EntityRelationship.objects.all().count(), 1)
        # Delete the account. The team entity should still exist
        account.delete()
        self.assertEqual(Entity.objects.all().count(), 1)
        self.assertEqual(EntityRelationship.objects.all().count(), 0)
        # Raises if the team entity no longer exists
        Entity.objects.get_for_obj(team)

    def test_post_create_account_no_relationships_active(self):
        """
        Tests that an Entity is created when the appropriate EntityModelMixin model is
        created. Tests the case where the entity has no relationships.
        """
        # Verify that there are no entities
        self.assertEqual(Entity.objects.all().count(), 0)
        # Create an account. An entity with no relationships should be created
        account = Account.objects.create(email='test@test.com')
        entity = Entity.objects.get_for_obj(account)
        # Check that the metadata and is_active fields were set properly
        self.assertEqual(entity.entity_meta, {
            'email': 'test@test.com',
            'is_captain': False,
            'team': None,
            'team_is_active': None,
        })
        self.assertEqual(entity.is_active, True)
        self.assertEqual(entity.sub_relationships.all().count(), 0)
        self.assertEqual(entity.super_relationships.all().count(), 0)

    def test_post_create_account_relationships(self):
        """
        Creates an account that has super relationships. Verifies that the entity table is updated
        properly.
        """
        # Verify that there are no entities
        self.assertEqual(Entity.objects.all().count(), 0)
        # Create a team
        team = Team.objects.create(name='Team')
        # Create an account under that team
        account = Account.objects.create(email='test@test.com', team=team)
        # There should be two entities. Test their existence and values
        self.assertEqual(Entity.objects.all().count(), 2)
        account_entity = Entity.objects.get_for_obj(account)
        self.assertEqual(account_entity.entity_meta, {
            'email': 'test@test.com',
            'is_captain': False,
            'team': 'Team',
            'team_is_active': True,
        })
        team_entity = Entity.objects.get_for_obj(team)
        self.assertEqual(team_entity.entity_meta, None)
        # Check that the appropriate entity relationship was created
        self.assertEqual(EntityRelationship.objects.all().count(), 1)
        relationship = EntityRelationship.objects.first()
        self.assertEqual(relationship.sub_entity, account_entity)
        self.assertEqual(relationship.super_entity, team_entity)

    def test_post_updated_entity_no_cascade(self):
        """
        Verify that updating a mirrored entity does not cause the entity to be deleted (which
        results in a cascading delete for all pointers.
        """
        # Create a test account
        account = Account.objects.create(email='test@test.com')
        entity = Entity.objects.get_for_obj(account)
        self.assertEqual(entity.entity_meta, {
            'email': 'test@test.com',
            'is_captain': False,
            'team': None,
            'team_is_active': None,
        })
        old_entity_id = entity.id
        # Create an object that points to the entity. This object is created to verify that it isn't cascade
        # deleted when the entity is updated
        test_pointer = EntityPointer.objects.create(entity=entity)
        # Now update the account
        account.email = 'newemail@test.com'
        account.save()
        # Verify that the mirrored entity has the same ID
        entity = Entity.objects.get_for_obj(account)
        self.assertEqual(entity.entity_meta, {
            'email': 'newemail@test.com',
            'is_captain': False,
            'team': None,
            'team_is_active': None,
        })
        self.assertEqual(old_entity_id, entity.id)
        # Verify that the pointer still exists and wasn't cascade deleted
        test_pointer = EntityPointer.objects.get(id=test_pointer.id)
        self.assertEqual(test_pointer.entity, entity)

    def test_post_update_account_meta(self):
        """
        Verifies that an account's metadata is updated properly in the mirrored tables.
        """
        # Create an account and check it's mirrored metadata
        account = Account.objects.create(email='test@test.com')
        entity = Entity.objects.get_for_obj(account)
        self.assertEqual(entity.entity_meta, {
            'email': 'test@test.com',
            'is_captain': False,
            'team': None,
            'team_is_active': None,
        })
        # Update the account's metadata and check that it is mirrored
        account.email = 'newemail@test.com'
        account.save()
        entity = Entity.objects.get_for_obj(account)
        self.assertEqual(entity.entity_meta, {
            'email': 'newemail@test.com',
            'is_captain': False,
            'team': None,
            'team_is_active': None,
        })

    def test_post_update_account_relationship_activity(self):
        """
        Creates an account that has super relationships. Verifies that the entity table is updated
        properly when changing the activity of a relationship.
        """
        # Verify that there are no entities
        self.assertEqual(Entity.objects.all().count(), 0)
        # Create a team
        team = Team.objects.create(name='Team')
        # Create an account under that team
        account = Account.objects.create(email='test@test.com', team=team)
        # There should be two entities. Test their existence and values
        self.assertEqual(Entity.objects.all().count(), 2)
        account_entity = Entity.objects.get_for_obj(account)
        self.assertEqual(account_entity.entity_meta, {
            'email': 'test@test.com',
            'is_captain': False,
            'team': 'Team',
            'team_is_active': True,
        })
        team_entity = Entity.objects.get_for_obj(team)
        self.assertEqual(team_entity.entity_meta, None)
        # Check that the appropriate entity relationship was created
        self.assertEqual(EntityRelationship.objects.all().count(), 1)
        relationship = EntityRelationship.objects.first()
        self.assertEqual(relationship.sub_entity, account_entity)
        self.assertEqual(relationship.super_entity, team_entity)
        # Update the account to be a team captain. According to our test project, this
        # means it no longer has an active relationship to a team
        account.is_captain = True
        account.save()
        # Verify that it no longer has an active relationship
        self.assertEqual(EntityRelationship.objects.all().count(), 1)
        relationship = EntityRelationship.objects.first()
        self.assertEqual(relationship.sub_entity, account_entity)
        self.assertEqual(relationship.super_entity, team_entity)
class TestSyncingMultipleEntities(EntityTestCase):
    """
    Tests syncing multiple entities at once of different types.

    Uses assertEqual throughout (assertEquals is a deprecated alias).
    """
    def test_sync_two_accounts(self):
        """
        Syncing two accounts under one team should mirror the accounts, the team,
        and both account->team relationships, but not the unrelated team group.
        """
        turn_off_syncing()
        team = G(Team)
        account1 = G(Account, team=team)
        account2 = G(Account, team=team)
        G(TeamGroup)
        sync_entities(account1, account2)
        self.assertEqual(Entity.objects.count(), 3)
        self.assertEqual(EntityRelationship.objects.count(), 2)

    def test_sync_two_accounts_one_team_group(self):
        """
        Explicitly syncing the team group as well adds its entity to the count.
        """
        turn_off_syncing()
        team = G(Team)
        account1 = G(Account, team=team)
        account2 = G(Account, team=team)
        team_group = G(TeamGroup)
        sync_entities(account1, account2, team_group)
        self.assertEqual(Entity.objects.count(), 4)
        self.assertEqual(EntityRelationship.objects.count(), 2)
class TestCachingAndCascading(EntityTestCase):
    """
    Tests caching, cascade syncing, and optimal queries when syncing single, multiple, or all entities.

    Uses assertEqual throughout (assertEquals is a deprecated alias).
    """
    def test_cascade_sync_super_entities(self):
        """
        Tests that super entities will be synced when a sub entity is synced (even if the super entities
        weren't synced before)
        """
        turn_off_syncing()
        team = G(Team)
        turn_on_syncing()
        self.assertFalse(Entity.objects.exists())
        G(Account, team=team)
        self.assertEqual(Entity.objects.count(), 2)
        self.assertEqual(EntityRelationship.objects.count(), 1)

    def test_no_cascade_if_super_entity_exists(self):
        """
        Tests that super entities aren't synced again if they have already been synced.
        """
        account = G(Account, team=F(team_group=F()))
        self.assertTrue(Account.objects.exists())
        self.assertTrue(Team.objects.exists())
        self.assertTrue(TeamGroup.objects.exists())
        entity_syncer = EntitySyncer()
        entity_syncer.sync_entities_and_relationships(account)
        # Verify that only the account and team reside in the entity syncer cache. This means that
        # the syncing didn't percolate all the way to the team group
        self.assertEqual(len(entity_syncer._synced_entity_cache), 2)

    def test_optimal_queries_registered_entity_with_no_qset(self):
        """
        Tests that the optimal number of queries are performed when syncing a single entity that
        did not register a queryset.
        """
        team_group = G(TeamGroup)
        ContentType.objects.clear_cache()
        with self.assertNumQueries(5):
            team_group.save()

    def test_optimal_queries_registered_entity_w_qset(self):
        """
        Tests that the entity is refetched with its queryset when syncing an individual entity.
        """
        account = G(Account)
        ContentType.objects.clear_cache()
        with self.assertNumQueries(6):
            account.save()

    def test_sync_all_optimal_queries(self):
        """
        Tests optimal queries of syncing all entities.
        """
        # Create five test accounts
        accounts = [Account.objects.create() for _ in range(5)]
        # Create two teams to assign to some of the accounts
        teams = [Team.objects.create() for _ in range(2)]
        accounts[0].team = teams[0]
        accounts[0].save()
        accounts[1].team = teams[0]
        accounts[1].save()
        accounts[2].team = teams[1]
        accounts[2].save()
        accounts[3].team = teams[1]
        accounts[3].save()
        # Use an entity registry that only has accounts and teams. This ensures that other registered
        # entity models don't pollute the test case
        new_registry = EntityRegistry()
        new_registry.register_entity(
            Account.objects.select_related('team', 'team2', 'team_group', 'competitor'), AccountConfig)
        new_registry.register_entity(
            Team.objects.select_related('team_group'), TeamConfig)
        with patch('entity.sync.entity_registry') as mock_entity_registry:
            mock_entity_registry.entity_registry = new_registry.entity_registry
            ContentType.objects.clear_cache()
            with self.assertNumQueries(20):
                sync_entities()
        self.assertEqual(Entity.objects.filter(entity_type=ContentType.objects.get_for_model(Account)).count(), 5)
        self.assertEqual(Entity.objects.filter(entity_type=ContentType.objects.get_for_model(Team)).count(), 2)
        self.assertEqual(Entity.objects.all().count(), 7)
        # There should be four entity relationships since four accounts have teams
        self.assertEqual(EntityRelationship.objects.all().count(), 4)
| |
r"""plistlib.py -- a tool to generate and parse MacOSX .plist files.
The property list (.plist) file format is a simple XML pickle supporting
basic object types, like dictionaries, lists, numbers and strings.
Usually the top level object is a dictionary.
To write out a plist file, use the writePlist(rootObject, pathOrFile)
function. 'rootObject' is the top level object, 'pathOrFile' is a
filename or a (writable) file object.
To parse a plist from a file, use the readPlist(pathOrFile) function,
with a file name or a (readable) file object as the only argument. It
returns the top level object (again, usually a dictionary).
To work with plist data in bytes objects, you can use readPlistFromBytes()
and writePlistToBytes().
Values can be strings, integers, floats, booleans, tuples, lists,
dictionaries (but only with string keys), Data or datetime.datetime objects.
String values (including dictionary keys) have to be unicode strings -- they
will be written out as UTF-8.
The <data> plist type is supported through the Data class. This is a
thin wrapper around a Python bytes object. Use 'Data' if your strings
contain control characters.
Generate Plist example:
pl = dict(
aString = "Doodah",
aList = ["A", "B", 12, 32.1, [1, 2, 3]],
aFloat = 0.1,
anInt = 728,
aDict = dict(
anotherString = "<hello & hi there!>",
aUnicodeValue = "M\xe4ssig, Ma\xdf",
aTrueValue = True,
aFalseValue = False,
),
someData = Data(b"<binary gunk>"),
someMoreData = Data(b"<lots of binary gunk>" * 10),
aDate = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
writePlist(pl, fileName)
Parse Plist example:
pl = readPlist(pathOrFile)
print(pl["aKey"])
"""
__all__ = [
"readPlist", "writePlist", "readPlistFromBytes", "writePlistToBytes",
"Plist", "Data", "Dict"
]
# Note: the Plist and Dict classes have been deprecated.
import binascii
import datetime
from io import BytesIO
import re
def readPlist(pathOrFile):
    """Read a .plist file. 'pathOrFile' may either be a file name or a
    (readable) file object. Return the unpacked root object (which
    usually is a dictionary).
    """
    # Track whether we opened the file ourselves so we only close in that case.
    weOpened = isinstance(pathOrFile, str)
    fileobj = open(pathOrFile, 'rb') if weOpened else pathOrFile
    try:
        return PlistParser().parse(fileobj)
    finally:
        if weOpened:
            fileobj.close()
def writePlist(rootObject, pathOrFile):
    """Write 'rootObject' to a .plist file. 'pathOrFile' may either be a
    file name or a (writable) file object.
    """
    # Track whether we opened the file ourselves so we only close in that case.
    weOpened = isinstance(pathOrFile, str)
    fileobj = open(pathOrFile, 'wb') if weOpened else pathOrFile
    try:
        writer = PlistWriter(fileobj)
        writer.writeln("<plist version=\"1.0\">")
        writer.writeValue(rootObject)
        writer.writeln("</plist>")
    finally:
        if weOpened:
            fileobj.close()
def readPlistFromBytes(data):
    """Read a plist data from a bytes object. Return the root object.
    """
    # Wrap the bytes in an in-memory file and reuse the file-based reader.
    stream = BytesIO(data)
    return readPlist(stream)
def writePlistToBytes(rootObject):
    """Return 'rootObject' as a plist-formatted bytes object.
    """
    # Serialize into an in-memory buffer and hand back its contents.
    buffer = BytesIO()
    writePlist(rootObject, buffer)
    return buffer.getvalue()
class DumbXMLWriter:
    """Minimal XML writer: tracks open elements on a stack and indents output.

    The file object is expected to accept bytes; 'indent' should therefore be
    a bytes object when indentLevel can become non-zero.
    """

    def __init__(self, file, indentLevel=0, indent="\t"):
        self.file = file
        self.stack = []          # open element names, for sanity checking
        self.indentLevel = indentLevel
        self.indent = indent

    def beginElement(self, element):
        # Open a tag and remember it so endElement can verify proper nesting.
        self.stack.append(element)
        self.writeln("<%s>" % element)
        self.indentLevel += 1

    def endElement(self, element):
        # Closing tag must match the most recently opened one.
        assert self.indentLevel > 0
        assert self.stack.pop() == element
        self.indentLevel -= 1
        self.writeln("</%s>" % element)

    def simpleElement(self, element, value=None):
        # <tag>value</tag> when a value is given, self-closing <tag/> otherwise.
        if value is None:
            self.writeln("<%s/>" % element)
        else:
            escaped = _escape(value)
            self.writeln("<%s>%s</%s>" % (element, escaped, element))

    def writeln(self, line):
        if not line:
            return
        # plist has fixed encoding of utf-8
        if isinstance(line, str):
            line = line.encode('utf-8')
        self.file.write(self.indentLevel * self.indent)
        self.file.write(line)
        self.file.write(b'\n')
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'. Smaller units may be omitted with
# a loss of precision)
_dateParser = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z", re.ASCII)


def _dateFromString(s):
    """Parse an ISO-8601 subset date string into a datetime.datetime."""
    gd = _dateParser.match(s).groupdict()
    # Collect fields most-significant first; stop at the first omitted unit.
    parts = []
    for field in ('year', 'month', 'day', 'hour', 'minute', 'second'):
        value = gd[field]
        if value is None:
            break
        parts.append(int(value))
    return datetime.datetime(*parts)
def _dateToString(d):
    """Format a datetime as the plist ISO-8601 subset (always UTC 'Z')."""
    fields = (d.year, d.month, d.day, d.hour, d.minute, d.second)
    return '%04d-%02d-%02dT%02d:%02d:%02dZ' % fields
# Regex to find any control chars, except for \t \n and \r
_controlCharPat = re.compile(
r"[\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f"
r"\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f]")
def _escape(text):
m = _controlCharPat.search(text)
if m is not None:
raise ValueError("strings can't contains control characters; "
"use plistlib.Data instead")
text = text.replace("\r\n", "\n") # convert DOS line endings
text = text.replace("\r", "\n") # convert Mac line endings
text = text.replace("&", "&") # escape '&'
text = text.replace("<", "<") # escape '<'
text = text.replace(">", ">") # escape '>'
return text
# XML prolog + Apple plist DOCTYPE emitted once at the top of every plist file.
PLISTHEADER = b"""\
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
"""
class PlistWriter(DumbXMLWriter):
    """Serialize Python objects to plist XML on top of DumbXMLWriter."""

    def __init__(self, file, indentLevel=0, indent=b"\t", writeHeader=1):
        # Emit the XML prolog/DOCTYPE unless the caller suppresses it
        # (e.g. when embedding a plist fragment).
        if writeHeader:
            file.write(PLISTHEADER)
        DumbXMLWriter.__init__(self, file, indentLevel, indent)

    def writeValue(self, value):
        """Write one value as its plist element. Dispatch order matters."""
        if isinstance(value, str):
            self.simpleElement("string", value)
        elif isinstance(value, bool):
            # must switch for bool before int, as bool is a
            # subclass of int...
            if value:
                self.simpleElement("true")
            else:
                self.simpleElement("false")
        elif isinstance(value, int):
            self.simpleElement("integer", "%d" % value)
        elif isinstance(value, float):
            # repr() preserves full float precision for round-tripping
            self.simpleElement("real", repr(value))
        elif isinstance(value, dict):
            self.writeDict(value)
        elif isinstance(value, Data):
            self.writeData(value)
        elif isinstance(value, datetime.datetime):
            self.simpleElement("date", _dateToString(value))
        elif isinstance(value, (tuple, list)):
            self.writeArray(value)
        else:
            raise TypeError("unsupported type: %s" % type(value))

    def writeData(self, data):
        """Write a Data value as base64 <data>, wrapped to fit the line width."""
        self.beginElement("data")
        # Base64 lines are emitted flush with the <data> tag, one level out.
        self.indentLevel -= 1
        # Budget the line length for the current indent (tabs count as 8 cols),
        # but never go below 16 chars per line.
        maxlinelength = max(16, 76 - len(self.indent.replace(b"\t", b" " * 8) *
                                         self.indentLevel))
        for line in data.asBase64(maxlinelength).split(b"\n"):
            if line:
                self.writeln(line)
        self.indentLevel += 1
        self.endElement("data")

    def writeDict(self, d):
        """Write a dict as <dict> with keys sorted for deterministic output."""
        if d:
            self.beginElement("dict")
            items = sorted(d.items())
            for key, value in items:
                if not isinstance(key, str):
                    raise TypeError("keys must be strings")
                self.simpleElement("key", key)
                self.writeValue(value)
            self.endElement("dict")
        else:
            # Empty dict collapses to a self-closing element.
            self.simpleElement("dict")

    def writeArray(self, array):
        """Write a list/tuple as an <array> of its values."""
        if array:
            self.beginElement("array")
            for value in array:
                self.writeValue(value)
            self.endElement("array")
        else:
            # Empty sequence collapses to a self-closing element.
            self.simpleElement("array")
class _InternalDict(dict):
    # This class is needed while Dict is scheduled for deprecation:
    # we only need to warn when a *user* instantiates Dict or when
    # the "attribute notation for dict keys" is used.

    def __getattr__(self, attr):
        if attr not in self:
            raise AttributeError(attr)
        result = self[attr]
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", DeprecationWarning, 2)
        return result

    def __setattr__(self, attr, value):
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", DeprecationWarning, 2)
        self[attr] = value

    def __delattr__(self, attr):
        if attr not in self:
            raise AttributeError(attr)
        del self[attr]
        from warnings import warn
        warn("Attribute access from plist dicts is deprecated, use d[key] "
             "notation instead", DeprecationWarning, 2)
class Dict(_InternalDict):
    """Deprecated dict subclass kept for backward compatibility."""

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The plistlib.Dict class is deprecated, use builtin dict instead",
             DeprecationWarning, 2)
        super().__init__(**kwargs)
class Plist(_InternalDict):
    """This class has been deprecated. Use readPlist() and writePlist()
    functions instead, together with regular dict objects.
    """

    def __init__(self, **kwargs):
        from warnings import warn
        warn("The Plist class is deprecated, use the readPlist() and "
             "writePlist() functions instead", DeprecationWarning, 2)
        super().__init__(**kwargs)

    @classmethod
    def fromFile(cls, pathOrFile):
        """Deprecated. Use the readPlist() function instead."""
        plist = cls()
        plist.update(readPlist(pathOrFile))
        return plist

    def write(self, pathOrFile):
        """Deprecated. Use the writePlist() function instead."""
        writePlist(self, pathOrFile)
def _encodeBase64(s, maxlinelength=76):
    # copied from base64.encodebytes(), with added maxlinelength argument
    # Each output line of maxlinelength base64 chars encodes 3/4 that many
    # input bytes.
    maxbinsize = (maxlinelength // 4) * 3
    return b''.join(binascii.b2a_base64(s[i:i + maxbinsize])
                    for i in range(0, len(s), maxbinsize))
class Data:
    """Wrapper for binary data."""

    def __init__(self, data):
        # Only raw bytes are accepted; str must be encoded by the caller.
        if not isinstance(data, bytes):
            raise TypeError("data must be as bytes")
        self.data = data

    @classmethod
    def fromBase64(cls, data):
        # base64.decodebytes just calls binascii.a2b_base64;
        # it seems overkill to use both base64 and binascii.
        return cls(binascii.a2b_base64(data))

    def asBase64(self, maxlinelength=76):
        return _encodeBase64(self.data, maxlinelength)

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.data == other.data
        if isinstance(other, str):
            return self.data == other
        return id(self) == id(other)

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, repr(self.data))
class PlistParser:
    """Expat-driven parser: builds the object tree from plist XML.

    Element handlers are looked up dynamically as 'begin_<tag>' /
    'end_<tag>', so the method names below are part of the wire protocol
    and must match the plist element names exactly.
    """

    def __init__(self):
        self.stack = []         # open containers (dicts/lists) being filled
        self.currentKey = None  # pending <key> awaiting its value
        self.root = None        # top-level object, set once parsed

    def parse(self, fileobj):
        """Parse plist XML from *fileobj* and return the root object."""
        from xml.parsers.expat import ParserCreate
        self.parser = ParserCreate()
        self.parser.StartElementHandler = self.handleBeginElement
        self.parser.EndElementHandler = self.handleEndElement
        self.parser.CharacterDataHandler = self.handleData
        self.parser.ParseFile(fileobj)
        return self.root

    def handleBeginElement(self, element, attrs):
        # Reset accumulated character data for the new element.
        self.data = []
        handler = getattr(self, "begin_" + element, None)
        if handler is not None:
            handler(attrs)

    def handleEndElement(self, element):
        handler = getattr(self, "end_" + element, None)
        if handler is not None:
            handler()

    def handleData(self, data):
        # Expat may deliver text in multiple chunks; collect them all.
        self.data.append(data)

    def addObject(self, value):
        """Attach *value* to the pending key, the root, or the open list."""
        if self.currentKey is not None:
            # A <key> is pending, so the enclosing container must be a dict.
            if not isinstance(self.stack[-1], type({})):
                raise ValueError("unexpected element at line %d" %
                                 self.parser.CurrentLineNumber)
            self.stack[-1][self.currentKey] = value
            self.currentKey = None
        elif not self.stack:
            # this is the root object
            self.root = value
        else:
            # No pending key: the enclosing container must be a list.
            if not isinstance(self.stack[-1], type([])):
                raise ValueError("unexpected element at line %d" %
                                 self.parser.CurrentLineNumber)
            self.stack[-1].append(value)

    def getData(self):
        """Return and clear the character data collected for this element."""
        data = ''.join(self.data)
        self.data = []
        return data

    # element handlers

    def begin_dict(self, attrs):
        d = _InternalDict()
        self.addObject(d)
        self.stack.append(d)

    def end_dict(self):
        # A dict may not close while a <key> is still waiting for its value.
        if self.currentKey:
            raise ValueError("missing value for key '%s' at line %d" %
                             (self.currentKey, self.parser.CurrentLineNumber))
        self.stack.pop()

    def end_key(self):
        # <key> is only legal directly inside a <dict>, one at a time.
        if self.currentKey or not isinstance(self.stack[-1], type({})):
            raise ValueError("unexpected key at line %d" %
                             self.parser.CurrentLineNumber)
        self.currentKey = self.getData()

    def begin_array(self, attrs):
        a = []
        self.addObject(a)
        self.stack.append(a)

    def end_array(self):
        self.stack.pop()

    def end_true(self):
        self.addObject(True)

    def end_false(self):
        self.addObject(False)

    def end_integer(self):
        self.addObject(int(self.getData()))

    def end_real(self):
        self.addObject(float(self.getData()))

    def end_string(self):
        self.addObject(self.getData())

    def end_data(self):
        self.addObject(Data.fromBase64(self.getData().encode("utf-8")))

    def end_date(self):
        self.addObject(_dateFromString(self.getData()))
| |
import collections
import os
import sys
import six
# functools.wraps appeared in Python 2.5; fall back to a no-op decorator
# factory with the same call shape for Python 2.4.
try:
    from functools import wraps
except ImportError:
    # only needed for Python 2.4
    def wraps(_):
        def _wraps(func):
            # No metadata copying on 2.4 -- the wrapped function is returned
            # unchanged.
            return func
        return _wraps

# Tells unittest's traceback machinery to hide frames from this module.
__unittest = True
def _relpath_nt(path, start=os.path.curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_list = os.path.abspath(start).split(os.path.sep)
    path_list = os.path.abspath(path).split(os.path.sep)
    # Case-insensitive drive comparison, as on Windows filesystems.
    if start_list[0].lower() != path_list[0].lower():
        unc_path, rest = os.path.splitunc(path)
        unc_start, rest = os.path.splitunc(start)
        if bool(unc_path) ^ bool(unc_start):
            raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                             % (path, start))
        else:
            raise ValueError("path is on drive %s, start on drive %s"
                             % (path_list[0], start_list[0]))
    # Work out how much of the filepath is shared by start and path.
    common = 0
    for start_part, path_part in zip(start_list, path_list):
        if start_part.lower() != path_part.lower():
            break
        common += 1
    rel_list = [os.path.pardir] * (len(start_list) - common) + path_list[common:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
# default to posixpath definition
def _relpath_posix(path, start=os.path.curdir):
    """Return a relative version of a path"""
    if not path:
        raise ValueError("no path specified")
    start_list = os.path.abspath(start).split(os.path.sep)
    path_list = os.path.abspath(path).split(os.path.sep)
    # Work out how much of the filepath is shared by start and path.
    shared = len(os.path.commonprefix([start_list, path_list]))
    # Climb out of the unshared part of 'start', then descend into 'path'.
    rel_list = [os.path.pardir] * (len(start_list) - shared) + path_list[shared:]
    return os.path.join(*rel_list) if rel_list else os.path.curdir
# Pick the platform-appropriate relpath implementation (Windows paths need
# case-insensitive, drive-aware comparison).
if os.path is sys.modules.get('ntpath'):
    relpath = _relpath_nt
else:
    relpath = _relpath_posix
def with_context(context, callableobj, *args, **kwargs):
    """
    Execute a callable utilizing a context object
    in the same way that the 'with' statement would
    """
    context.__enter__()
    try:
        callableobj(*args, **kwargs)
    except:
        # Mirror 'with' semantics: a truthy __exit__ suppresses the exception.
        if not context.__exit__(*sys.exc_info()):
            raise
    else:
        context.__exit__(None, None, None)
# copied from Python 2.6
# Fallback catch_warnings context manager for Python < 2.6; it saves and
# restores the warnings module's global filter list and showwarning hook.
try:
    from warnings import catch_warnings
except ImportError:
    class catch_warnings(object):
        def __init__(self, record=False, module=None):
            # record=True captures warnings into a list instead of displaying.
            # NOTE(review): the 'module' parameter is accepted but ignored;
            # sys.modules['warnings'] is always used.
            self._record = record
            self._module = sys.modules['warnings']
            self._entered = False

        def __repr__(self):
            args = []
            if self._record:
                args.append("record=True")
            name = type(self).__name__
            return "%s(%s)" % (name, ", ".join(args))

        def __enter__(self):
            # Not reentrant: state is saved in instance attributes.
            if self._entered:
                raise RuntimeError("Cannot enter %r twice" % self)
            self._entered = True
            # Save the filters and install a copy so mutations are isolated.
            self._filters = self._module.filters
            self._module.filters = self._filters[:]
            self._showwarning = self._module.showwarning
            if self._record:
                log = []
                def showwarning(*args, **kwargs):
                    log.append(WarningMessage(*args, **kwargs))
                self._module.showwarning = showwarning
                return log
            else:
                return None

        def __exit__(self, *exc_info):
            if not self._entered:
                raise RuntimeError("Cannot exit %r without entering first" % self)
            # Restore the saved global state unconditionally.
            self._module.filters = self._filters
            self._module.showwarning = self._showwarning
class WarningMessage(object):
    """Record of a single captured warning and where it was raised."""

    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        # Cache the category's name (or None for an unnamed category).
        self._category_name = category.__name__ if category.__name__ else None
# Copied from 3.5
########################################################################
### ChainMap (helper for configparser and string.Template)
########################################################################

# BUGFIX: collections.MutableMapping moved to collections.abc in 3.3 and the
# old alias was removed entirely in Python 3.10; resolve the base class from
# whichever location this interpreter provides so the class still defines.
try:
    from collections.abc import MutableMapping as _MutableMapping
except ImportError:  # Python 2
    from collections import MutableMapping as _MutableMapping


class ChainMap(_MutableMapping):
    ''' A ChainMap groups multiple dicts (or other mappings) together
    to create a single, updateable view.
    The underlying mappings are stored in a list. That list is public and can
    accessed or updated using the *maps* attribute. There is no other state.
    Lookups search the underlying mappings successively until a key is found.
    In contrast, writes, updates, and deletions only operate on the first
    mapping.
    '''

    def __init__(self, *maps):
        '''Initialize a ChainMap by setting *maps* to the given mappings.
        If no mappings are provided, a single empty dictionary is used.
        '''
        self.maps = list(maps) or [{}]  # always at least one map

    def __missing__(self, key):
        raise KeyError(key)

    def __getitem__(self, key):
        for mapping in self.maps:
            try:
                return mapping[key]  # can't use 'key in mapping' with defaultdict
            except KeyError:
                pass
        return self.__missing__(key)  # support subclasses that define __missing__

    def get(self, key, default=None):
        return self[key] if key in self else default

    def __len__(self):
        return len(set().union(*self.maps))  # reuses stored hash values if possible

    def __iter__(self):
        return iter(set().union(*self.maps))

    def __contains__(self, key):
        return any(key in m for m in self.maps)

    def __bool__(self):
        return any(self.maps)

    # Use the stdlib recursive-repr guard when available so that a ChainMap
    # containing itself does not recurse forever.
    if getattr(collections, '_recursive_repr', None):
        @collections._recursive_repr()
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))
    else:
        def __repr__(self):
            return '{0.__class__.__name__}({1})'.format(
                self, ', '.join(map(repr, self.maps)))

    @classmethod
    def fromkeys(cls, iterable, *args):
        'Create a ChainMap with a single dict created from the iterable.'
        return cls(dict.fromkeys(iterable, *args))

    def copy(self):
        'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
        return self.__class__(self.maps[0].copy(), *self.maps[1:])

    __copy__ = copy

    def new_child(self, m=None):  # like Django's Context.push()
        '''
        New ChainMap with a new map followed by all previous maps. If no
        map is provided, an empty dict is used.
        '''
        if m is None:
            m = {}
        return self.__class__(m, *self.maps)

    @property
    def parents(self):  # like Django's Context.pop()
        'New ChainMap from maps[1:].'
        return self.__class__(*self.maps[1:])

    def __setitem__(self, key, value):
        self.maps[0][key] = value

    def __delitem__(self, key):
        try:
            del self.maps[0][key]
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def popitem(self):
        'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.'
        try:
            return self.maps[0].popitem()
        except KeyError:
            raise KeyError('No keys found in the first mapping.')

    def pop(self, key, *args):
        'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
        try:
            return self.maps[0].pop(key, *args)
        except KeyError:
            raise KeyError('Key not found in the first mapping: {!r}'.format(key))

    def clear(self):
        'Clear maps[0], leaving maps[1:] intact.'
        self.maps[0].clear()
# Backport: expose ChainMap under its stdlib location on Python < 3.4.
if sys.version_info[:2] < (3, 4):
    collections.ChainMap = ChainMap
# support raise_from on 3.x:
# submitted to six: https://bitbucket.org/gutworth/six/issue/102/raise-foo-from-bar-is-a-syntax-error-on-27
if sys.version_info[:2] > (3, 2):
    # ``raise ... from ...`` is a syntax error on Python 2, so the 3.x
    # implementation must be compiled at runtime via six.exec_.
    six.exec_("""def raise_from(value, from_value):
    raise value from from_value
""")
else:
    # Python 2 (and very old 3.x): no exception chaining available.
    def raise_from(value, from_value):
        raise value
| |
"""
@name: Modules/House/Family/Insteon/insteon_plm.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2010-2020 by D. Brian Kimmel
@note: Created on Feb 18, 2010
@license: MIT License
@summary: This module is for sending commands to and receiving responses from an Insteon Controller.
Create commands and interpret results from any Insteon controller regardless of interface.
This module carries state information about the controller.
This is necessary since the responses may follow a command at any interval.
Responses do not all have to follow the command that caused them.
Note that we only communicate with the local PLM.
"""
__updated__ = '2020-02-21'
# Import system type stuff
import datetime
import queue as Queue
# Import PyMh files
from Modules.Core.Drivers import interface
from Modules.House.Family.Insteon import insteon_decoder, insteon_utils, insteon_link
from Modules.House.Family.Insteon.insteon_constants import MESSAGE_TYPES
from Modules.House.Family.Insteon.insteon_utils import Decode as utilDecode
from Modules.Core.Utilities.debug_tools import PrettyFormatAny
from Modules.Core.Utilities.debug_tools import FormatBytes
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.insteon_plm ')

# Timeouts for send/receive delays
SEND_TIMEOUT = 0.8  # Used to avoid swamping the PLM with commands - Derived empirically
RECEIVE_TIMEOUT = 0.6  # this is for fetching data in the rx buffer

# Modes for setting PLM mode (bit flags; only monitor mode is used below)
# MODE_DISABLE_DEADMAN = 0x10
# MODE_DISABLE_AUTO_LED = 0x20
MODE_MONITOR = 0x40
# MODE_DISABLE_AUTO_LINK = 0x80

# Message flag bits (Page 55 of Developers Manual).
# FLAG_BROADCAST_NAK = 0x80
# FLAG_ALL_LINK = 0x40
# FLAG_ACKNOWLEDGEMENT = 0x20
# FLAG_EXTENDED_CMD = 0x10
FLAG_HOPS_LEFT = 0x0C
FLAG_MAX_HOPS = 0x03
class Commands:
    """ Builders that assemble raw PLM command messages and queue them on a
    controller for the send loop to pick up.
    """

    @staticmethod
    def _queue_60_command(p_controller_obj):
        """ Get IM info (2 bytes).
        See p 273 of developers guide.
        PLM will respond with a 0x60 response.
        """
        LOG.info("Command to get IM info (60)")
        l_command = insteon_utils.create_command_message('plm_info')
        insteon_utils.queue_command(p_controller_obj, l_command, 'Plm Info')

    @staticmethod
    def _queue_62_command(p_controller_obj, p_obj, p_cmd1, p_cmd2, p_text='None'):
        """ Send Insteon Standard Length Message (8 bytes) (SD command).
        or Extended length (22 Bytes) (ED command)
        See page 230(243) of 2009 developers guide.
        @param p_obj: is the device object.
        @param p_cmd1: is the first command byte
        @param p_cmd2: is the second command byte
        [0] = x02
        [1] = 0x62
        [2-4] = to address
        [5] = Message Flags
        [6] = Command 1
        [7] = Command 2
        (8-21) = Extended data in ED type
        """
        # BUGFIX: pre-initialize so the except handler below cannot hit an
        # UnboundLocalError when create_command_message itself raises.
        l_command = None
        try:
            l_command = insteon_utils.create_command_message('insteon_send')
            insteon_utils.insert_address_into_message(p_obj.Family.Address, l_command, 2)
            # Combine the flag bit-fields (0x03 | 0x0C == 0x0F).
            l_command[5] = FLAG_MAX_HOPS | FLAG_HOPS_LEFT  # 0x0F
            l_command[6] = p_obj._Command1 = p_cmd1
            l_command[7] = p_obj._Command2 = p_cmd2
            insteon_utils.queue_command(p_controller_obj, l_command, p_text)
        except Exception as _e_err:
            l_bytes = FormatBytes(l_command) if l_command is not None else 'None'
            LOG.error('Error creating command: {}\n{}\n>>{}<<'.format(
                _e_err, PrettyFormatAny.form(p_obj, 'Device'), l_bytes))

    @staticmethod
    def queue_6B_command(p_controller_obj, p_flags):
        """ Set IM configuration flags (3 bytes).
        See page 271 of Insteon Developers Guide.
        """
        LOG.info("Command to set PLM config flag (6B) - to {:#X}".format(p_flags))
        l_command = insteon_utils.create_command_message('plm_set_config')
        l_command[2] = p_flags
        insteon_utils.queue_command(p_controller_obj, l_command, 'Set Plm Config')

    @staticmethod
    def queue_6C_command(p_controller_obj):
        """ Not implemented. """
        pass

    @staticmethod
    def queue_6D_command(p_controller_obj):
        """ Not implemented. """
        pass

    @staticmethod
    def queue_6E_command(p_controller_obj):
        """ Not implemented. """
        pass

    @staticmethod
    def queue_70_command(p_controller_obj):
        """ Not implemented. """
        pass

    @staticmethod
    def queue_71_command(p_controller_obj):
        """ Not implemented. """
        pass

    @staticmethod
    def queue_72_command(p_controller_obj):
        """RF Sleep - not implemented."""
        pass

    @staticmethod
    def queue_73_command(p_controller_obj):
        """ Send request for PLM configuration (2 bytes).
        See page 270 of Insteon Developers Guide.
        """
        LOG.info("Command to get PLM config (73).")
        l_command = insteon_utils.create_command_message('plm_get_config')
        insteon_utils.queue_command(p_controller_obj, l_command, 'Get Plm Config')
class PlmDriverProtocol(Commands):
    """
    Check the command queue and send the 1st command if available.
    check the plm for received data
    If nothing to send - try again in X seconds.
    if nothing received, try again in Y seconds.
    """

    m_pyhouse_obj = None  # PyHouse root object; set in __init__

    def __init__(self, p_pyhouse_obj, p_controller_obj):
        self.m_pyhouse_obj = p_pyhouse_obj
        LOG.info("Initializing PLM Device Driver Protocol.")
        # Bounded queue (300 entries) of outgoing commands for this controller.
        p_controller_obj._Queue = Queue.Queue(300)
        self.m_decoder = insteon_decoder.DecodeResponses(p_pyhouse_obj, p_controller_obj)
        # Kick off the two self-rescheduling loops (send side and receive side).
        self.dequeue_and_send(p_controller_obj)
        self.receive_loop(p_controller_obj)
        LOG.info("Finished initializing PLM Device Driver Protocol.")

    def driver_loop_stop(self):
        # NOTE(review): the callLater loops scheduled above are not cancelled
        # here - this only logs; confirm whether that is intentional.
        LOG.info('Stopped.')
        pass

    def _find_to_name(self, p_command):
        """ Find the device we are sending a message "To"
        """
        l_name = 'No device'
        try:
            # Bytes 2-4 of an outgoing message hold the destination address.
            l_device_obj = utilDecode().get_obj_from_message(self.m_pyhouse_obj, p_command[2:5])
            l_name = l_device_obj.Name
        except Exception:
            l_name = "Device does not exist."
        return l_name

    def dequeue_and_send(self, p_controller_obj):
        """Check the sending queue every SEND_TIMEOUT seconds and send if anything to send.
        This timed delay will avoid swamping the PLM with too many commands at once
        Uses twisted to get a callback when the timer expires.
        """
        # Re-arm the timer first so the loop keeps running even on early return.
        self.m_pyhouse_obj._Twisted.Reactor.callLater(SEND_TIMEOUT, self.dequeue_and_send, p_controller_obj)
        try:
            l_entry = p_controller_obj._Queue.get(False)  # non-blocking get
            l_command = l_entry.Command
            l_text = l_entry.Text
        except Queue.Empty:
            # Nothing queued - wait for the next timer tick.
            return
        if p_controller_obj.Interface._DriverApi:
            _l_name = self._find_to_name(l_command)
            LOG.info("To: {}, Message: {}".format(_l_name, l_text))
            # Remember the last command sent; decoders match responses to it.
            p_controller_obj._Command1 = l_command
            p_controller_obj.Interface._DriverApi.Write(l_command)
        else:
            LOG.error('UhOh - No driver for {}'.format(p_controller_obj.Name))

    def _append_message(self, p_controller_obj):
        """
        Accumulate data received
        """
        l_msg = p_controller_obj.Interface._DriverApi.Read()
        p_controller_obj._Message.extend(l_msg)
        return p_controller_obj._Message  # For debugging

    def receive_loop(self, p_controller_obj):
        """Check the driver to see if the controller returned any messages.
        Decode message only when we get enough bytes to complete a message.
        Note that there may be more bytes than we need - preserve them.
        TODO: instead of fixed time, callback to here from driver when bytes are rx'ed.
        """
        # Re-arm the polling timer first, as in dequeue_and_send.
        self.m_pyhouse_obj._Twisted.Reactor.callLater(RECEIVE_TIMEOUT, self.receive_loop, p_controller_obj)
        if p_controller_obj.Interface._DriverApi:
            self._append_message(p_controller_obj)
            l_cur_len = len(p_controller_obj._Message)
            if l_cur_len < 2:
                # Not enough bytes yet to even determine the message type.
                return
            l_response_len = insteon_utils.get_message_length(p_controller_obj._Message)
            if l_cur_len >= l_response_len:
                self.m_decoder.decode_message(p_controller_obj)
        else:
            # This else pairs with the _DriverApi check above.
            LOG.error('Driver missing for {}'.format(p_controller_obj.Name))
class InsteonPlmApi:
    """Thin facade over the Insteon all-link helpers."""

    def get_link_records(self, p_controller_obj, _p_obj):
        """Fetch every all-link record held by the controller's PLM."""
        l_links = insteon_link.InsteonAllLinks()
        return l_links.get_all_allinks(p_controller_obj)
class LightHandlerApi:
    """This is the Api for light control.

    Starts/stops the interface driver for a controller and gathers the
    initial status of every Insteon device.
    """

    m_pyhouse_obj = None  # set in start_controller_driver

    def start_controller_driver(self, p_pyhouse_obj, p_controller_obj):
        """ Start the interface driver for one locally attached controller.

        @param p_controller_obj: the controller to start.
        @return: the driver's return value on success, None for a remote
                 controller (which cannot be configured from this node).
        """
        if p_controller_obj._isLocal:
            self.m_pyhouse_obj = p_pyhouse_obj
            l_msg = "Controller:{}, ".format(p_controller_obj.Name)
            l_msg += "Family.Name:{}, ".format(p_controller_obj.Family.Name)
            l_msg += "InterfaceType:{}".format(p_controller_obj.Interface.Type)
            LOG.info('Start Controller - {}'.format(l_msg))
            l_driver = interface.get_device_driver_Api(p_pyhouse_obj, p_controller_obj.Interface)
            l_ret = l_driver.Start(p_controller_obj)
        else:
            LOG.warning('Can not config a remote controller.')
            l_ret = None
        return l_ret

    def stop_controller_driver(self, p_controller_obj):
        """ Stop the interface driver, if one was ever started. """
        if p_controller_obj.Interface._DriverApi:
            p_controller_obj.Interface._DriverApi.Stop()

    def set_plm_mode(self, p_controller_obj):
        """Set the PLM to a mode
        Places the PLM into "Monitor Mode."
        The documentation is a little unclear on what this does.
        In practice, this enables the PLM to receive B<Broadcast> messages from devices which are in the PLM's link database.
        So far, I have encountered two important broadcast messages,
        1) EZFlora (EZRain) can send out broadcast messages whenever a valve changes state,
        2) Each device will send out a broadcast message whenever you hold down the set button for 10 seconds.
        Within MisterHouse this message is used to mark a deaf device as awake for 4 minutes.
        If Monitor Mode is not enabled, PyHouse will not see these messages.
        """
        LOG.info('Setting mode of Insteon controller {}.'.format(p_controller_obj.Name))
        Commands.queue_6B_command(p_controller_obj, MODE_MONITOR)

    @staticmethod
    def _get_one_device_status(p_controller_obj, p_obj):
        """Get the status of a light.
        We will (apparently) get back a 62-ACK followed by a 50 with the level in the response.
        """
        Commands._queue_62_command(p_controller_obj, p_obj, MESSAGE_TYPES['status_request'], 0, 'Device Status')  # 0x19

    @staticmethod
    def _get_engine_version(p_controller_obj, p_obj):
        """ i1 = pre 2007 I think
        i2 = no checksum - new commands
        i2cs = 2012 add checksums + new commands.
        """
        LOG.info('Request Engine version from device: {}'.format(p_obj.Name))
        Commands._queue_62_command(p_controller_obj, p_obj, MESSAGE_TYPES['engine_version'], 0, 'Engine Version')  # 0x0D

    @staticmethod
    def _get_id_request(p_controller_obj, p_obj):
        """Get the device Dev Cat
        """
        LOG.info('Request ID(devCat) from device: {}'.format(p_obj.Name))
        Commands._queue_62_command(p_controller_obj, p_obj, MESSAGE_TYPES['id_request'], 0, 'ID Request')  # 0x10

    def _get_obj_info(self, p_controller_obj, p_obj):
        """ Queue the three discovery commands for one Insteon device;
        devices of other families are skipped with a warning. """
        if p_obj.Family.Name.lower() == 'insteon':
            self._get_engine_version(p_controller_obj, p_obj)
            self._get_id_request(p_controller_obj, p_obj)
            self._get_one_device_status(p_controller_obj, p_obj)
        else:
            LOG.warning('Skipping "{}" "{}" device "{}"'.format(p_obj.DeviceType, p_obj.DeviceSubType, p_obj.Name))

    def get_all_device_information(self, p_pyhouse_obj, p_controller_obj):
        """Get the status (current level) of all insteon devices.
        Used at device start up to populate the database.
        """
        LOG.info('Getting information for all Insteon devices.')
        l_lighting = p_pyhouse_obj.House.Lighting
        # PEP 8: compare against None with "is not", not "!=".
        if l_lighting.Buttons is not None:
            # _get_obj_info itself filters non-insteon families.
            for l_obj in l_lighting.Buttons.values():
                self._get_obj_info(p_controller_obj, l_obj)
            LOG.info('got {} Buttons'.format(len(l_lighting.Buttons)))
        if l_lighting.Lights is not None:
            for l_obj in l_lighting.Lights.values():
                if l_obj.Family.Name.lower() == 'insteon':
                    self._get_obj_info(p_controller_obj, l_obj)
            LOG.info('got {} Lights'.format(len(l_lighting.Lights)))
        if l_lighting.Outlets is not None:
            for l_obj in l_lighting.Outlets.values():
                if l_obj.Family.Name.lower() == 'insteon':
                    self._get_obj_info(p_controller_obj, l_obj)
            LOG.info('got {} Outlets'.format(len(l_lighting.Outlets)))
        if l_lighting.Controllers is not None:
            for l_obj in l_lighting.Controllers.values():
                self._get_obj_info(p_controller_obj, l_obj)
                if l_obj.Family.Name.lower() == 'insteon':
                    InsteonPlmApi().get_link_records(p_controller_obj, l_obj)  # Only from controller
            LOG.info('got {} Controllers'.format(len(l_lighting.Controllers)))
class Api(LightHandlerApi):
    """ Top-level Api for one Insteon PLM controller. """

    m_controller_obj = None
    m_pyhouse_obj = None

    def __init__(self, p_pyhouse_obj, p_controller_obj):
        self.m_pyhouse_obj = p_pyhouse_obj
        self.m_controller_obj = p_controller_obj

    def _get_plm_info(self):
        """ Request the PLM's all-link database (only when a driver is up).
        """
        LOG.info('Get Plm Info')
        if self.m_controller_obj.Interface._DriverApi:
            insteon_link.SendCmd().read_aldb_v2(self.m_controller_obj)

    def XXX_get_controller(self):
        """ used in testing to load the controller info to be used in testing.
        """
        return self.m_controller_obj

    def _start_all_controllers(self):
        """ Start the driver/protocol stack for this node's controller.

        @return: True-ish if the driver opened OK and is usable
                 False if the driver is not functional for any reason.
        """
        LOG.info('Starting all Controllers: "{}"'.format(self.m_controller_obj.Name))
        l_ret = self.start_controller_driver(self.m_pyhouse_obj, self.m_controller_obj)
        # PEP 8: identity comparison against None.
        if l_ret is not None:
            self.m_controller_obj.Node = self.m_pyhouse_obj.Computer.Name
            self.m_controller_obj.LastUsed = datetime.datetime.now()
            LOG.info('Controller Driver Start was OK for "{}".'.format(self.m_controller_obj.Name))
            self.m_protocol = PlmDriverProtocol(self.m_pyhouse_obj, self.m_controller_obj)
            self.set_plm_mode(self.m_controller_obj)
            self.get_all_device_information(self.m_pyhouse_obj, self.m_controller_obj)
        else:
            LOG.error('Insteon Controller start failed for "{}"'.format(self.m_controller_obj.Name))
            l_ret = False
        # Publish the controller status whether the start succeeded or not.
        l_topic = 'house/lighting/controllers/status'
        self.m_pyhouse_obj.Core.MqttApi.MqttPublish(l_topic, self.m_controller_obj)
        return l_ret

    def Start(self):
        """
        Comes from Insteon_device.Api.Start()
        @return: True if the driver opened OK and is usable
                 False if the driver is not functional for any reason.
        """
        LOG.info('Starting a PLM')
        l_ret = self._start_all_controllers()
        if l_ret:
            self._get_plm_info()
        LOG.info('Started.')
        return l_ret

    def Stop(self):
        """ Stop the protocol loops and the underlying driver. """
        self.m_protocol.driver_loop_stop()
        self.stop_controller_driver(self.m_controller_obj)
        LOG.info('Stopped.')

    def Control(self, p_device_obj, p_controller_obj, p_control):
        """
        Insteon PLM specific version of control light
        All that Insteon can control is Brightness and Fade Rate.
        This actually queues up the commands.
        @param p_controller_obj: optional
        @param p_device_obj: the device being controlled
        @param p_control: the idealized light control params ==> Modules.House.Lighting.Lights.lights.Light Data()
        """
        l_level = int(p_control.BrightnessPct)
        # BUGFIX: build the text while l_level is still a percentage; the old
        # code formatted the scaled 0-255 value but labeled it "pct".
        l_text = 'Device level {} pct.'.format(l_level)
        if l_level == 0:
            Commands._queue_62_command(p_controller_obj, p_device_obj, MESSAGE_TYPES['off'], 0, 'Turn OFF')  # 0x13
        elif l_level > 95:
            # Treat anything above 95% as full on.
            Commands._queue_62_command(p_controller_obj, p_device_obj, MESSAGE_TYPES['on'], 255, 'Turn ON')  # 0x11
        else:
            # Scale the percentage to the Insteon 0-255 level range.
            l_level = int(l_level * 255 / 100)
            Commands._queue_62_command(p_controller_obj, p_device_obj, MESSAGE_TYPES['on'], l_level, l_text)  # 0x11
# ## END DBK
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.profiler import option_builder
# pylint: disable=g-bad-import-order
from tensorflow.python.profiler import model_analyzer
from tensorflow.python.profiler.internal import model_analyzer_testlib as lib
builder = option_builder.ProfileOptionBuilder
class ProfilerTest(test.TestCase):
    """Tests the incremental model_analyzer.Profiler against the one-shot
    model_analyzer.profile() API and the advice checkers."""

    @test_util.run_deprecated_v1
    def testProfileBasic(self):
        # Each Profiler.profile_* call writes its report to `outfile`; the
        # equivalent one-shot model_analyzer.profile() command must produce
        # byte-identical output.
        ops.reset_default_graph()
        outfile = os.path.join(test.get_temp_dir(), 'dump')
        opts = (builder(builder.trainable_variables_parameter())
                .with_file_output(outfile)
                .with_accounted_types(['.*'])
                .select(['params', 'float_ops', 'micros', 'bytes',
                         'device', 'op_types', 'occurrence']).build())

        # Test the output without run_meta.
        sess = session.Session()
        r = lib.BuildFullModel()
        sess.run(variables.global_variables_initializer())

        # Test the output with run_meta.
        run_meta = config_pb2.RunMetadata()
        _ = sess.run(r,
                     options=config_pb2.RunOptions(
                         trace_level=config_pb2.RunOptions.FULL_TRACE),
                     run_metadata=run_meta)

        # cmd='graph' vs profile_graph.
        profiler = model_analyzer.Profiler(sess.graph)
        profiler.add_step(1, run_meta)
        profiler.profile_graph(opts)
        with gfile.Open(outfile, 'r') as f:
            profiler_str = f.read()

        model_analyzer.profile(
            sess.graph, cmd='graph', run_meta=run_meta, options=opts)
        with gfile.Open(outfile, 'r') as f:
            pma_str = f.read()
        self.assertEqual(pma_str, profiler_str)

        # cmd='scope' vs profile_name_scope.
        profiler.profile_name_scope(opts)
        with gfile.Open(outfile, 'r') as f:
            profiler_str = f.read()

        model_analyzer.profile(
            sess.graph, cmd='scope', run_meta=run_meta, options=opts)
        with gfile.Open(outfile, 'r') as f:
            pma_str = f.read()
        self.assertEqual(pma_str, profiler_str)

        # cmd='code' vs profile_python.
        profiler.profile_python(opts)
        with gfile.Open(outfile, 'r') as f:
            profiler_str = f.read()

        model_analyzer.profile(
            sess.graph, cmd='code', run_meta=run_meta, options=opts)
        with gfile.Open(outfile, 'r') as f:
            pma_str = f.read()
        self.assertEqual(pma_str, profiler_str)

        # cmd='op' vs profile_operations.
        profiler.profile_operations(opts)
        with gfile.Open(outfile, 'r') as f:
            profiler_str = f.read()

        model_analyzer.profile(
            sess.graph, cmd='op', run_meta=run_meta, options=opts)
        with gfile.Open(outfile, 'r') as f:
            pma_str = f.read()
        self.assertEqual(pma_str, profiler_str)

        # A different command ('scope' here, against the 'op' report kept in
        # profiler_str) must yield different output.
        model_analyzer.profile(
            sess.graph, cmd='scope', run_meta=run_meta, options=opts)
        with gfile.Open(outfile, 'r') as f:
            pma_str = f.read()
        self.assertNotEqual(pma_str, profiler_str)

    def testMultiStepProfile(self):
        # Runs the three stages of BuildSplitableModel in separate steps and
        # checks that each added step makes more nodes visible in the report.
        ops.reset_default_graph()
        opts = builder.time_and_memory(min_bytes=0)

        with session.Session() as sess:
            r1, r2, r3 = lib.BuildSplitableModel()
            sess.run(variables.global_variables_initializer())

            profiler = model_analyzer.Profiler(sess.graph)
            pb0 = profiler.profile_name_scope(opts)

            run_meta = config_pb2.RunMetadata()
            _ = sess.run(r1,
                         options=config_pb2.RunOptions(
                             trace_level=config_pb2.RunOptions.FULL_TRACE),
                         run_metadata=run_meta)
            profiler.add_step(1, run_meta)
            pb1 = profiler.profile_name_scope(opts)

            self.assertNotEqual(lib.SearchTFProfNode(pb1, 'DW'), None)
            self.assertEqual(lib.SearchTFProfNode(pb1, 'DW2'), None)
            self.assertEqual(lib.SearchTFProfNode(pb1, 'add'), None)

            run_meta2 = config_pb2.RunMetadata()
            _ = sess.run(r2,
                         options=config_pb2.RunOptions(
                             trace_level=config_pb2.RunOptions.FULL_TRACE),
                         run_metadata=run_meta2)
            profiler.add_step(2, run_meta2)
            pb2 = profiler.profile_name_scope(opts)

            self.assertNotEqual(lib.SearchTFProfNode(pb2, 'DW'), None)
            self.assertNotEqual(lib.SearchTFProfNode(pb2, 'DW2'), None)
            self.assertEqual(lib.SearchTFProfNode(pb2, 'add'), None)

            run_meta3 = config_pb2.RunMetadata()
            _ = sess.run(r3,
                         options=config_pb2.RunOptions(
                             trace_level=config_pb2.RunOptions.FULL_TRACE),
                         run_metadata=run_meta3)
            profiler.add_step(3, run_meta3)
            pb3 = profiler.profile_name_scope(opts)

            self.assertNotEqual(lib.SearchTFProfNode(pb3, 'DW'), None)
            self.assertNotEqual(lib.SearchTFProfNode(pb3, 'DW2'), None)
            self.assertNotEqual(lib.SearchTFProfNode(pb3, 'add'), None)

            # Nodes only appear once their step's run_meta has been added.
            self.assertEqual(lib.SearchTFProfNode(pb0, 'Conv2D'), None)
            self.assertGreater(lib.SearchTFProfNode(pb1, 'Conv2D').exec_micros, 0)
            self.assertEqual(lib.SearchTFProfNode(pb1, 'Conv2D_1'), None)
            self.assertGreater(lib.SearchTFProfNode(pb2, 'Conv2D_1').exec_micros, 0)
            self.assertEqual(lib.SearchTFProfNode(pb2, 'add'), None)
            self.assertGreater(lib.SearchTFProfNode(pb3, 'add').exec_micros, 0)

            advice_pb = profiler.advise(model_analyzer.ALL_ADVICE)
            self.assertTrue('AcceleratorUtilizationChecker' in advice_pb.checkers)
            self.assertTrue('ExpensiveOperationChecker' in advice_pb.checkers)
            self.assertTrue('OperationChecker' in advice_pb.checkers)

            # Accelerator reports only exist when a GPU was actually used.
            checker = advice_pb.checkers['AcceleratorUtilizationChecker']
            if test.is_gpu_available():
                self.assertGreater(len(checker.reports), 0)
            else:
                self.assertEqual(len(checker.reports), 0)
            checker = advice_pb.checkers['ExpensiveOperationChecker']
            self.assertGreater(len(checker.reports), 0)

    @test_util.run_deprecated_v1
    def testMultipleProfilePerStep(self):
        # Two run_metas added for the same step id accumulate: the variable
        # initializer's stats only show up after its run_meta is added.
        ops.reset_default_graph()
        opts = (builder(builder.trainable_variables_parameter())
                .with_empty_output()
                .with_accounted_types(['.*'])
                .select(['micros', 'bytes', 'peak_bytes',
                         'residual_bytes', 'output_bytes']).build())

        r = lib.BuildSmallModel()
        sess = session.Session()
        profiler = model_analyzer.Profiler(sess.graph)

        init_var_run_meta = config_pb2.RunMetadata()
        sess.run(variables.global_variables_initializer(),
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=init_var_run_meta)

        train_run_meta = config_pb2.RunMetadata()
        sess.run(r,
                 options=config_pb2.RunOptions(
                     trace_level=config_pb2.RunOptions.FULL_TRACE),
                 run_metadata=train_run_meta)

        profiler.add_step(0, train_run_meta)
        ret1 = profiler.profile_name_scope(opts)
        n1 = lib.SearchTFProfNode(
            ret1, 'DW/Initializer/random_normal/RandomStandardNormal')
        # Without the var initialization run_meta, it doesn't have the
        # information of var_initialization.
        self.assertEqual(n1.exec_micros, 0)
        self.assertEqual(n1.requested_bytes, 0)
        self.assertEqual(n1.peak_bytes, 0)
        self.assertEqual(n1.residual_bytes, 0)

        profiler.add_step(0, init_var_run_meta)
        ret2 = profiler.profile_name_scope(opts)
        n2 = lib.SearchTFProfNode(
            ret2, 'DW/Initializer/random_normal/RandomStandardNormal')
        # After adding the var initialization run_meta.
        self.assertGreater(n2.exec_micros, 0)
        self.assertGreater(n2.requested_bytes, 0)
        self.assertGreater(n2.peak_bytes, 0)
        self.assertGreater(n2.residual_bytes, 0)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
    test.main()
| |
# Transliteration table for the Unicode U+31xx block (Bopomofo letters,
# Hangul Compatibility Jamo, and Bopomofo Extended).  The index into the
# tuple is the low byte of the code point; '[?]' marks an unmapped position
# and '' marks a character deliberately transliterated to nothing.
data = (
    '[?]', '[?]', '[?]', '[?]', '[?]', 'B', 'P', 'M',                # 0x00-0x07
    'F', 'D', 'T', 'N', 'L', 'G', 'K', 'H',                          # 0x08-0x0f
    'J', 'Q', 'X', 'ZH', 'CH', 'SH', 'R', 'Z',                       # 0x10-0x17
    'C', 'S', 'A', 'O', 'E', 'EH', 'AI', 'EI',                       # 0x18-0x1f
    'AU', 'OU', 'AN', 'EN', 'ANG', 'ENG', 'ER', 'I',                 # 0x20-0x27
    'U', 'IU', 'V', 'NG', 'GN', '[?]', '[?]', '[?]',                 # 0x28-0x2f
    '[?]', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd',                    # 0x30-0x37
    'dd', 'r', 'lg', 'lm', 'lb', 'ls', 'lt', 'lp',                   # 0x38-0x3f
    'rh', 'm', 'b', 'bb', 'bs', 's', 'ss', '',                       # 0x40-0x47
    'j', 'jj', 'c', 'k', 't', 'p', 'h', 'a',                         # 0x48-0x4f
    'ae', 'ya', 'yae', 'eo', 'e', 'yeo', 'ye', 'o',                  # 0x50-0x57
    'wa', 'wae', 'oe', 'yo', 'u', 'weo', 'we', 'wi',                 # 0x58-0x5f
    'yu', 'eu', 'yi', 'i', '', 'nn', 'nd', 'ns',                     # 0x60-0x67
    'nZ', 'lgs', 'ld', 'lbs', 'lZ', 'lQ', 'mb', 'ms',                # 0x68-0x6f
    'mZ', 'mN', 'bg', '', 'bsg', 'bst', 'bj', 'bt',                  # 0x70-0x77
    'bN', 'bbN', 'sg', 'sn', 'sd', 'sb', 'sj', 'Z',                  # 0x78-0x7f
    '', 'N', 'Ns', 'NZ', 'pN', 'hh', 'Q', 'yo-ya',                   # 0x80-0x87
    'yo-yae', 'yo-i', 'yu-yeo', 'yu-ye', 'yu-i', 'U', 'U-i', '[?]',  # 0x88-0x8f
    '', '', '', '', '', '', '', '',                                  # 0x90-0x97
    '', '', '', '', '', '', '', '',                                  # 0x98-0x9f
    'BU', 'ZI', 'JI', 'GU', 'EE', 'ENN', 'OO', 'ONN',                # 0xa0-0xa7
    'IR', 'ANN', 'INN', 'UNN', 'IM', 'NGG', 'AINN', 'AUNN',          # 0xa8-0xaf
    'AM', 'OM', 'ONG', 'INNN', 'P', 'T', 'K', 'H',                   # 0xb0-0xb7
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xb8-0xbf
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xc0-0xc7
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xc8-0xcf
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xd0-0xd7
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xd8-0xdf
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xe0-0xe7
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xe8-0xef
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',          # 0xf0-0xf7
    '[?]', '[?]', '[?]', '[?]', '[?]', '[?]', '[?]',                 # 0xf8-0xfe
)
| |
from sqlalchemy import schema as sa_schema, types as sqltypes, sql
import logging
from .. import compat
import re
from ..compat import string_types
log = logging.getLogger(__name__)
try:
from sqlalchemy.sql.naming import conv
def _render_gen_name(autogen_context, name):
if isinstance(name, conv):
return _f_name(_alembic_autogenerate_prefix(autogen_context), name)
else:
return name
except ImportError:
def _render_gen_name(autogen_context, name):
return name
class _f_name(object):
def __init__(self, prefix, name):
self.prefix = prefix
self.name = name
def __repr__(self):
return "%sf(%r)" % (self.prefix, self.name)
def _render_potential_expr(value, autogen_context):
    """Render *value* as an ``sa.text()`` literal when it is a SQL clause
    element, otherwise as its plain repr."""
    if not isinstance(value, sql.ClauseElement):
        return repr(value)
    # literal_binds is only available on SQLAlchemy >= 0.8.
    if compat.sqla_08:
        compile_kw = dict(compile_kwargs={'literal_binds': True})
    else:
        compile_kw = {}
    compiled = str(
        value.compile(dialect=autogen_context['dialect'], **compile_kw))
    return "%(prefix)stext(%(sql)r)" % {
        "prefix": _sqlalchemy_autogenerate_prefix(autogen_context),
        "sql": compiled,
    }
def _add_table(table, autogen_context):
    """Render an ``op.create_table()`` call for *table*.

    Columns are emitted in table order; constraint renderings are sorted so
    output is deterministic.  Renderers returning a falsy value (column) or
    None (constraint) cause that element to be omitted.
    """
    text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % {
        'tablename': table.name,
        'prefix': _alembic_autogenerate_prefix(autogen_context),
        'args': ',\n'.join(
            [col for col in
             [_render_column(col, autogen_context) for col in table.c]
             if col] +
            sorted([rcons for rcons in
                    [_render_constraint(cons, autogen_context) for cons in
                     table.constraints]
                    if rcons is not None
                    ])
        )
    }
    if table.schema:
        text += ",\nschema=%r" % table.schema
    for k in sorted(table.kwargs):
        # Dialect kwarg keys may contain spaces; normalize to identifiers.
        text += ",\n%s=%r" % (k.replace(" ", "_"), table.kwargs[k])
    text += "\n)"
    return text
def _drop_table(table, autogen_context):
    """Render an ``op.drop_table()`` call for *table*."""
    rendered = "%(prefix)sdrop_table(%(tname)r" % {
        "prefix": _alembic_autogenerate_prefix(autogen_context),
        "tname": table.name
    }
    if table.schema:
        rendered += ", schema=%r" % table.schema
    return rendered + ")"
def _add_index(index, autogen_context):
    """
    Generate Alembic operations for the CREATE INDEX of an
    :class:`~sqlalchemy.schema.Index` instance.
    """
    # Local import to avoid a circular dependency with the compare module.
    from .compare import _get_index_column_names

    text = "%(prefix)screate_index(%(name)r, '%(table)s', %(columns)s, "\
        "unique=%(unique)r%(schema)s%(kwargs)s)" % {
            'prefix': _alembic_autogenerate_prefix(autogen_context),
            'name': _render_gen_name(autogen_context, index.name),
            'table': index.table.name,
            'columns': _get_index_column_names(index),
            'unique': index.unique or False,
            'schema': (", schema='%s'" % index.table.schema) if index.table.schema else '',
            # Dialect-specific index options are rendered as keyword args.
            'kwargs': (', '+', '.join(
                ["%s=%s" % (key, _render_potential_expr(val, autogen_context))
                 for key, val in index.kwargs.items()]))\
            if len(index.kwargs) else ''
        }
    return text
def _drop_index(index, autogen_context):
    """
    Generate Alembic operations for the DROP INDEX of an
    :class:`~sqlalchemy.schema.Index` instance.
    """
    if index.table.schema:
        schema_text = ", schema='%s'" % index.table.schema
    else:
        schema_text = ''
    return "%sdrop_index(%r, table_name='%s'%s)" % (
        _alembic_autogenerate_prefix(autogen_context),
        _render_gen_name(autogen_context, index.name),
        index.table.name,
        schema_text,
    )
def _render_unique_constraint(constraint, autogen_context):
    """Render a UniqueConstraint in its inline (non-ALTER) form,
    giving any user-defined "unique" renderer first refusal."""
    user_rendered = _user_defined_render("unique", constraint, autogen_context)
    if user_rendered is not False:
        return user_rendered
    return _uq_constraint(constraint, autogen_context, False)
def _add_unique_constraint(constraint, autogen_context):
    """
    Generate Alembic operations for the ALTER TABLE .. ADD CONSTRAINT ...
    UNIQUE of a :class:`~sqlalchemy.schema.UniqueConstraint` instance.

    Delegates to :func:`_uq_constraint` in ALTER mode.
    """
    return _uq_constraint(constraint, autogen_context, True)
def _uq_constraint(constraint, autogen_context, alter):
    """Render a unique constraint.

    With ``alter=True``, produce an ``op.create_unique_constraint()``
    call; otherwise produce an inline ``UniqueConstraint(...)``
    expression for use inside ``create_table()``.
    """
    opts = []
    if constraint.deferrable:
        opts.append(("deferrable", str(constraint.deferrable)))
    if constraint.initially:
        opts.append(("initially", str(constraint.initially)))
    # schema only applies to the ALTER form; name only to the inline form
    if alter and constraint.table.schema:
        opts.append(("schema", str(constraint.table.schema)))
    if not alter and constraint.name:
        opts.append(("name", _render_gen_name(autogen_context, constraint.name)))
    rendered_opts = ["%s=%r" % (key, value) for key, value in opts]
    if alter:
        args = [
            repr(_render_gen_name(autogen_context, constraint.name)),
            repr(constraint.table.name),
            repr([col.name for col in constraint.columns]),
        ]
        args.extend(rendered_opts)
        return "%screate_unique_constraint(%s)" % (
            _alembic_autogenerate_prefix(autogen_context), ", ".join(args))
    args = [repr(col.name) for col in constraint.columns]
    args.extend(rendered_opts)
    return "%sUniqueConstraint(%s)" % (
        _sqlalchemy_autogenerate_prefix(autogen_context), ", ".join(args))
def _add_fk_constraint(constraint, autogen_context):
raise NotImplementedError()
def _add_pk_constraint(constraint, autogen_context):
raise NotImplementedError()
def _add_check_constraint(constraint, autogen_context):
raise NotImplementedError()
def _add_constraint(constraint, autogen_context):
    """
    Dispatcher for the different types of constraints.

    Looks the constraint up by its SQLAlchemy ``__visit_name__`` and
    raises ``KeyError`` for unknown constraint types.
    """
    dispatch = {
        "unique_constraint": _add_unique_constraint,
        "foreign_key_constraint": _add_fk_constraint,
        "primary_key_constraint": _add_pk_constraint,
        "check_constraint": _add_check_constraint,
        "column_check_constraint": _add_check_constraint,
    }
    handler = dispatch[constraint.__visit_name__]
    return handler(constraint, autogen_context)
def _drop_constraint(constraint, autogen_context):
    """
    Generate Alembic operations for the ALTER TABLE ... DROP CONSTRAINT
    of a :class:`~sqlalchemy.schema.UniqueConstraint` instance.
    """
    if constraint.table.schema:
        schema_text = ", schema='%s'" % constraint.table.schema
    else:
        schema_text = ''
    return "%sdrop_constraint(%r, '%s'%s)" % (
        _alembic_autogenerate_prefix(autogen_context),
        _render_gen_name(autogen_context, constraint.name),
        constraint.table.name,
        schema_text,
    )
def _add_column(schema, tname, column, autogen_context):
    """Render an ``op.add_column()`` call for *column* on table *tname*."""
    parts = ["%sadd_column(%r, %s" % (
        _alembic_autogenerate_prefix(autogen_context),
        tname,
        _render_column(column, autogen_context),
    )]
    if schema:
        parts.append(", schema=%r" % schema)
    parts.append(")")
    return "".join(parts)
def _drop_column(schema, tname, column, autogen_context):
    """Render an ``op.drop_column()`` call for *column* on table *tname*."""
    parts = ["%sdrop_column(%r, %r" % (
        _alembic_autogenerate_prefix(autogen_context),
        tname,
        column.name,
    )]
    if schema:
        parts.append(", schema=%r" % schema)
    parts.append(")")
    return "".join(parts)
def _modify_col(tname, cname,
                autogen_context,
                server_default=False,
                type_=None,
                nullable=None,
                existing_type=None,
                existing_nullable=None,
                existing_server_default=False,
                schema=None):
    """Render an ``op.alter_column()`` call for column *cname* of table
    *tname*.

    ``existing_type`` is always emitted; the remaining keyword arguments
    are emitted only when supplied.  ``server_default`` and
    ``existing_server_default`` use ``False`` (not ``None``) as their
    "not given" sentinel, since ``None`` is a meaningful default value.
    """
    # continuation lines are aligned under "alter_column(" in the output
    indent = " " * 11
    text = "%(prefix)salter_column(%(tname)r, %(cname)r" % {
        'prefix': _alembic_autogenerate_prefix(
            autogen_context),
        'tname': tname,
        'cname': cname}
    text += ",\n%sexisting_type=%s" % (indent,
                                       _repr_type(existing_type, autogen_context))
    if server_default is not False:
        rendered = _render_server_default(
            server_default, autogen_context)
        text += ",\n%sserver_default=%s" % (indent, rendered)
    if type_ is not None:
        text += ",\n%stype_=%s" % (indent,
                                   _repr_type(type_, autogen_context))
    if nullable is not None:
        text += ",\n%snullable=%r" % (
            indent, nullable,)
    if existing_nullable is not None:
        text += ",\n%sexisting_nullable=%r" % (
            indent, existing_nullable)
    if existing_server_default:
        rendered = _render_server_default(
            existing_server_default,
            autogen_context)
        text += ",\n%sexisting_server_default=%s" % (
            indent, rendered)
    if schema:
        text += ",\n%sschema=%r" % (indent, schema)
    text += ")"
    return text
def _user_autogenerate_prefix(autogen_context):
prefix = autogen_context['opts']['user_module_prefix']
if prefix is None:
return _sqlalchemy_autogenerate_prefix(autogen_context)
else:
return prefix
def _sqlalchemy_autogenerate_prefix(autogen_context):
return autogen_context['opts']['sqlalchemy_module_prefix'] or ''
def _alembic_autogenerate_prefix(autogen_context):
return autogen_context['opts']['alembic_module_prefix'] or ''
def _user_defined_render(type_, object_, autogen_context):
if 'opts' in autogen_context and \
'render_item' in autogen_context['opts']:
render = autogen_context['opts']['render_item']
if render:
rendered = render(type_, object_, autogen_context)
if rendered is not False:
return rendered
return False
def _render_column(column, autogen_context):
    """Render a ``Column(...)`` expression for *column*, giving any
    user-defined "column" renderer first refusal."""
    user_rendered = _user_defined_render("column", column, autogen_context)
    if user_rendered is not False:
        return user_rendered
    opts = []
    if column.server_default:
        rendered_default = _render_server_default(
            column.server_default, autogen_context
        )
        if rendered_default:
            opts.append(("server_default", rendered_default))
    if not column.autoincrement:
        opts.append(("autoincrement", column.autoincrement))
    if column.nullable is not None:
        opts.append(("nullable", column.nullable))
    # TODO: for non-ascii colname, assign a "key"
    kw_text = ", ".join("%s=%s" % (name, value) for name, value in opts)
    return "%sColumn(%r, %s, %s)" % (
        _sqlalchemy_autogenerate_prefix(autogen_context),
        column.name,
        _repr_type(column.type, autogen_context),
        kw_text,
    )
def _render_server_default(default, autogen_context):
    """Render a server_default value as Python source.

    DefaultClause arguments are unwrapped (compiling SQL expressions
    against the migration dialect); string defaults are returned via
    ``repr()``, anything else renders as ``None``.
    """
    user_rendered = _user_defined_render(
        "server_default", default, autogen_context)
    if user_rendered is not False:
        return user_rendered
    if isinstance(default, sa_schema.DefaultClause):
        if isinstance(default.arg, string_types):
            default = default.arg
        else:
            default = str(default.arg.compile(
                dialect=autogen_context['dialect']))
    if not isinstance(default, string_types):
        return None
    # TODO: this is just a hack to get
    # tests to pass until we figure out
    # WTF sqlite is doing
    return repr(re.sub(r"^'|'$", "", default))
def _repr_type(type_, autogen_context):
    """Render a type object with the appropriate module prefix, adding
    a dialect import to the collected autogen imports when needed."""
    user_rendered = _user_defined_render("type", type_, autogen_context)
    if user_rendered is not False:
        return user_rendered
    mod = type(type_).__module__
    if mod.startswith("sqlalchemy.dialects"):
        dialect_name = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1)
        imports = autogen_context.get('imports', None)
        if imports is not None:
            imports.add("from sqlalchemy.dialects import %s" % dialect_name)
        return "%s.%r" % (dialect_name, type_)
    if mod.startswith("sqlalchemy"):
        return "%s%r" % (
            _sqlalchemy_autogenerate_prefix(autogen_context), type_)
    return "%s%r" % (_user_autogenerate_prefix(autogen_context), type_)
def _render_constraint(constraint, autogen_context):
    """Dispatch to the renderer registered for this constraint's class
    in ``_constraint_renderers``; return None for unknown types."""
    renderer = _constraint_renderers.get(type(constraint))
    return renderer(constraint, autogen_context) if renderer else None
def _render_primary_key(constraint, autogen_context):
    """Render an inline ``PrimaryKeyConstraint(...)`` expression, or
    None when the constraint has no columns."""
    user_rendered = _user_defined_render(
        "primary_key", constraint, autogen_context)
    if user_rendered is not False:
        return user_rendered
    if not constraint.columns:
        return None
    opts = []
    if constraint.name:
        opts.append(
            ("name", repr(_render_gen_name(autogen_context, constraint.name))))
    args = [repr(col.key) for col in constraint.columns]
    args.extend("%s=%s" % (name, value) for name, value in opts)
    return "%sPrimaryKeyConstraint(%s)" % (
        _sqlalchemy_autogenerate_prefix(autogen_context), ", ".join(args))
def _fk_colspec(fk, metadata_schema):
"""Implement a 'safe' version of ForeignKey._get_colspec() that
never tries to resolve the remote table.
"""
if metadata_schema is None:
return fk._get_colspec()
else:
# need to render schema breaking up tokens by hand, since the
# ForeignKeyConstraint here may not actually have a remote
# Table present
tokens = fk._colspec.split(".")
# no schema in the colspec, render it
if len(tokens) == 2:
return "%s.%s" % (metadata_schema, fk._colspec)
else:
return fk._colspec
def _render_foreign_key(constraint, autogen_context):
    """Render an inline ``ForeignKeyConstraint([...], [...], ...)``
    expression, giving any user-defined renderer first refusal."""
    user_rendered = _user_defined_render(
        "foreign_key", constraint, autogen_context)
    if user_rendered is not False:
        return user_rendered
    opts = []
    if constraint.name:
        opts.append(
            ("name", repr(_render_gen_name(autogen_context, constraint.name))))
    # optional FK attributes, rendered in a fixed order
    for attr in ('onupdate', 'ondelete', 'initially',
                 'deferrable', 'use_alter'):
        value = getattr(constraint, attr)
        if value:
            opts.append((attr, repr(value)))
    apply_metadata_schema = constraint.parent.metadata.schema
    local_cols = ", ".join(
        "'%s'" % element.parent.key for element in constraint.elements)
    remote_cols = ", ".join(
        repr(_fk_colspec(element, apply_metadata_schema))
        for element in constraint.elements)
    kw_text = ", ".join("%s=%s" % (name, value) for name, value in opts)
    return "%sForeignKeyConstraint([%s], [%s], %s)" % (
        _sqlalchemy_autogenerate_prefix(autogen_context),
        local_cols,
        remote_cols,
        kw_text,
    )
def _render_check_constraint(constraint, autogen_context):
    """Render an inline ``CheckConstraint(...)`` expression, or None
    when the constraint belongs to a parent type already in the Table."""
    user_rendered = _user_defined_render("check", constraint, autogen_context)
    if user_rendered is not False:
        return user_rendered
    # detect the constraint being part of
    # a parent type which is probably in the Table already.
    # ideally SQLAlchemy would give us more of a first class
    # way to detect this.
    create_rule = constraint._create_rule
    if create_rule and \
            hasattr(create_rule, 'target') and \
            isinstance(create_rule.target, sqltypes.TypeEngine):
        return None
    opts = []
    if constraint.name:
        opts.append(
            ("name", repr(_render_gen_name(autogen_context, constraint.name))))
    if opts:
        opts_text = ", " + ", ".join("%s=%s" % (k, v) for k, v in opts)
    else:
        opts_text = ""
    sqltext = str(
        constraint.sqltext.compile(dialect=autogen_context['dialect']))
    return "%sCheckConstraint(%r%s)" % (
        _sqlalchemy_autogenerate_prefix(autogen_context), sqltext, opts_text)
# Maps constraint classes to their inline render functions; consulted by
# _render_constraint() for dispatch (unlisted types render as None).
_constraint_renderers = {
    sa_schema.PrimaryKeyConstraint: _render_primary_key,
    sa_schema.ForeignKeyConstraint: _render_foreign_key,
    sa_schema.UniqueConstraint: _render_unique_constraint,
    sa_schema.CheckConstraint: _render_check_constraint
}
| |
# tests common to dict and UserDict
import unittest
import collections
class BasicTestMappingProtocol(unittest.TestCase):
    """Exercise the read/write mapping protocol against ``type2test``.

    Subclasses set ``type2test`` and may override the ``_reference`` /
    ``_empty_mapping`` / ``_full_mapping`` factories to adapt the
    fixtures to their mapping's semantics.
    """
    # This base class can be used to check that an object conforms to the
    # mapping protocol
    # Functions that can be useful to override to adapt to dictionary
    # semantics
    type2test = None # which class is being tested (overwrite in subclasses)
    def _reference(self):
        """Return a dictionary of values which are invariant by storage
        in the object under test."""
        return {1:2, "key1":"value1", "key2":(1,2,3)}
    def _empty_mapping(self):
        """Return an empty mapping object"""
        return self.type2test()
    def _full_mapping(self, data):
        """Return a mapping object with the value contained in data
        dictionary"""
        x = self._empty_mapping()
        for key, value in data.items():
            x[key] = value
        return x
    def __init__(self, *args, **kw):
        unittest.TestCase.__init__(self, *args, **kw)
        # popitem() pulls two pairs out of the reference data so that we
        # know one pair outside the mapping (self.other) and one pair
        # inside it (self.inmapping); the inside pair is put back.
        self.reference = self._reference().copy()
        # A (key, value) pair not in the mapping
        key, value = self.reference.popitem()
        self.other = {key:value}
        # A (key, value) pair in the mapping
        key, value = self.reference.popitem()
        self.inmapping = {key:value}
        self.reference[key] = value
    def test_read(self):
        # Test for read only operations on mapping
        p = self._empty_mapping()
        p1 = dict(p) #workaround for singleton objects
        d = self._full_mapping(self.reference)
        if d is p:
            p = p1
        #Indexing
        for key, value in self.reference.items():
            self.assertEqual(d[key], value)
        knownkey = list(self.other.keys())[0]
        self.assertRaises(KeyError, lambda:d[knownkey])
        #len
        self.assertEqual(len(p), 0)
        self.assertEqual(len(d), len(self.reference))
        #__contains__
        for k in self.reference:
            self.assertIn(k, d)
        for k in self.other:
            self.assertNotIn(k, d)
        #cmp
        self.assertEqual(p, p)
        self.assertEqual(d, d)
        self.assertNotEqual(p, d)
        self.assertNotEqual(d, p)
        #__non__zero__
        if p: self.fail("Empty mapping must compare to False")
        if not d: self.fail("Full mapping must compare to True")
        # keys(), items(), iterkeys() ...
        def check_iterandlist(iter, lst, ref):
            # the view must be a genuine iterator and agree with both
            # the listed form and the reference data
            self.assertTrue(hasattr(iter, '__next__'))
            self.assertTrue(hasattr(iter, '__iter__'))
            x = list(iter)
            self.assertTrue(set(x)==set(lst)==set(ref))
        check_iterandlist(iter(d.keys()), list(d.keys()),
                          self.reference.keys())
        check_iterandlist(iter(d), list(d.keys()), self.reference.keys())
        check_iterandlist(iter(d.values()), list(d.values()),
                          self.reference.values())
        check_iterandlist(iter(d.items()), list(d.items()),
                          self.reference.items())
        #get
        key, value = next(iter(d.items()))
        knownkey, knownvalue = next(iter(self.other.items()))
        self.assertEqual(d.get(key, knownvalue), value)
        self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
        self.assertNotIn(knownkey, d)
    def test_write(self):
        # Test for write operations on mapping
        p = self._empty_mapping()
        #Indexing
        for key, value in self.reference.items():
            p[key] = value
            self.assertEqual(p[key], value)
        for key in self.reference.keys():
            del p[key]
            self.assertRaises(KeyError, lambda:p[key])
        p = self._empty_mapping()
        #update
        p.update(self.reference)
        self.assertEqual(dict(p), self.reference)
        items = list(p.items())
        p = self._empty_mapping()
        p.update(items)
        self.assertEqual(dict(p), self.reference)
        d = self._full_mapping(self.reference)
        #setdefault
        key, value = next(iter(d.items()))
        knownkey, knownvalue = next(iter(self.other.items()))
        self.assertEqual(d.setdefault(key, knownvalue), value)
        self.assertEqual(d[key], value)
        self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
        self.assertEqual(d[knownkey], knownvalue)
        #pop
        self.assertEqual(d.pop(knownkey), knownvalue)
        self.assertNotIn(knownkey, d)
        self.assertRaises(KeyError, d.pop, knownkey)
        default = 909
        d[knownkey] = knownvalue
        self.assertEqual(d.pop(knownkey, default), knownvalue)
        self.assertNotIn(knownkey, d)
        self.assertEqual(d.pop(knownkey, default), default)
        #popitem
        key, value = d.popitem()
        self.assertNotIn(key, d)
        self.assertEqual(value, self.reference[key])
        p=self._empty_mapping()
        self.assertRaises(KeyError, p.popitem)
    def test_constructor(self):
        self.assertEqual(self._empty_mapping(), self._empty_mapping())
    def test_bool(self):
        self.assertTrue(not self._empty_mapping())
        self.assertTrue(self.reference)
        self.assertTrue(bool(self._empty_mapping()) is False)
        self.assertTrue(bool(self.reference) is True)
    def test_keys(self):
        d = self._empty_mapping()
        self.assertEqual(list(d.keys()), [])
        d = self.reference
        self.assertIn(list(self.inmapping.keys())[0], d.keys())
        self.assertNotIn(list(self.other.keys())[0], d.keys())
        self.assertRaises(TypeError, d.keys, None)
    def test_values(self):
        d = self._empty_mapping()
        self.assertEqual(list(d.values()), [])
        self.assertRaises(TypeError, d.values, None)
    def test_items(self):
        d = self._empty_mapping()
        self.assertEqual(list(d.items()), [])
        self.assertRaises(TypeError, d.items, None)
    def test_len(self):
        d = self._empty_mapping()
        self.assertEqual(len(d), 0)
    def test_getitem(self):
        d = self.reference
        self.assertEqual(d[list(self.inmapping.keys())[0]],
                         list(self.inmapping.values())[0])
        self.assertRaises(TypeError, d.__getitem__)
    def test_update(self):
        # mapping argument
        d = self._empty_mapping()
        d.update(self.other)
        self.assertEqual(list(d.items()), list(self.other.items()))
        # No argument
        d = self._empty_mapping()
        d.update()
        self.assertEqual(d, self._empty_mapping())
        # item sequence
        d = self._empty_mapping()
        d.update(self.other.items())
        self.assertEqual(list(d.items()), list(self.other.items()))
        # Iterator
        d = self._empty_mapping()
        d.update(self.other.items())
        self.assertEqual(list(d.items()), list(self.other.items()))
        # FIXME: Doesn't work with UserDict
        # self.assertRaises((TypeError, AttributeError), d.update, None)
        self.assertRaises((TypeError, AttributeError), d.update, 42)
        outerself = self
        class SimpleUserDict:
            def __init__(self):
                self.d = outerself.reference
            def keys(self):
                return self.d.keys()
            def __getitem__(self, i):
                return self.d[i]
        d.clear()
        d.update(SimpleUserDict())
        i1 = sorted(d.items())
        i2 = sorted(self.reference.items())
        self.assertEqual(i1, i2)
        # The FailingUserDict variants below check that exceptions raised
        # from keys(), iteration, or __getitem__ propagate out of update().
        class Exc(Exception): pass
        d = self._empty_mapping()
        class FailingUserDict:
            def keys(self):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        d.clear()
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = 1
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i:
                            self.i = 0
                            return 'a'
                        raise Exc
                return BogonIter()
            def __getitem__(self, key):
                return key
        self.assertRaises(Exc, d.update, FailingUserDict())
        class FailingUserDict:
            def keys(self):
                class BogonIter:
                    def __init__(self):
                        self.i = ord('a')
                    def __iter__(self):
                        return self
                    def __next__(self):
                        if self.i <= ord('z'):
                            rtn = chr(self.i)
                            self.i += 1
                            return rtn
                        raise StopIteration
                return BogonIter()
            def __getitem__(self, key):
                raise Exc
        self.assertRaises(Exc, d.update, FailingUserDict())
        d = self._empty_mapping()
        class badseq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, d.update, badseq())
        self.assertRaises(ValueError, d.update, [(1, 2, 3)])
    # no test_fromkeys or test_copy as both os.environ and selves don't support it
    def test_get(self):
        d = self._empty_mapping()
        self.assertTrue(d.get(list(self.other.keys())[0]) is None)
        self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
        d = self.reference
        self.assertTrue(d.get(list(self.other.keys())[0]) is None)
        self.assertEqual(d.get(list(self.other.keys())[0], 3), 3)
        self.assertEqual(d.get(list(self.inmapping.keys())[0]),
                         list(self.inmapping.values())[0])
        self.assertEqual(d.get(list(self.inmapping.keys())[0], 3),
                         list(self.inmapping.values())[0])
        self.assertRaises(TypeError, d.get)
        self.assertRaises(TypeError, d.get, None, None, None)
    def test_setdefault(self):
        d = self._empty_mapping()
        self.assertRaises(TypeError, d.setdefault)
    def test_popitem(self):
        d = self._empty_mapping()
        self.assertRaises(KeyError, d.popitem)
        self.assertRaises(TypeError, d.popitem, 42)
    def test_pop(self):
        d = self._empty_mapping()
        k, v = list(self.inmapping.items())[0]
        d[k] = v
        self.assertRaises(KeyError, d.pop, list(self.other.keys())[0])
        self.assertEqual(d.pop(k), v)
        self.assertEqual(len(d), 0)
        self.assertRaises(KeyError, d.pop, k)
class TestMappingProtocol(BasicTestMappingProtocol):
    """Extend the basic protocol checks with dict-like behaviour:
    keyword-argument constructors, ``fromkeys``, ``copy``, ``clear``
    and richer ``update()`` semantics.
    """
    def test_constructor(self):
        BasicTestMappingProtocol.test_constructor(self)
        self.assertTrue(self._empty_mapping() is not self._empty_mapping())
        self.assertEqual(self.type2test(x=1, y=2), {"x": 1, "y": 2})
    def test_bool(self):
        BasicTestMappingProtocol.test_bool(self)
        self.assertTrue(not self._empty_mapping())
        self.assertTrue(self._full_mapping({"x": "y"}))
        self.assertTrue(bool(self._empty_mapping()) is False)
        self.assertTrue(bool(self._full_mapping({"x": "y"})) is True)
    def test_keys(self):
        BasicTestMappingProtocol.test_keys(self)
        d = self._empty_mapping()
        self.assertEqual(list(d.keys()), [])
        d = self._full_mapping({'a': 1, 'b': 2})
        k = d.keys()
        self.assertIn('a', k)
        self.assertIn('b', k)
        self.assertNotIn('c', k)
    def test_values(self):
        BasicTestMappingProtocol.test_values(self)
        d = self._full_mapping({1:2})
        self.assertEqual(list(d.values()), [2])
    def test_items(self):
        BasicTestMappingProtocol.test_items(self)
        d = self._full_mapping({1:2})
        self.assertEqual(list(d.items()), [(1, 2)])
    def test_contains(self):
        d = self._empty_mapping()
        self.assertNotIn('a', d)
        self.assertTrue(not ('a' in d))
        self.assertTrue('a' not in d)
        d = self._full_mapping({'a': 1, 'b': 2})
        self.assertIn('a', d)
        self.assertIn('b', d)
        self.assertNotIn('c', d)
        self.assertRaises(TypeError, d.__contains__)
    def test_len(self):
        BasicTestMappingProtocol.test_len(self)
        d = self._full_mapping({'a': 1, 'b': 2})
        self.assertEqual(len(d), 2)
    def test_getitem(self):
        BasicTestMappingProtocol.test_getitem(self)
        d = self._full_mapping({'a': 1, 'b': 2})
        self.assertEqual(d['a'], 1)
        self.assertEqual(d['b'], 2)
        d['c'] = 3
        d['a'] = 4
        self.assertEqual(d['c'], 3)
        self.assertEqual(d['a'], 4)
        del d['b']
        self.assertEqual(d, {'a': 4, 'c': 3})
        self.assertRaises(TypeError, d.__getitem__)
    def test_clear(self):
        d = self._full_mapping({1:1, 2:2, 3:3})
        d.clear()
        self.assertEqual(d, {})
        self.assertRaises(TypeError, d.clear, None)
    def test_update(self):
        BasicTestMappingProtocol.test_update(self)
        # mapping argument
        d = self._empty_mapping()
        d.update({1:100})
        d.update({2:20})
        d.update({1:1, 2:2, 3:3})
        self.assertEqual(d, {1:1, 2:2, 3:3})
        # no argument
        d.update()
        self.assertEqual(d, {1:1, 2:2, 3:3})
        # keyword arguments
        d = self._empty_mapping()
        d.update(x=100)
        d.update(y=20)
        d.update(x=1, y=2, z=3)
        self.assertEqual(d, {"x":1, "y":2, "z":3})
        # item sequence
        d = self._empty_mapping()
        d.update([("x", 100), ("y", 20)])
        self.assertEqual(d, {"x":100, "y":20})
        # Both item sequence and keyword arguments
        d = self._empty_mapping()
        d.update([("x", 100), ("y", 20)], x=1, y=2)
        self.assertEqual(d, {"x":1, "y":2})
        # iterator
        d = self._full_mapping({1:3, 2:4})
        d.update(self._full_mapping({1:2, 3:4, 5:6}).items())
        self.assertEqual(d, {1:2, 2:4, 3:4, 5:6})
        class SimpleUserDict:
            def __init__(self):
                self.d = {1:1, 2:2, 3:3}
            def keys(self):
                return self.d.keys()
            def __getitem__(self, i):
                return self.d[i]
        d.clear()
        d.update(SimpleUserDict())
        self.assertEqual(d, {1:1, 2:2, 3:3})
    def test_fromkeys(self):
        self.assertEqual(self.type2test.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        d = self._empty_mapping()
        self.assertTrue(not(d.fromkeys('abc') is d))
        self.assertEqual(d.fromkeys('abc'), {'a':None, 'b':None, 'c':None})
        self.assertEqual(d.fromkeys((4,5),0), {4:0, 5:0})
        self.assertEqual(d.fromkeys([]), {})
        def g():
            yield 1
        self.assertEqual(d.fromkeys(g()), {1:None})
        self.assertRaises(TypeError, {}.fromkeys, 3)
        # fromkeys on a subclass must produce instances of that subclass
        class dictlike(self.type2test): pass
        self.assertEqual(dictlike.fromkeys('a'), {'a':None})
        self.assertEqual(dictlike().fromkeys('a'), {'a':None})
        self.assertTrue(dictlike.fromkeys('a').__class__ is dictlike)
        self.assertTrue(dictlike().fromkeys('a').__class__ is dictlike)
        self.assertTrue(type(dictlike.fromkeys('a')) is dictlike)
        class mydict(self.type2test):
            def __new__(cls):
                return collections.UserDict()
        ud = mydict.fromkeys('ab')
        self.assertEqual(ud, {'a':None, 'b':None})
        self.assertIsInstance(ud, collections.UserDict)
        self.assertRaises(TypeError, dict.fromkeys)
        # exceptions from __init__ / iteration / __setitem__ propagate
        class Exc(Exception): pass
        class baddict1(self.type2test):
            def __init__(self):
                raise Exc()
        self.assertRaises(Exc, baddict1.fromkeys, [1])
        class BadSeq(object):
            def __iter__(self):
                return self
            def __next__(self):
                raise Exc()
        self.assertRaises(Exc, self.type2test.fromkeys, BadSeq())
        class baddict2(self.type2test):
            def __setitem__(self, key, value):
                raise Exc()
        self.assertRaises(Exc, baddict2.fromkeys, [1])
    def test_copy(self):
        d = self._full_mapping({1:1, 2:2, 3:3})
        self.assertEqual(d.copy(), {1:1, 2:2, 3:3})
        d = self._empty_mapping()
        self.assertEqual(d.copy(), d)
        self.assertIsInstance(d.copy(), d.__class__)
        self.assertRaises(TypeError, d.copy, None)
    def test_get(self):
        BasicTestMappingProtocol.test_get(self)
        d = self._empty_mapping()
        self.assertTrue(d.get('c') is None)
        self.assertEqual(d.get('c', 3), 3)
        d = self._full_mapping({'a' : 1, 'b' : 2})
        self.assertTrue(d.get('c') is None)
        self.assertEqual(d.get('c', 3), 3)
        self.assertEqual(d.get('a'), 1)
        self.assertEqual(d.get('a', 3), 1)
    def test_setdefault(self):
        BasicTestMappingProtocol.test_setdefault(self)
        d = self._empty_mapping()
        self.assertTrue(d.setdefault('key0') is None)
        d.setdefault('key0', [])
        self.assertTrue(d.setdefault('key0') is None)
        d.setdefault('key', []).append(3)
        self.assertEqual(d['key'][0], 3)
        d.setdefault('key', []).append(4)
        self.assertEqual(len(d['key']), 2)
    def test_popitem(self):
        BasicTestMappingProtocol.test_popitem(self)
        for copymode in -1, +1:
            # -1: b has same structure as a
            # +1: b is a.copy()
            for log2size in range(12):
                size = 2**log2size
                a = self._empty_mapping()
                b = self._empty_mapping()
                for i in range(size):
                    a[repr(i)] = i
                    if copymode < 0:
                        b[repr(i)] = i
                if copymode > 0:
                    b = a.copy()
                for i in range(size):
                    ka, va = ta = a.popitem()
                    self.assertEqual(va, int(ka))
                    kb, vb = tb = b.popitem()
                    self.assertEqual(vb, int(kb))
                    self.assertTrue(not(copymode < 0 and ta != tb))
                self.assertTrue(not a)
                self.assertTrue(not b)
    def test_pop(self):
        BasicTestMappingProtocol.test_pop(self)
        # Tests for pop with specified key
        d = self._empty_mapping()
        k, v = 'abc', 'def'
        self.assertEqual(d.pop(k, v), v)
        d[k] = v
        self.assertEqual(d.pop(k, 1), v)
class TestHashMappingProtocol(TestMappingProtocol):
    """Checks that only make sense for hash-based mappings: objects
    with misbehaving ``__hash__``/``__eq__``/``__repr__``, mutation
    during iteration, recursive repr, and equality comparison.
    """
    def test_getitem(self):
        TestMappingProtocol.test_getitem(self)
        class Exc(Exception): pass
        # __eq__ raising during lookup must not be masked as KeyError
        class BadEq(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 24
        d = self._empty_mapping()
        d[BadEq()] = 42
        self.assertRaises(KeyError, d.__getitem__, 23)
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        d = self._empty_mapping()
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.__getitem__, x)
    def test_fromkeys(self):
        TestMappingProtocol.test_fromkeys(self)
        class mydict(self.type2test):
            def __new__(cls):
                return collections.UserDict()
        ud = mydict.fromkeys('ab')
        self.assertEqual(ud, {'a':None, 'b':None})
        self.assertIsInstance(ud, collections.UserDict)
    def test_pop(self):
        TestMappingProtocol.test_pop(self)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        d = self._empty_mapping()
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.pop, x)
    def test_mutatingiteration(self):
        # growing the mapping while iterating must raise RuntimeError
        d = self._empty_mapping()
        d[1] = 1
        try:
            for i in d:
                d[i+1] = 1
        except RuntimeError:
            pass
        else:
            self.fail("changing dict size during iteration doesn't raise Error")
    def test_repr(self):
        d = self._empty_mapping()
        self.assertEqual(repr(d), '{}')
        d[1] = 2
        self.assertEqual(repr(d), '{1: 2}')
        # self-referencing mapping must render an ellipsis, not recurse
        d = self._empty_mapping()
        d[1] = d
        self.assertEqual(repr(d), '{1: {...}}')
        class Exc(Exception): pass
        class BadRepr(object):
            def __repr__(self):
                raise Exc()
        d = self._full_mapping({1: BadRepr()})
        self.assertRaises(Exc, repr, d)
    def test_eq(self):
        self.assertEqual(self._empty_mapping(), self._empty_mapping())
        self.assertEqual(self._full_mapping({1: 2}),
                         self._full_mapping({1: 2}))
        class Exc(Exception): pass
        class BadCmp(object):
            def __eq__(self, other):
                raise Exc()
            def __hash__(self):
                return 1
        d1 = self._full_mapping({BadCmp(): 1})
        d2 = self._full_mapping({1: 1})
        self.assertRaises(Exc, lambda: BadCmp()==1)
        self.assertRaises(Exc, lambda: d1==d2)
    def test_setdefault(self):
        TestMappingProtocol.test_setdefault(self)
        class Exc(Exception): pass
        class BadHash(object):
            fail = False
            def __hash__(self):
                if self.fail:
                    raise Exc()
                else:
                    return 42
        d = self._empty_mapping()
        x = BadHash()
        d[x] = 42
        x.fail = True
        self.assertRaises(Exc, d.setdefault, x, [])
| |
from rest_framework import viewsets, filters, status
from rest_framework.response import Response
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from django_filters.rest_framework import DjangoFilterBackend
from daiquiri.core.adapter import DatabaseAdapter
from daiquiri.core.viewsets import ChoicesViewSet
from daiquiri.core.permissions import HasModelPermission
from daiquiri.core.constants import ACCESS_LEVEL_CHOICES
from django.conf import settings
from .models import Schema, Table, Column, Function
from .serializers import (
SchemaSerializer,
TableSerializer,
ColumnSerializer,
FunctionSerializer
)
from .serializers.export import (
SchemaSerializer as ExportSchemaSerializer,
FunctionSerializer as ExportFunctionSerializer
)
from .serializers.management import (
SchemaSerializer as ManagementSchemaSerializer,
FunctionSerializer as ManagementFunctionSerializer
)
from .serializers.user import (
SchemaSerializer as UserSchemaSerializer,
FunctionSerializer as UserFunctionSerializer
)
class SchemaViewSet(viewsets.ModelViewSet):
    """CRUD API for metadata ``Schema`` records.

    Extra routes: ``management`` (unfiltered listing), ``user``
    (listing filtered by the requesting user's access level), and
    ``export`` (list and per-object export serializations).
    """
    permission_classes = (HasModelPermission, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = Schema.objects.all()
    serializer_class = SchemaSerializer
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_fields = ('name', 'access_level', 'metadata_access_level')
    search_fields = ('name', 'description')
    ordering_fields = ('name', 'access_level', 'metadata_access_level')
    def create(self, request, *args, **kwargs):
        # Validate and save the schema record itself.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        schema = serializer.save()
        # Optional discovery: when the payload carries a truthy
        # "discover" flag, introspect the real database and create
        # Table/Column metadata rows inheriting the schema's groups and
        # access levels. Rows whose metadata fails serializer validation
        # are skipped silently (best-effort discovery).
        if request.data.get('discover'):
            adapter = DatabaseAdapter()
            for table_metadata in adapter.fetch_tables(schema.name):
                table_metadata['schema'] = schema.id
                table_metadata['groups'] = [group.id for group in schema.groups.all()]
                for key in ['license', 'access_level', 'metadata_access_level']:
                    table_metadata[key] = getattr(schema, key)
                table_serializer = TableSerializer(data=table_metadata)
                if table_serializer.is_valid():
                    table = table_serializer.save()
                    for column_metadata in adapter.fetch_columns(schema.name, table.name):
                        column_metadata['table'] = table.id
                        column_metadata['groups'] = [group.id for group in table.groups.all()]
                        for key in ['access_level', 'metadata_access_level']:
                            column_metadata[key] = getattr(table, key)
                        column_serializer = ColumnSerializer(data=column_metadata)
                        if column_serializer.is_valid():
                            column_serializer.save()
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
    @action(detail=False)
    def management(self, request):
        # Unfiltered listing rendered with the management serializer.
        queryset = Schema.objects.all()
        serializer = ManagementSchemaSerializer(queryset, many=True)
        return Response(serializer.data)
    @action(detail=False, methods=['get'], permission_classes=[])
    def user(self, request):
        # filter the schemas which are published for the groups of the user
        queryset = Schema.objects.filter_by_access_level(self.request.user)
        serializer = UserSchemaSerializer(queryset, context={'request': request}, many=True)
        return Response(serializer.data)
    # NOTE(review): this detail=False action reuses url_name='export-detail',
    # identical to the detail=True action below -- presumably it should be
    # 'export-list'; confirm against reverse() users before renaming.
    @action(detail=False, methods=['get'], url_path='export', url_name='export-detail')
    def export_list(self, request):
        queryset = Schema.objects.all()
        serializer = ExportSchemaSerializer(queryset, many=True)
        return Response(serializer.data)
    @action(detail=True, methods=['get'], url_path='export', url_name='export-detail')
    def export_detail(self, request, pk=None):
        # NOTE(review): Schema.objects.get raises DoesNotExist for an
        # unknown pk instead of yielding a 404 response -- confirm intended.
        queryset = Schema.objects.get(pk=pk)
        serializer = ExportSchemaSerializer(queryset)
        return Response(serializer.data)
class TableViewSet(viewsets.ModelViewSet):
    """CRUD API for metadata ``Table`` records, plus a ``discover``
    route that introspects a physical table in the database.
    """
    permission_classes = (HasModelPermission, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = Table.objects.all()
    serializer_class = TableSerializer
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_fields = ('name', 'access_level', 'metadata_access_level')
    search_fields = ('name', 'description')
    ordering_fields = ('name', 'access_level', 'metadata_access_level')
    def create(self, request, *args, **kwargs):
        # Validate and save the table record itself.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        table = serializer.save()
        # Optional discovery: when the payload carries a truthy
        # "discover" flag, auto-create Column metadata from the real
        # database columns, inheriting the table's groups and access
        # levels. Invalid column metadata is skipped silently.
        if request.data.get('discover'):
            adapter = DatabaseAdapter()
            for column_metadata in adapter.fetch_columns(table.schema.name, table.name):
                column_metadata['table'] = table.id
                column_metadata['groups'] = [group.id for group in table.groups.all()]
                for key in ['access_level', 'metadata_access_level']:
                    column_metadata[key] = getattr(table, key)
                column_serializer = ColumnSerializer(data=column_metadata)
                if column_serializer.is_valid():
                    column_serializer.save()
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
    @action(detail=False, methods=['get'])
    def discover(self, request):
        # Introspect the physical table named by ?schema=...&table=...:
        # returns a one-element list with the table's metadata, row count
        # and size, or an empty list when either parameter is missing.
        schema_name = request.GET.get('schema')
        table_name = request.GET.get('table')
        if schema_name and table_name:
            adapter = DatabaseAdapter()
            table_metadata = adapter.fetch_table(schema_name, table_name)
            table_metadata['nrows'] = adapter.fetch_nrows(schema_name, table_name)
            table_metadata['size'] = adapter.fetch_size(schema_name, table_name)
            return Response([table_metadata])
        else:
            return Response([])
class ColumnViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for table columns."""
    permission_classes = (HasModelPermission, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = Column.objects.all()
    serializer_class = ColumnSerializer
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_fields = ('name', 'access_level', 'metadata_access_level')
    search_fields = ('name', 'description')
    ordering_fields = ('name', 'access_level', 'metadata_access_level')
    @action(detail=False, methods=['get'])
    def discover(self, request):
        """Fetch metadata for a single column straight from the database."""
        params = [request.GET.get(key) for key in ('schema', 'table', 'column')]
        if not all(params):
            # one or more query parameters missing -> empty result
            return Response([])
        metadata = DatabaseAdapter().fetch_column(*params)
        return Response([metadata])
class FunctionViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for database functions."""
    permission_classes = (HasModelPermission, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = Function.objects.all()
    serializer_class = FunctionSerializer
    filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
    filter_fields = ('name', 'access_level', 'metadata_access_level')
    search_fields = ('name', 'description')
    ordering_fields = ('name', 'access_level', 'metadata_access_level')
    @action(detail=False, methods=['get'])
    def management(self, request):
        """Return every function serialized for the management interface."""
        functions = Function.objects.all()
        return Response(ManagementFunctionSerializer(functions, many=True).data)
    @action(detail=False, methods=['get'])
    def export(self, request):
        """Return every function in the export representation."""
        functions = Function.objects.all()
        return Response(ExportFunctionSerializer(functions, many=True).data)
    @action(detail=False, methods=['get'], permission_classes=[])
    def user(self, request):
        """List only the functions visible to the requesting user."""
        visible = Function.objects.filter_by_access_level(self.request.user)
        return Response(UserFunctionSerializer(visible, many=True).data)
class TableTypeViewSet(ChoicesViewSet):
    """Read-only endpoint listing the available table type choices."""
    permission_classes = (IsAuthenticated, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = Table.TYPE_CHOICES
class LicenseViewSet(ChoicesViewSet):
    """Read-only endpoint listing the license choices from settings."""
    permission_classes = (IsAuthenticated, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = settings.LICENSE_CHOICES
class AccessLevelViewSet(ChoicesViewSet):
    """Read-only endpoint listing the access level choices."""
    permission_classes = (IsAuthenticated, )
    authentication_classes = (SessionAuthentication, TokenAuthentication)
    queryset = ACCESS_LEVEL_CHOICES
| |
# Copyright (c) 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import six
from manila.common import constants
from manila import exception
from manila.i18n import _, _LI
from manila import utils
LOG = log.getLogger(__name__)
class ShareInstanceAccess(object):
    """Applies access-rule changes to a share instance via driver and DB.

    Coordinates three concerns: serializing concurrent updates per share,
    translating add/delete requests into the driver's update_access() call
    (with a legacy allow/deny fallback), and keeping the instance's
    access_rules_status column in sync (UPDATING -> ACTIVE, or ERROR on
    failure / maintenance mode).
    """
    def __init__(self, db, driver):
        # db: database API used for share/access-rule persistence
        # driver: share backend driver that actually applies the rules
        self.db = db
        self.driver = driver
    def update_access_rules(self, context, share_instance_id, add_rules=None,
                            delete_rules=None, share_server=None):
        """Update access rules in driver and database for given share instance.

        :param context: current context
        :param share_instance_id: Id of the share instance model
        :param add_rules: list with ShareAccessMapping models or None - rules
            which should be added
        :param delete_rules: list with ShareAccessMapping models, "all", None
            - rules which should be deleted. If "all" is provided - all rules
            will be deleted.
        :param share_server: Share server model or None
        """
        share_instance = self.db.share_instance_get(
            context, share_instance_id, with_share_data=True)
        share_id = share_instance["share_id"]
        # Serialize updates per *share* (not per instance) with an external
        # (cross-process) lock so concurrent rule changes cannot interleave.
        @utils.synchronized(
            "update_access_rules_for_share_%s" % share_id, external=True)
        def _update_access_rules_locked(*args, **kwargs):
            return self._update_access_rules(*args, **kwargs)
        _update_access_rules_locked(
            context=context,
            share_instance_id=share_instance_id,
            add_rules=add_rules,
            delete_rules=delete_rules,
            share_server=share_server,
        )
    def _update_access_rules(self, context, share_instance_id, add_rules=None,
                             delete_rules=None, share_server=None):
        """Worker for update_access_rules(); runs under the per-share lock.

        May recurse (see _check_needs_refresh) when the rule set changed
        while the driver call was in flight.
        """
        # Reget share instance
        share_instance = self.db.share_instance_get(
            context, share_instance_id, with_share_data=True)
        # NOTE (rraja): preserve error state to trigger maintenance mode
        if share_instance['access_rules_status'] != constants.STATUS_ERROR:
            self.db.share_instance_update_access_status(
                context,
                share_instance_id,
                constants.STATUS_UPDATING)
        add_rules = add_rules or []
        delete_rules = delete_rules or []
        remove_rules = None
        # delete_rules may be the literal string "all" (any case).
        if six.text_type(delete_rules).lower() == "all":
            # NOTE(ganso): if we are deleting an instance or clearing all
            # the rules, we want to remove only the ones related
            # to this instance.
            delete_rules = self.db.share_access_get_all_for_instance(
                context, share_instance['id'])
            rules = []
        else:
            _rules = self.db.share_access_get_all_for_instance(
                context, share_instance['id'])
            rules = _rules
            if delete_rules:
                # 'rules' becomes the desired final rule set (all minus
                # the ones being deleted).
                delete_ids = [rule['id'] for rule in delete_rules]
                rules = list(filter(lambda r: r['id'] not in delete_ids,
                                    rules))
                # NOTE(ganso): trigger maintenance mode
                if share_instance['access_rules_status'] == (
                        constants.STATUS_ERROR):
                    # In maintenance mode deletions are deferred: they are
                    # kept in remove_rules and purged from the DB only after
                    # the driver call succeeds.
                    remove_rules = [
                        rule for rule in _rules
                        if rule["id"] in delete_ids]
                    delete_rules = []
        try:
            access_keys = None
            try:
                access_keys = self.driver.update_access(
                    context,
                    share_instance,
                    rules,
                    add_rules=add_rules,
                    delete_rules=delete_rules,
                    share_server=share_server
                )
            except NotImplementedError:
                # NOTE(u_glide): Fallback to legacy allow_access/deny_access
                # for drivers without update_access() method support
                self._update_access_fallback(add_rules, context, delete_rules,
                                             remove_rules, share_instance,
                                             share_server)
            if access_keys:
                self._validate_access_keys(rules, add_rules, delete_rules,
                                           access_keys)
                for access_id, access_key in access_keys.items():
                    self.db.share_access_update_access_key(
                        context, access_id, access_key)
        except Exception:
            # Any driver/DB failure flips the instance into ERROR state
            # before re-raising.
            self.db.share_instance_update_access_status(
                context,
                share_instance['id'],
                constants.STATUS_ERROR)
            raise
        # NOTE(ganso): remove rules after maintenance is complete
        if remove_rules:
            delete_rules = remove_rules
        self._remove_access_rules(context, delete_rules, share_instance['id'])
        share_instance = self.db.share_instance_get(context, share_instance_id,
                                                    with_share_data=True)
        if self._check_needs_refresh(context, rules, share_instance):
            # Rule set changed during the update; run another pass.
            self._update_access_rules(context, share_instance_id,
                                      share_server=share_server)
        else:
            self.db.share_instance_update_access_status(
                context,
                share_instance['id'],
                constants.STATUS_ACTIVE
            )
            LOG.info(_LI("Access rules were successfully applied for "
                         "share instance: %s"),
                     share_instance['id'])
    @staticmethod
    def _validate_access_keys(access_rules, add_rules, delete_rules,
                              access_keys):
        """Check driver-returned access_keys: dict of rule id -> str key.

        The key set must match exactly the rules just added (or, when no
        add/delete was requested, all current rules).

        :raises exception.Invalid: on wrong type, mismatched ids, or a
            non-string key value.
        """
        if not isinstance(access_keys, dict):
            msg = _("The access keys must be supplied as a dictionary that "
                    "maps rule IDs to access keys.")
            raise exception.Invalid(message=msg)
        actual_rule_ids = sorted(access_keys)
        expected_rule_ids = []
        if not (add_rules or delete_rules):
            expected_rule_ids = [rule['id'] for rule in access_rules]
        else:
            expected_rule_ids = [rule['id'] for rule in add_rules]
        if actual_rule_ids != sorted(expected_rule_ids):
            msg = (_("The rule IDs supplied: %(actual)s do not match the "
                     "rule IDs that are expected: %(expected)s.")
                   % {'actual': actual_rule_ids,
                      'expected': expected_rule_ids})
            raise exception.Invalid(message=msg)
        for access_key in access_keys.values():
            if not isinstance(access_key, six.string_types):
                msg = (_("Access key %s is not string type.") % access_key)
                raise exception.Invalid(message=msg)
    def _check_needs_refresh(self, context, rules, share_instance):
        """Return True when another update pass is required.

        Either the instance was marked UPDATING_MULTIPLE, or the rule ids in
        the DB no longer match the set we just applied.
        """
        rule_ids = set([rule['id'] for rule in rules])
        queried_rules = self.db.share_access_get_all_for_instance(
            context, share_instance['id'])
        queried_ids = set([rule['id'] for rule in queried_rules])
        access_rules_status = share_instance['access_rules_status']
        return (access_rules_status == constants.STATUS_UPDATING_MULTIPLE or
                rule_ids != queried_ids)
    def _update_access_fallback(self, add_rules, context, delete_rules,
                                remove_rules, share_instance, share_server):
        """Apply rules one-by-one via legacy allow_access/deny_access."""
        for rule in add_rules:
            LOG.info(
                _LI("Applying access rule '%(rule)s' for share "
                    "instance '%(instance)s'"),
                {'rule': rule['id'], 'instance': share_instance['id']}
            )
            self.driver.allow_access(
                context,
                share_instance,
                rule,
                share_server=share_server
            )
        # NOTE(ganso): Fallback mode temporary compatibility workaround
        if remove_rules:
            delete_rules = remove_rules
        for rule in delete_rules:
            LOG.info(
                _LI("Denying access rule '%(rule)s' from share "
                    "instance '%(instance)s'"),
                {'rule': rule['id'], 'instance': share_instance['id']}
            )
            self.driver.deny_access(
                context,
                share_instance,
                rule,
                share_server=share_server
            )
    def _remove_access_rules(self, context, access_rules, share_instance_id):
        """Delete the instance-access mappings for the given rules from DB."""
        if not access_rules:
            return
        for rule in access_rules:
            access_mapping = self.db.share_instance_access_get(
                context, rule['id'], share_instance_id)
            self.db.share_instance_access_delete(context, access_mapping['id'])
| |
"""Unit tests for shp.
"""
import os
import tempfile
from nose import SkipTest
from nose.tools import assert_equal
import networkx as nx
class TestShp(object):
    """Round-trip tests for nx.read_shp / nx.write_shp using OGR shapefiles.

    Fix: tearDown previously removed only ``testdir`` and ``shppath``,
    leaking the multiline shapefile (``multi_shppath``) created in setUp
    into the temp directory after the test run; it is now cleaned up too.
    """
    @classmethod
    def setupClass(cls):
        # Import ogr lazily so the whole class is skipped when GDAL/OGR
        # is not installed.
        global ogr
        try:
            from osgeo import ogr
        except ImportError:
            raise SkipTest('ogr not available.')
    def deletetmp(self, drv, *paths):
        """Delete every existing OGR data source in *paths*."""
        for p in paths:
            if os.path.exists(p):
                drv.DeleteDataSource(p)
    def setUp(self):
        """Create the fixture shapefiles: simple lines and one multiline."""
        def createlayer(driver, layerType=ogr.wkbLineString):
            # One "edges" layer with a 32-char "Name" string field.
            lyr = driver.CreateLayer("edges", None, layerType)
            namedef = ogr.FieldDefn("Name", ogr.OFTString)
            namedef.SetWidth(32)
            lyr.CreateField(namedef)
            return lyr
        drv = ogr.GetDriverByName("ESRI Shapefile")
        testdir = os.path.join(tempfile.gettempdir(), 'shpdir')
        shppath = os.path.join(tempfile.gettempdir(), 'tmpshp.shp')
        multi_shppath = os.path.join(tempfile.gettempdir(), 'tmp_mshp.shp')
        # Remove leftovers from any previous (possibly aborted) run.
        self.deletetmp(drv, testdir, shppath, multi_shppath)
        os.mkdir(testdir)
        self.names = ['a', 'b', 'c', 'c']  # edgenames
        self.paths = ([(1.0, 1.0), (2.0, 2.0)],
                      [(2.0, 2.0), (3.0, 3.0)],
                      [(0.9, 0.9), (4.0, 0.9), (4.0, 2.0)])
        self.simplified_names = ['a', 'b', 'c']  # edgenames
        self.simplified_paths = ([(1.0, 1.0), (2.0, 2.0)],
                                 [(2.0, 2.0), (3.0, 3.0)],
                                 [(0.9, 0.9), (4.0, 2.0)])
        self.multi_names = ['a', 'a', 'a', 'a']  # edgenames
        shp = drv.CreateDataSource(shppath)
        lyr = createlayer(shp)
        # One single-linestring feature per path.
        for path, name in zip(self.paths, self.names):
            feat = ogr.Feature(lyr.GetLayerDefn())
            g = ogr.Geometry(ogr.wkbLineString)
            for p in path:
                g.AddPoint_2D(*p)
            feat.SetGeometry(g)
            feat.SetField("Name", name)
            lyr.CreateFeature(feat)
        # create single record multiline shapefile for testing
        multi_shp = drv.CreateDataSource(multi_shppath)
        multi_lyr = createlayer(multi_shp, ogr.wkbMultiLineString)
        multi_g = ogr.Geometry(ogr.wkbMultiLineString)
        for path in self.paths:
            g = ogr.Geometry(ogr.wkbLineString)
            for p in path:
                g.AddPoint_2D(*p)
            multi_g.AddGeometry(g)
        multi_feat = ogr.Feature(multi_lyr.GetLayerDefn())
        multi_feat.SetGeometry(multi_g)
        multi_feat.SetField("Name", 'a')
        multi_lyr.CreateFeature(multi_feat)
        self.shppath = shppath
        self.multi_shppath = multi_shppath
        self.testdir = testdir
        self.drv = drv
    def testload(self):
        """Graphs read from shapefiles match the expected paths and names."""
        def compare_graph_paths_names(g, paths, names):
            expected = nx.DiGraph()
            for p in paths:
                expected.add_path(p)
            assert_equal(sorted(expected.node), sorted(g.node))
            assert_equal(sorted(expected.edges()), sorted(g.edges()))
            g_names = [g.get_edge_data(s, e)['Name'] for s, e in g.edges()]
            assert_equal(names, sorted(g_names))
        # simplified
        G = nx.read_shp(self.shppath)
        compare_graph_paths_names(G, self.simplified_paths, \
                                  self.simplified_names)
        # unsimplified
        G = nx.read_shp(self.shppath, simplify=False)
        compare_graph_paths_names(G, self.paths, self.names)
        # multiline unsimplified
        G = nx.read_shp(self.multi_shppath, simplify=False)
        compare_graph_paths_names(G, self.paths, self.multi_names)
    def checkgeom(self, lyr, expected):
        """Assert the layer's geometries (as WKT) equal *expected* as sets."""
        feature = lyr.GetNextFeature()
        actualwkt = []
        while feature:
            actualwkt.append(feature.GetGeometryRef().ExportToWkt())
            feature = lyr.GetNextFeature()
        assert_equal(sorted(expected), sorted(actualwkt))
    def test_geometryexport(self):
        """write_shp emits the expected node points and edge linestrings."""
        expectedpoints_simple = (
            "POINT (1 1)",
            "POINT (2 2)",
            "POINT (3 3)",
            "POINT (0.9 0.9)",
            "POINT (4 2)"
        )
        expectedlines_simple = (
            "LINESTRING (1 1,2 2)",
            "LINESTRING (2 2,3 3)",
            "LINESTRING (0.9 0.9,4.0 0.9,4 2)"
        )
        expectedpoints = (
            "POINT (1 1)",
            "POINT (2 2)",
            "POINT (3 3)",
            "POINT (0.9 0.9)",
            "POINT (4.0 0.9)",
            "POINT (4 2)"
        )
        expectedlines = (
            "LINESTRING (1 1,2 2)",
            "LINESTRING (2 2,3 3)",
            "LINESTRING (0.9 0.9,4.0 0.9)",
            "LINESTRING (4.0 0.9,4 2)"
        )
        tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
        G = nx.read_shp(self.shppath)
        nx.write_shp(G, tpath)
        shpdir = ogr.Open(tpath)
        self.checkgeom(shpdir.GetLayerByName("nodes"), expectedpoints_simple)
        self.checkgeom(shpdir.GetLayerByName("edges"), expectedlines_simple)
        # Test unsimplified
        # Nodes should have additional point,
        # edges should be 'flattened'
        G = nx.read_shp(self.shppath, simplify=False)
        nx.write_shp(G, tpath)
        shpdir = ogr.Open(tpath)
        self.checkgeom(shpdir.GetLayerByName("nodes"), expectedpoints)
        self.checkgeom(shpdir.GetLayerByName("edges"), expectedlines)
    def test_attributeexport(self):
        """Edge attributes survive a read/write round trip."""
        def testattributes(lyr, graph):
            feature = lyr.GetNextFeature()
            while feature:
                coords = []
                ref = feature.GetGeometryRef()
                last = ref.GetPointCount() - 1
                edge_nodes = (ref.GetPoint_2D(0), ref.GetPoint_2D(last))
                name = feature.GetFieldAsString('Name')
                assert_equal(graph.get_edge_data(*edge_nodes)['Name'], name)
                feature = lyr.GetNextFeature()
        tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
        G = nx.read_shp(self.shppath)
        nx.write_shp(G, tpath)
        shpdir = ogr.Open(tpath)
        edges = shpdir.GetLayerByName("edges")
        testattributes(edges, G)
    def test_wkt_export(self):
        """Wkt node/edge attributes are written out as the geometries."""
        G = nx.DiGraph()
        tpath = os.path.join(tempfile.gettempdir(), 'shpdir')
        points = (
            "POINT (0.9 0.9)",
            "POINT (4 2)"
        )
        line = (
            "LINESTRING (0.9 0.9,4 2)",
        )
        G.add_node(1, Wkt=points[0])
        G.add_node(2, Wkt=points[1])
        G.add_edge(1, 2, Wkt=line[0])
        try:
            nx.write_shp(G, tpath)
        except Exception as e:
            assert False, e
        shpdir = ogr.Open(tpath)
        self.checkgeom(shpdir.GetLayerByName("nodes"), points)
        self.checkgeom(shpdir.GetLayerByName("edges"), line)
    def tearDown(self):
        # Clean up everything setUp created -- including the multiline
        # shapefile, which was previously leaked.
        self.deletetmp(self.drv, self.testdir, self.shppath,
                       self.multi_shppath)
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import uuid
from mock import MagicMock, patch, ANY
from novaclient.client import Client
from novaclient.v2.flavors import FlavorManager, Flavor
from novaclient.v2.servers import Server, ServerManager
from oslo_config import cfg
from testtools.matchers import Equals, Is, Not
from trove.backup.models import Backup
from trove.common import exception
from trove.common import instance as rd_instance
from trove.common import remote
from trove.datastore import models as datastore_models
import trove.extensions.mgmt.instances.models as mgmtmodels
from trove.guestagent.api import API
from trove.instance.models import DBInstance
from trove.instance.models import InstanceServiceStatus
from trove.instance.tasks import InstanceTasks
from trove import rpc
from trove.tests.unittests import trove_testtools
from trove.tests.unittests.util import util
CONF = cfg.CONF
class MockMgmtInstanceTest(trove_testtools.TestCase):
    """Base test case: seeds a datastore/version and mocks the nova client."""
    @classmethod
    def setUpClass(cls):
        # Create a uniquely named datastore and datastore version that all
        # tests in the class share (removed again in tearDownClass).
        util.init_db()
        cls.version_id = str(uuid.uuid4())
        cls.datastore = datastore_models.DBDatastore.create(
            id=str(uuid.uuid4()),
            name='mysql' + str(uuid.uuid4()),
            default_version_id=cls.version_id
        )
        cls.version = datastore_models.DBDatastoreVersion.create(
            id=cls.version_id,
            datastore_id=cls.datastore.id,
            name='5.5' + str(uuid.uuid4()),
            manager='mysql',
            image_id=str(uuid.uuid4()),
            active=1,
            packages="mysql-server-5.5"
        )
        super(MockMgmtInstanceTest, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        cls.version.delete()
        cls.datastore.delete()
        super(MockMgmtInstanceTest, cls).tearDownClass()
    def setUp(self):
        # Replace the admin nova client with a MagicMock exposing mocked
        # servers and flavors managers; the patch is undone via addCleanup.
        self.context = trove_testtools.TroveTestContext(self)
        self.context.auth_token = 'some_secret_password'
        self.client = MagicMock(spec=Client)
        self.server_mgr = MagicMock(spec=ServerManager)
        self.client.servers = self.server_mgr
        self.flavor_mgr = MagicMock(spec=FlavorManager)
        self.client.flavors = self.flavor_mgr
        self.admin_client_patch = patch.object(
            remote, 'create_admin_nova_client', return_value=self.client)
        self.addCleanup(self.admin_client_patch.stop)
        self.admin_client_patch.start()
        CONF.set_override('host', '127.0.0.1')
        CONF.set_override('exists_notification_interval', 1)
        CONF.set_override('notification_service_id', {'mysql': '123'})
        super(MockMgmtInstanceTest, self).setUp()
    def do_cleanup(self, instance, status):
        """Delete the instance and service-status records a test created."""
        instance.delete()
        status.delete()
    def build_db_instance(self, status, task_status=InstanceTasks.NONE):
        """Persist a DBInstance plus a RUNNING service status.

        Returns the (instance, service_status) pair; the instance's
        server_status is then set to *status* and the task status to
        *task_status*.
        """
        instance = DBInstance(InstanceTasks.NONE,
                              name='test_name',
                              id=str(uuid.uuid4()),
                              flavor_id='flavor_1',
                              datastore_version_id=self.version.id,
                              compute_instance_id='compute_id_1',
                              server_id='server_id_1',
                              tenant_id='tenant_id_1',
                              server_status=rd_instance.ServiceStatuses.
                              BUILDING.api_status,
                              deleted=False)
        instance.save()
        service_status = InstanceServiceStatus(
            rd_instance.ServiceStatuses.RUNNING,
            id=str(uuid.uuid4()),
            instance_id=instance.id,
        )
        service_status.save()
        instance.set_task_status(task_status)
        instance.server_status = status
        instance.save()
        return instance, service_status
class TestNotificationTransformer(MockMgmtInstanceTest):
    """Tests for the base NotificationTransformer payload generation."""
    @classmethod
    def setUpClass(cls):
        super(TestNotificationTransformer, cls).setUpClass()
    @patch('trove.instance.models.LOG')
    def test_transformer(self, mock_logging):
        """Payloads include audit period bounds and the instance state."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        instance, service_status = self.build_db_instance(
            status, InstanceTasks.BUILDING)
        payloads = mgmtmodels.NotificationTransformer(
            context=self.context)()
        self.assertIsNotNone(payloads)
        payload = payloads[0]
        self.assertThat(payload['audit_period_beginning'],
                        Not(Is(None)))
        self.assertThat(payload['audit_period_ending'], Not(Is(None)))
        self.assertIn(status.lower(), [db['state'] for db in payloads])
        self.addCleanup(self.do_cleanup, instance, service_status)
    def test_get_service_id(self):
        """A known manager maps to its configured service id."""
        id_map = {
            'mysql': '123',
            'percona': 'abc'
        }
        transformer = mgmtmodels.NotificationTransformer(context=self.context)
        self.assertThat(transformer._get_service_id('mysql', id_map),
                        Equals('123'))
    @patch('trove.extensions.mgmt.instances.models.LOG')
    def test_get_service_id_unknown(self, mock_logging):
        """An unknown manager maps to the sentinel error id."""
        id_map = {
            'mysql': '123',
            'percona': 'abc'
        }
        transformer = mgmtmodels.NotificationTransformer(context=self.context)
        self.assertThat(transformer._get_service_id('m0ng0', id_map),
                        Equals('unknown-service-id-error'))
class TestNovaNotificationTransformer(MockMgmtInstanceTest):
    """Tests for NovaNotificationTransformer payloads and flavor caching."""
    @classmethod
    def setUpClass(cls):
        super(TestNovaNotificationTransformer, cls).setUpClass()
    def test_transformer_cache(self):
        """Each transformer instance gets its own flavor cache."""
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        with patch.object(self.flavor_mgr, 'get', return_value=flavor):
            transformer = mgmtmodels.NovaNotificationTransformer(
                context=self.context)
            transformer2 = mgmtmodels.NovaNotificationTransformer(
                context=self.context)
            self.assertThat(transformer._flavor_cache,
                            Not(Is(transformer2._flavor_cache)))
    def test_lookup_flavor(self):
        """Known flavor ids resolve to names; missing ones to 'unknown'."""
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'flav_1'
        transformer = mgmtmodels.NovaNotificationTransformer(
            context=self.context)
        with patch.object(self.flavor_mgr, 'get', side_effect=[flavor, None]):
            self.assertThat(transformer._lookup_flavor('1'),
                            Equals(flavor.name))
            self.assertThat(transformer._lookup_flavor('2'),
                            Equals('unknown'))
    def test_transformer(self):
        """Payloads carry flavor, user and service details of the instance."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        instance, service_status = self.build_db_instance(
            status, InstanceTasks.BUILDING)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        transformer = mgmtmodels.NovaNotificationTransformer(
            context=self.context)
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      instance,
                                                      server,
                                                      service_status)
        with patch.object(mgmtmodels, 'load_mgmt_instances',
                          return_value=[mgmt_instance]):
            with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                payloads = transformer()
                self.assertIsNotNone(payloads)
                payload = payloads[0]
                self.assertThat(payload['audit_period_beginning'],
                                Not(Is(None)))
                self.assertThat(payload['audit_period_ending'],
                                Not(Is(None)))
                self.assertThat(payload['state'], Not(Is(None)))
                self.assertThat(payload['instance_type'],
                                Equals('db.small'))
                self.assertThat(payload['instance_type_id'],
                                Equals('flavor_1'))
                self.assertThat(payload['user_id'], Equals('test_user_id'))
                self.assertThat(payload['service_id'], Equals('123'))
        self.addCleanup(self.do_cleanup, instance, service_status)
    @patch('trove.extensions.mgmt.instances.models.LOG')
    def test_transformer_invalid_datastore_manager(self, mock_logging):
        """An unrecognized datastore manager yields the error service id."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        instance, service_status = self.build_db_instance(
            status, InstanceTasks.BUILDING)
        version = datastore_models.DBDatastoreVersion.get_by(
            id=instance.datastore_version_id)
        version.update(manager='something invalid')
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      instance,
                                                      server,
                                                      service_status)
        transformer = mgmtmodels.NovaNotificationTransformer(
            context=self.context)
        with patch.object(mgmtmodels, 'load_mgmt_instances',
                          return_value=[mgmt_instance]):
            with patch.object(self.flavor_mgr,
                              'get', return_value=flavor):
                payloads = transformer()
                # assertions
                self.assertIsNotNone(payloads)
                payload = payloads[0]
                self.assertThat(payload['audit_period_beginning'],
                                Not(Is(None)))
                self.assertThat(payload['audit_period_ending'],
                                Not(Is(None)))
                self.assertIn(status.lower(),
                              [db['state']
                               for db in payloads])
                self.assertThat(payload['instance_type'],
                                Equals('db.small'))
                self.assertThat(payload['instance_type_id'],
                                Equals('flavor_1'))
                self.assertThat(payload['user_id'],
                                Equals('test_user_id'))
                self.assertThat(payload['service_id'],
                                Equals('unknown-service-id-error'))
        # restore the shared datastore version for subsequent tests
        version.update(manager='mysql')
        self.addCleanup(self.do_cleanup, instance, service_status)
    def test_transformer_shutdown_instance(self):
        """SHUTDOWN instances are excluded from the payload list."""
        status = rd_instance.ServiceStatuses.SHUTDOWN.api_status
        instance, service_status = self.build_db_instance(status)
        service_status.set_status(rd_instance.ServiceStatuses.SHUTDOWN)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      instance,
                                                      server,
                                                      service_status)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        transformer = mgmtmodels.NovaNotificationTransformer(
            context=self.context)
        with patch.object(Backup, 'running', return_value=None):
            self.assertThat(mgmt_instance.status, Equals('SHUTDOWN'))
            with patch.object(mgmtmodels, 'load_mgmt_instances',
                              return_value=[mgmt_instance]):
                with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                    payloads = transformer()
                    # assertion that SHUTDOWN instances are not reported
                    self.assertIsNotNone(payloads)
                    self.assertNotIn(status.lower(),
                                     [db['status']
                                      for db in payloads])
        self.addCleanup(self.do_cleanup, instance, service_status)
    def test_transformer_no_nova_instance(self):
        """Instances without a backing nova server are also excluded."""
        status = rd_instance.ServiceStatuses.SHUTDOWN.api_status
        instance, service_status = self.build_db_instance(status)
        service_status.set_status(rd_instance.ServiceStatuses.SHUTDOWN)
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      instance,
                                                      None,
                                                      service_status)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        transformer = mgmtmodels.NovaNotificationTransformer(
            context=self.context)
        with patch.object(Backup, 'running', return_value=None):
            self.assertThat(mgmt_instance.status, Equals('SHUTDOWN'))
            with patch.object(mgmtmodels, 'load_mgmt_instances',
                              return_value=[mgmt_instance]):
                with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                    payloads = transformer()
                    # assertion that SHUTDOWN instances are not reported
                    self.assertIsNotNone(payloads)
                    self.assertNotIn(status.lower(),
                                     [db['status']
                                      for db in payloads])
        self.addCleanup(self.do_cleanup, instance, service_status)
    def test_transformer_flavor_cache(self):
        """Calling the transformer twice reuses the cached flavor lookup."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        instance, service_status = self.build_db_instance(
            status, InstanceTasks.BUILDING)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      instance,
                                                      server,
                                                      service_status)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        transformer = mgmtmodels.NovaNotificationTransformer(
            context=self.context)
        with patch.object(mgmtmodels, 'load_mgmt_instances',
                          return_value=[mgmt_instance]):
            with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                transformer()
                payloads = transformer()
                self.assertIsNotNone(payloads)
                self.assertThat(len(payloads), Equals(1))
                payload = payloads[0]
                self.assertThat(payload['audit_period_beginning'],
                                Not(Is(None)))
                self.assertThat(payload['audit_period_ending'], Not(Is(None)))
                self.assertIn(status.lower(),
                              [db['state']
                               for db in payloads])
                self.assertThat(payload['instance_type'], Equals('db.small'))
                self.assertThat(payload['instance_type_id'],
                                Equals('flavor_1'))
                self.assertThat(payload['user_id'], Equals('test_user_id'))
        # ensure cache was used to get flavor second time
        self.flavor_mgr.get.assert_any_call('flavor_1')
        self.addCleanup(self.do_cleanup, instance, service_status)
class TestMgmtInstanceTasks(MockMgmtInstanceTest):
    """Tests for the periodic 'exists' notification task."""
    @classmethod
    def setUpClass(cls):
        super(TestMgmtInstanceTasks, cls).setUpClass()
    def test_public_exists_events(self):
        """publish_exist_events emits trove.instance.exists notifications."""
        status = rd_instance.ServiceStatuses.BUILDING.api_status
        instance, service_status = self.build_db_instance(
            status, task_status=InstanceTasks.BUILDING)
        server = MagicMock(spec=Server)
        server.user_id = 'test_user_id'
        mgmt_instance = mgmtmodels.SimpleMgmtInstance(self.context,
                                                      instance,
                                                      server,
                                                      service_status)
        flavor = MagicMock(spec=Flavor)
        flavor.name = 'db.small'
        notifier = MagicMock()
        with patch.object(rpc, 'get_notifier', return_value=notifier):
            with patch.object(mgmtmodels, 'load_mgmt_instances',
                              return_value=[mgmt_instance]):
                with patch.object(self.flavor_mgr, 'get', return_value=flavor):
                    self.assertThat(self.context.auth_token,
                                    Is('some_secret_password'))
                    with patch.object(notifier, 'info', return_value=None):
                        # invocation
                        mgmtmodels.publish_exist_events(
                            mgmtmodels.NovaNotificationTransformer(
                                context=self.context),
                            self.context)
                        # assertion
                        notifier.info.assert_any_call(
                            self.context, 'trove.instance.exists', ANY)
                        # the auth token is scrubbed before notifying
                        self.assertThat(self.context.auth_token, Is(None))
        self.addCleanup(self.do_cleanup, instance, service_status)
class TestMgmtInstanceDeleted(MockMgmtInstanceTest):
    """Tests for admin visibility of deleted vs. active instances."""
    def test_show_deleted_mgmt_instances(self):
        """Deleted instances require deleted=True to be found by admins."""
        args = {'deleted': 0, 'cluster_id': None}
        db_infos_active = DBInstance.find_all(**args)
        args = {'deleted': 1, 'cluster_id': None}
        db_infos_deleted = DBInstance.find_all(**args)
        args = {'cluster_id': None}
        # db_infos_all = DBInstance.find_all(**args)
        # TODO(SlickNik) Fix this assert to work reliably in the gate.
        # This fails intermittenly when the unit tests run in parallel.
        # self.assertTrue(db_infos_all.count() ==
        #                 db_infos_active.count() +
        #                 db_infos_deleted.count())
        with patch.object(self.context, 'is_admin', return_value=True):
            # Pick one representative of each kind, if any exist in the DB.
            deleted_instance = db_infos_deleted.all()[0] if len(
                db_infos_deleted.all()) > 0 else None
            active_instance = db_infos_active.all()[0] if len(
                db_infos_active.all()) > 0 else None
            if active_instance:
                instance = DBInstance.find_by(context=self.context,
                                              id=active_instance.id)
                self.assertEqual(active_instance.id, instance.id)
            if deleted_instance:
                # Without deleted=True the lookup must fail ...
                self.assertRaises(
                    exception.ModelNotFoundError,
                    DBInstance.find_by,
                    context=self.context,
                    id=deleted_instance.id,
                    deleted=False)
                # ... and with deleted=True it must succeed.
                instance = DBInstance.find_by(context=self.context,
                                              id=deleted_instance.id,
                                              deleted=True)
                self.assertEqual(deleted_instance.id, instance.id)
class TestMgmtInstancePing(MockMgmtInstanceTest):
    """Tests for pinging a guest agent through MgmtInstance.rpc_ping."""
    def test_rpc_ping(self):
        """rpc_ping returns True when the guest agent API responds."""
        status = rd_instance.ServiceStatuses.RUNNING.api_status
        instance, service_status = self.build_db_instance(
            status, task_status=InstanceTasks.NONE)
        # NOTE(review): the first positional argument is the instance,
        # whereas sibling tests pass self.context there -- confirm
        # MgmtInstance's signature actually expects this.
        mgmt_instance = mgmtmodels.MgmtInstance(instance,
                                                instance,
                                                None,
                                                service_status)
        with patch.object(API, 'rpc_ping', return_value=True):
            with patch.object(API, 'get_client'):
                self.assertTrue(mgmt_instance.rpc_ping())
        self.addCleanup(self.do_cleanup, instance, service_status)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import unittest
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun, TaskInstance as TI
from airflow.operators.check_operator import (
CheckOperator,
IntervalCheckOperator,
ThresholdCheckOperator,
ValueCheckOperator,
)
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.sql import BranchSQLOperator
from airflow.utils import timezone
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.providers.apache.hive import TestHiveEnvironment
# Fixed execution date and schedule interval shared by all tests below.
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
INTERVAL = datetime.timedelta(hours=12)

# Query results that BranchSQLOperator must interpret as "true".
# Covers both single-element rows and bare scalar values.
SUPPORTED_TRUE_VALUES = [
    ["True"],
    ["true"],
    ["1"],
    ["on"],
    [1],
    True,
    "true",
    "1",
    "on",
    1,
]
# Query results that BranchSQLOperator must interpret as "false".
SUPPORTED_FALSE_VALUES = [
    ["False"],
    ["false"],
    ["0"],
    ["off"],
    [0],
    False,
    "false",
    "0",
    "off",
    0,
]
class TestCheckOperator(unittest.TestCase):
    """Tests for CheckOperator failure modes on bad query results."""

    @mock.patch.object(CheckOperator, "get_db_hook")
    def test_execute_no_records(self, mock_get_db_hook):
        """An empty first row must raise AirflowException."""
        mock_get_db_hook.return_value.get_first.return_value = []
        operator = CheckOperator(sql="sql")
        with self.assertRaises(AirflowException):
            operator.execute()

    @mock.patch.object(CheckOperator, "get_db_hook")
    def test_execute_not_all_records_are_true(self, mock_get_db_hook):
        """A row containing any falsy cell must raise AirflowException."""
        mock_get_db_hook.return_value.get_first.return_value = ["data", ""]
        operator = CheckOperator(sql="sql")
        with self.assertRaises(AirflowException):
            operator.execute()
class TestValueCheckOperator(unittest.TestCase):
    """Tests for ValueCheckOperator templating and pass/fail evaluation."""

    def setUp(self):
        self.task_id = "test_task"
        self.conn_id = "default_conn"

    def _construct_operator(self, sql, pass_value, tolerance=None):
        """Build a ValueCheckOperator inside a throwaway DAG."""
        dag = DAG("test_dag", start_date=datetime.datetime(2017, 1, 1))
        return ValueCheckOperator(
            dag=dag,
            task_id=self.task_id,
            conn_id=self.conn_id,
            sql=sql,
            pass_value=pass_value,
            tolerance=tolerance,
        )

    def test_pass_value_template_string(self):
        # pass_value is a templated field: a Jinja string must be rendered.
        pass_value_str = "2018-03-22"
        operator = self._construct_operator("select date from tab1;", "{{ ds }}")
        operator.render_template_fields({"ds": pass_value_str})
        self.assertEqual(operator.task_id, self.task_id)
        self.assertEqual(operator.pass_value, pass_value_str)

    def test_pass_value_template_string_float(self):
        # Non-string pass_values are stringified by template rendering.
        pass_value_float = 4.0
        operator = self._construct_operator("select date from tab1;", pass_value_float)
        operator.render_template_fields({})
        self.assertEqual(operator.task_id, self.task_id)
        self.assertEqual(operator.pass_value, str(pass_value_float))

    @mock.patch.object(ValueCheckOperator, "get_db_hook")
    def test_execute_pass(self, mock_get_db_hook):
        # tolerance=1 means 100% (see the failure test's message), so 10 is
        # accepted for pass_value 5 (range [0, 10]).
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [10]
        mock_get_db_hook.return_value = mock_hook
        sql = "select value from tab1 limit 1;"
        operator = self._construct_operator(sql, 5, 1)
        operator.execute(None)
        mock_hook.get_first.assert_called_once_with(sql)

    @mock.patch.object(ValueCheckOperator, "get_db_hook")
    def test_execute_fail(self, mock_get_db_hook):
        # 11 falls outside 5 +/- 100%, so the check must fail.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [11]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("select value from tab1 limit 1;", 5, 1)
        with self.assertRaisesRegex(AirflowException, "Tolerance:100.0%"):
            operator.execute()
class TestIntervalCheckOperator(unittest.TestCase):
    """Tests for IntervalCheckOperator ratio formulas and zero handling."""

    def _construct_operator(self, table, metric_thresholds, ratio_formula, ignore_zero):
        return IntervalCheckOperator(
            task_id="test_task",
            table=table,
            metrics_thresholds=metric_thresholds,
            ratio_formula=ratio_formula,
            ignore_zero=ignore_zero,
        )

    def test_invalid_ratio_formula(self):
        # Only known formula names are accepted; "abs" is not one of them.
        with self.assertRaisesRegex(AirflowException, "Invalid diff_method"):
            self._construct_operator(
                table="test_table",
                metric_thresholds={
                    "f1": 1,
                },
                ratio_formula="abs",
                ignore_zero=False,
            )

    @mock.patch.object(IntervalCheckOperator, "get_db_hook")
    def test_execute_not_ignore_zero(self, mock_get_db_hook):
        # A zero metric with ignore_zero=False must raise.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [0]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table="test_table",
            metric_thresholds={
                "f1": 1,
            },
            ratio_formula="max_over_min",
            ignore_zero=False,
        )
        with self.assertRaises(AirflowException):
            operator.execute()

    @mock.patch.object(IntervalCheckOperator, "get_db_hook")
    def test_execute_ignore_zero(self, mock_get_db_hook):
        # The same zero metric passes silently when ignore_zero=True.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = [0]
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table="test_table",
            metric_thresholds={
                "f1": 1,
            },
            ratio_formula="max_over_min",
            ignore_zero=True,
        )
        operator.execute()

    @mock.patch.object(IntervalCheckOperator, "get_db_hook")
    def test_execute_min_max(self, mock_get_db_hook):
        mock_hook = mock.Mock()

        # get_first is called twice: first for the reference row, then for
        # the current row; the generator yields them in that order.
        def returned_row():
            rows = [
                [2, 2, 2, 2],  # reference
                [1, 1, 1, 1],  # current
            ]
            yield from rows

        mock_hook.get_first.side_effect = returned_row()
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table="test_table",
            metric_thresholds={
                "f0": 1.0,
                "f1": 1.5,
                "f2": 2.0,
                "f3": 2.5,
            },
            ratio_formula="max_over_min",
            ignore_zero=True,
        )
        # max/min ratio is 2 for every metric; f0-f2 are expected to breach
        # their thresholds while f3 (2.5) passes.
        with self.assertRaisesRegex(AirflowException, "f0, f1, f2"):
            operator.execute()

    @mock.patch.object(IntervalCheckOperator, "get_db_hook")
    def test_execute_diff(self, mock_get_db_hook):
        mock_hook = mock.Mock()

        def returned_row():
            rows = [
                [3, 3, 3, 3],  # reference
                [1, 1, 1, 1],  # current
            ]
            yield from rows

        mock_hook.get_first.side_effect = returned_row()
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator(
            table="test_table",
            metric_thresholds={
                "f0": 0.5,
                "f1": 0.6,
                "f2": 0.7,
                "f3": 0.8,
            },
            ratio_formula="relative_diff",
            ignore_zero=True,
        )
        # relative diff is (3-1)/3 ~= 0.67: f0 and f1 breach, f2 and f3 pass.
        with self.assertRaisesRegex(AirflowException, "f0, f1"):
            operator.execute()
class TestThresholdCheckOperator(unittest.TestCase):
    """Tests for ThresholdCheckOperator with numeric and SQL thresholds."""

    def _construct_operator(self, sql, min_threshold, max_threshold):
        """Build a ThresholdCheckOperator inside a throwaway DAG.

        min_threshold/max_threshold may each be a number or a SQL string.
        """
        dag = DAG("test_dag", start_date=datetime.datetime(2017, 1, 1))
        return ThresholdCheckOperator(
            task_id="test_task",
            sql=sql,
            min_threshold=min_threshold,
            max_threshold=max_threshold,
            dag=dag,
        )

    @mock.patch.object(ThresholdCheckOperator, "get_db_hook")
    def test_pass_min_value_max_value(self, mock_get_db_hook):
        # 10 lies within [1, 100] -> no exception.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = (10,)
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("Select avg(val) from table1 limit 1", 1, 100)
        operator.execute()

    @mock.patch.object(ThresholdCheckOperator, "get_db_hook")
    def test_fail_min_value_max_value(self, mock_get_db_hook):
        # 10 < min_threshold 20 -> failure message includes all three values.
        mock_hook = mock.Mock()
        mock_hook.get_first.return_value = (10,)
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("Select avg(val) from table1 limit 1", 20, 100)
        with self.assertRaisesRegex(AirflowException, "10.*20.0.*100.0"):
            operator.execute()

    @mock.patch.object(ThresholdCheckOperator, "get_db_hook")
    def test_pass_min_sql_max_sql(self, mock_get_db_hook):
        # The stub evaluates "Select N" to (N,), so thresholds come from SQL.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("Select 10", "Select 1", "Select 100")
        operator.execute()

    @mock.patch.object(ThresholdCheckOperator, "get_db_hook")
    def test_fail_min_sql_max_sql(self, mock_get_db_hook):
        # 10 < SQL-derived min 20 -> failure.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("Select 10", "Select 20", "Select 100")
        with self.assertRaisesRegex(AirflowException, "10.*20.*100"):
            operator.execute()

    @mock.patch.object(ThresholdCheckOperator, "get_db_hook")
    def test_pass_min_value_max_sql(self, mock_get_db_hook):
        # Mixed thresholds: numeric min, SQL max. 75 lies within [45, 100].
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("Select 75", 45, "Select 100")
        operator.execute()

    @mock.patch.object(ThresholdCheckOperator, "get_db_hook")
    def test_fail_min_sql_max_value(self, mock_get_db_hook):
        # 155 > numeric max 100 -> failure.
        mock_hook = mock.Mock()
        mock_hook.get_first.side_effect = lambda x: (int(x.split()[1]),)
        mock_get_db_hook.return_value = mock_hook
        operator = self._construct_operator("Select 155", "Select 45", 100)
        with self.assertRaisesRegex(AirflowException, "155.*45.*100.0"):
            operator.execute()
class TestSqlBranch(TestHiveEnvironment, unittest.TestCase):
    """
    Test for SQL Branch Operator
    """

    @classmethod
    def setUpClass(cls):
        # Start from a clean slate: remove any DagRuns/TaskInstances left
        # over from other test modules.
        super().setUpClass()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def setUp(self):
        # Fresh DAG with two downstream dummy branches; branch_3 is created
        # on demand by the list-branching test.
        super().setUp()
        self.dag = DAG(
            "sql_branch_operator_test",
            default_args={"owner": "airflow", "start_date": DEFAULT_DATE},
            schedule_interval=INTERVAL,
        )
        self.branch_1 = DummyOperator(task_id="branch_1", dag=self.dag)
        self.branch_2 = DummyOperator(task_id="branch_2", dag=self.dag)
        self.branch_3 = None

    def tearDown(self):
        super().tearDown()
        with create_session() as session:
            session.query(DagRun).delete()
            session.query(TI).delete()

    def test_unsupported_conn_type(self):
        """Check if BranchSQLOperator throws an exception for an unsupported connection type """
        op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="redis_default",
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        with self.assertRaises(AirflowException):
            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_invalid_conn(self):
        """Check if BranchSQLOperator throws an exception for an invalid connection """
        op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="invalid_connection",
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        with self.assertRaises(AirflowException):
            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_invalid_follow_task_true(self):
        """Check if BranchSQLOperator throws an exception when follow_task_ids_if_true is None """
        op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="invalid_connection",
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            follow_task_ids_if_true=None,
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        with self.assertRaises(AirflowException):
            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    def test_invalid_follow_task_false(self):
        """Check if BranchSQLOperator throws an exception when follow_task_ids_if_false is None """
        op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="invalid_connection",
            sql="SELECT count(1) FROM INFORMATION_SCHEMA.TABLES",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false=None,
            dag=self.dag,
        )
        with self.assertRaises(AirflowException):
            op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @pytest.mark.backend("mysql")
    def test_sql_branch_operator_mysql(self):
        """Check if BranchSQLOperator works with a MySQL backend """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @pytest.mark.backend("postgres")
    def test_sql_branch_operator_postgres(self):
        """Check if BranchSQLOperator works with a Postgres backend """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="postgres_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_branch_single_value_with_dag_run(self, mock_hook):
        """A scalar truthy result must follow branch_1 and skip branch_2 """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        mock_get_records.return_value = 1
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == "make_choice":
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == "branch_1":
                # NONE = not yet run (followed); SKIPPED = pruned branch.
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == "branch_2":
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f"Invalid task id {ti.task_id} found!")

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_branch_true_with_dag_run(self, mock_hook):
        """Every supported true-ish result must follow branch_1 """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        for true_value in SUPPORTED_TRUE_VALUES:
            mock_get_records.return_value = true_value
            branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            tis = dr.get_task_instances()
            for ti in tis:
                if ti.task_id == "make_choice":
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == "branch_1":
                    self.assertEqual(ti.state, State.NONE)
                elif ti.task_id == "branch_2":
                    self.assertEqual(ti.state, State.SKIPPED)
                else:
                    raise ValueError(f"Invalid task id {ti.task_id} found!")

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_branch_false_with_dag_run(self, mock_hook):
        """Every supported false-ish result must follow branch_2 """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        for false_value in SUPPORTED_FALSE_VALUES:
            mock_get_records.return_value = false_value
            branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            tis = dr.get_task_instances()
            for ti in tis:
                if ti.task_id == "make_choice":
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == "branch_1":
                    self.assertEqual(ti.state, State.SKIPPED)
                elif ti.task_id == "branch_2":
                    self.assertEqual(ti.state, State.NONE)
                else:
                    raise ValueError(f"Invalid task id {ti.task_id} found!")

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_branch_list_with_dag_run(self, mock_hook):
        """Checks if the BranchSQLOperator supports branching off to a list of tasks."""
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true=["branch_1", "branch_2"],
            follow_task_ids_if_false="branch_3",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.branch_3 = DummyOperator(task_id="branch_3", dag=self.dag)
        self.branch_3.set_upstream(branch_op)
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        mock_get_records.return_value = [["1"]]
        branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == "make_choice":
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == "branch_1":
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == "branch_2":
                self.assertEqual(ti.state, State.NONE)
            elif ti.task_id == "branch_3":
                self.assertEqual(ti.state, State.SKIPPED)
            else:
                raise ValueError(f"Invalid task id {ti.task_id} found!")

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_invalid_query_result_with_dag_run(self, mock_hook):
        """A result that is neither true-ish nor false-ish must raise """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        self.branch_1.set_upstream(branch_op)
        self.branch_2.set_upstream(branch_op)
        self.dag.clear()
        self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        mock_get_records.return_value = ["Invalid Value"]
        with self.assertRaises(AirflowException):
            branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_with_skip_in_branch_downstream_dependencies(self, mock_hook):
        """Test SQL Branch with skipping all downstream dependencies """
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        # branch_2 is downstream of both the operator and branch_1, so it
        # must not be skipped when the true path is taken.
        branch_op >> self.branch_1 >> self.branch_2
        branch_op >> self.branch_2
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        for true_value in SUPPORTED_TRUE_VALUES:
            mock_get_records.return_value = [true_value]
            branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            tis = dr.get_task_instances()
            for ti in tis:
                if ti.task_id == "make_choice":
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == "branch_1":
                    self.assertEqual(ti.state, State.NONE)
                elif ti.task_id == "branch_2":
                    self.assertEqual(ti.state, State.NONE)
                else:
                    raise ValueError(f"Invalid task id {ti.task_id} found!")

    @mock.patch("airflow.operators.sql.BaseHook")
    def test_with_skip_in_branch_downstream_dependencies2(self, mock_hook):
        """Test skipping downstream dependency for false condition"""
        branch_op = BranchSQLOperator(
            task_id="make_choice",
            conn_id="mysql_default",
            sql="SELECT 1",
            follow_task_ids_if_true="branch_1",
            follow_task_ids_if_false="branch_2",
            dag=self.dag,
        )
        branch_op >> self.branch_1 >> self.branch_2
        branch_op >> self.branch_2
        self.dag.clear()
        dr = self.dag.create_dagrun(
            run_id="manual__",
            start_date=timezone.utcnow(),
            execution_date=DEFAULT_DATE,
            state=State.RUNNING,
        )
        mock_hook.get_connection("mysql_default").conn_type = "mysql"
        mock_get_records = mock_hook.get_connection.return_value.get_hook.return_value.get_first
        for false_value in SUPPORTED_FALSE_VALUES:
            mock_get_records.return_value = [false_value]
            branch_op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            tis = dr.get_task_instances()
            for ti in tis:
                if ti.task_id == "make_choice":
                    self.assertEqual(ti.state, State.SUCCESS)
                elif ti.task_id == "branch_1":
                    # branch_1 is skipped, but branch_2 still runs via its
                    # direct edge from the operator.
                    self.assertEqual(ti.state, State.SKIPPED)
                elif ti.task_id == "branch_2":
                    self.assertEqual(ti.state, State.NONE)
                else:
                    raise ValueError(f"Invalid task id {ti.task_id} found!")
| |
# =============================================================================
# PROJECT CHRONO - http:#projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http:#projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Simone Benatti
# =============================================================================
#
# FEA for 3D beams and constraints
#
# =============================================================================
import math as m
import pychrono as chrono
import pychrono.fea as fea
import pychrono.pardisomkl as mkl
import pychrono.irrlicht as chronoirr
import os
# Create a motor between the truss and the crank:
class ChFunction_myf(chrono.ChFunction):
    """Motor angle profile: a smooth raised-cosine ramp from 0 to -pi over
    the first 0.4 s of simulated time, then a constant +pi afterwards."""

    def __init__(self):
        chrono.ChFunction.__init__(self)

    def Get_y(self, x):
        if x > 0.4:
            return chrono.CH_C_PI
        # Raised-cosine ramp: 0 at x == 0, reaching -pi at x == 0.4.
        ramp = (1.0 - m.cos(chrono.CH_C_PI * x / 0.4)) / 2.0
        return -chrono.CH_C_PI * ramp
# Output directory
out_dir = chrono.GetChronoOutputPath() + "BEAM_BUCKLING"

print( "Copyright (c) 2017 projectchrono.org \n")

# Create a Chrono::Engine physical system
my_system = chrono.ChSystemSMC()

# Create the Irrlicht visualization (open the Irrlicht device,
# bind a simple user interface, etc. etc.)
application = chronoirr.ChIrrApp(my_system, "Beams and constraints", chronoirr.dimension2du(800, 600), False, True)

# Easy shortcuts to add camera, lights, logo and sky in Irrlicht scene:
application.AddTypicalLogo()
application.AddTypicalSky()
application.AddTypicalLights()
application.AddTypicalCamera(chronoirr.vector3df(0.0, 0.6, -1.0))

# Key mechanism dimensions: L = horizontal beam length, H = vertical drop
# to the crank axis, K = crank length.
L = 1
H = 0.25
K = 0.05
vA = chrono.ChVectorD(0, 0, 0)       # fixed end of the horizontal beam
vC = chrono.ChVectorD(L, 0, 0)       # tip of the horizontal beam
vB = chrono.ChVectorD(L, -H, 0)      # lower end of the vertical beam
vG = chrono.ChVectorD(L - K, -H, 0)  # crank pivot (motor location)
# Small z-offset applied to the beams below — presumably to avoid exactly
# coincident nodes/constraints; confirm against the original C++ demo.
vd = chrono.ChVectorD(0, 0, 0.0001)

# Create a truss:
body_truss = chrono.ChBody()
body_truss.SetBodyFixed(True)
my_system.AddBody(body_truss)

# Attach a 'box' shape asset for visualization.
mboxtruss = chrono.ChBoxShape()
mboxtruss.GetBoxGeometry().Pos = chrono.ChVectorD(-0.01, 0, 0)
mboxtruss.GetBoxGeometry().SetLengths(chrono.ChVectorD(0.02, 0.2, 0.1))
body_truss.AddAsset(mboxtruss)

# Create body for crank
body_crank = chrono.ChBody()
body_crank.SetPos((vB + vG) * 0.5)  # crank centred between its two ends
my_system.AddBody(body_crank)

# Attach a 'box' shape asset for visualization.
mboxcrank = chrono.ChBoxShape()
mboxcrank.GetBoxGeometry().Pos = chrono.ChVectorD(0, 0, 0)
mboxcrank.GetBoxGeometry().SetLengths(chrono.ChVectorD(K, 0.02, 0.02))
body_crank.AddAsset(mboxcrank)

# Motor between the truss and the crank, driven by the custom angle function.
motor = chrono.ChLinkMotorRotationAngle()
motor.Initialize(body_truss, body_crank, chrono.ChFrameD(vG))
myfun = ChFunction_myf()
motor.SetAngleFunction(myfun)
my_system.Add(motor)
# Create a FEM mesh, that is a container for groups
# of elements and their referenced nodes.
my_mesh = fea.ChMesh()

# Create the horizontal beam (use an IGA-beam finite element type, for example)
beam_wy = 0.10
beam_wz = 0.01

# Create a section for the IGA beam.
# IGA beams require ChBeamSectionCosserat sections, containing at least
# a ChElasticityCosserat and ChInertiaCosserat models, and optional ChDampingCosserat and ChPlasticityCosserat.
minertia = fea.ChInertiaCosseratSimple()
minertia.SetAsRectangularSection(beam_wy, beam_wz, 2700)  # automatically sets A etc., from width, height, density

melasticity = fea.ChElasticityCosseratSimple()
melasticity.SetYoungModulus(73.0e9)
melasticity.SetGwithPoissonRatio(0.3)
melasticity.SetAsRectangularSection(beam_wy, beam_wz)

msection1 = fea.ChBeamSectionCosserat(minertia, melasticity)
msection1.SetDrawThickness(beam_wy, beam_wz)

builder_iga = fea.ChBuilderBeamIGA()
builder_iga.BuildBeam(my_mesh,        # the mesh to put the elements in
                      msection1,      # section of the beam
                      32,             # number of sections (spans)
                      vA,             # start point
                      vC,             # end point
                      chrono.VECT_Y,  # suggested Y direction of section
                      3)              # order (3 = cubic, etc)

# Anchor the first node, keep handles on the tip and a mid-span node.
builder_iga.GetLastBeamNodes().front().SetFixed(True)
node_tip = builder_iga.GetLastBeamNodes()[-1]
node_mid = builder_iga.GetLastBeamNodes()[17]

# Create the vertical beam (Here use Euler beams, for example).
msection2 = fea.ChBeamSectionEulerAdvanced()
hbeam_d = 0.024
msection2.SetDensity(2700)
msection2.SetYoungModulus(73.0e9)
msection2.SetGwithPoissonRatio(0.3)
msection2.SetBeamRaleyghDamping(0.000)
msection2.SetAsCircularSection(hbeam_d)

builderA = fea.ChBuilderBeamEuler()
builderA.BuildBeam(my_mesh,                    # the mesh where to put the created nodes and elements
                   msection2,                  # the ChBeamSectionEulerAdvanced to use for the ChElementBeamEuler elements
                   3,                          # the number of ChElementBeamEuler to create
                   vC + vd,                    # the 'A' point in space (beginning of beam)
                   vB + vd,                    # the 'B' point in space (end of beam)
                   chrono.ChVectorD(1, 0, 0))  # the 'Y' up direction of the section for the beam
node_top = builderA.GetLastBeamNodes()[0]
node_down = builderA.GetLastBeamNodes()[-1]

# Create a constraint between the vertical and horizontal beams:
# positions only are constrained; rotations stay free (a spherical joint).
constr_bb = chrono.ChLinkMateGeneric()
constr_bb.Initialize(node_top, node_tip, False, node_top.Frame(), node_top.Frame())
my_system.Add(constr_bb)
constr_bb.SetConstrainedCoords(True, True, True,     # x, y, z
                               False, False, False)  # Rx, Ry, Rz

# For example, attach small shape to show the constraint
msphereconstr2 = chrono.ChSphereShape()
msphereconstr2.GetSphereGeometry().rad = 0.01
constr_bb.AddAsset(msphereconstr2)

# Create a beam as a crank
msection3 = fea.ChBeamSectionEulerAdvanced()
crankbeam_d = 0.048
msection3.SetDensity(2700)
msection3.SetYoungModulus(73.0e9)
msection3.SetGwithPoissonRatio(0.3)
msection3.SetBeamRaleyghDamping(0.000)
msection3.SetAsCircularSection(crankbeam_d)

builderB = fea.ChBuilderBeamEuler()
builderB.BuildBeam(my_mesh,                    # the mesh where to put the created nodes and elements
                   msection3,                  # the ChBeamSectionEulerAdvanced to use for the ChElementBeamEuler elements
                   3,                          # the number of ChElementBeamEuler to create
                   vG + vd,                    # the 'A' point in space (beginning of beam)
                   vB + vd,                    # the 'B' point in space (end of beam)
                   chrono.ChVectorD(0, 1, 0))  # the 'Y' up direction of the section for the beam
node_crankG = builderB.GetLastBeamNodes()[0]
node_crankB = builderB.GetLastBeamNodes()[-1]

# Create a constraint between the crank beam and body crank:
# fully rigid (all translations and rotations locked).
constr_cbd = chrono.ChLinkMateGeneric()
constr_cbd.Initialize(node_crankG, body_crank, False, node_crankG.Frame(), node_crankG.Frame())
my_system.Add(constr_cbd)
constr_cbd.SetConstrainedCoords(True, True, True,  # x, y, z
                                True, True, True)  # Rx, Ry, Rz

# Create a constraint between the vertical beam and the crank beam:
# all coordinates locked except rotation about Z (a revolute joint).
constr_bc = chrono.ChLinkMateGeneric()
constr_bc.Initialize(node_down, node_crankB, False, node_crankB.Frame(), node_crankB.Frame())
my_system.Add(constr_bc)
constr_bc.SetConstrainedCoords(True, True, True,   # x, y, z
                               True, True, False)  # Rx, Ry, Rz

# For example, attach small shape to show the constraint
msphereconstr3 = chrono.ChSphereShape()
msphereconstr3.GetSphereGeometry().rad = 0.01
constr_bc.AddAsset(msphereconstr3)
#
# Final touches..
#

# We do not want gravity effect on FEA elements in this demo
my_mesh.SetAutomaticGravity(False)

# Remember to add the mesh to the system!
my_system.Add(my_mesh)

# ==Asset== attach a visualization of the FEM mesh.
# This will automatically update a triangle mesh (a ChTriangleMeshShape
# asset that is internally managed) by setting proper
# coordinates and vertex colors as in the FEM elements.
# Such triangle mesh can be rendered by Irrlicht or POVray or whatever
# postprocessor that can handle a colored ChTriangleMeshShape).
# Do not forget AddAsset() at the end!
mvisualizebeamA = fea.ChVisualizationFEAmesh(my_mesh)
mvisualizebeamA.SetFEMdataType(fea.ChVisualizationFEAmesh.E_PLOT_ELEM_BEAM_MX)
mvisualizebeamA.SetColorscaleMinMax(-500, 500)
mvisualizebeamA.SetSmoothFaces(True)
mvisualizebeamA.SetWireframe(False)
my_mesh.AddAsset(mvisualizebeamA)

# Second visualization layer: draw node coordinate systems as glyphs.
mvisualizebeamC = fea.ChVisualizationFEAmesh(my_mesh)
mvisualizebeamC.SetFEMglyphType(fea.ChVisualizationFEAmesh.E_GLYPH_NODE_CSYS)
mvisualizebeamC.SetFEMdataType(fea.ChVisualizationFEAmesh.E_PLOT_NONE)
mvisualizebeamC.SetSymbolsThickness(0.006)
mvisualizebeamC.SetSymbolsScale(0.01)
mvisualizebeamC.SetZbufferHide(False)
my_mesh.AddAsset(mvisualizebeamC)

# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem) on a per-item basis.
application.AssetBindAll()

# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
application.AssetUpdateAll()

# SIMULATION LOOP

# Use a solver that can handle stiffness matrices:
mkl_solver = mkl.ChSolverPardisoMKL()
my_system.SetSolver(mkl_solver)

application.SetTimestep(0.001)
application.SetVideoframeSaveInterval(10)

# Use the following for less numerical damping, 2nd order accuracy (but slower)
ts = chrono.ChTimestepperHHT(my_system)
ts.SetStepControl(False)
my_system.SetTimestepper(ts)
# Output data: make sure the output directory exists before any file in it
# is opened. Previously the script only printed an error when the directory
# was missing and never created it.
if not os.path.isdir(out_dir):
    try:
        os.makedirs(out_dir)
    except OSError:
        print("Error creating directory " + out_dir)
filename = out_dir + "/buckling_mid.dat"
#file_out1 = chrono.ChStreamOutAsciiFile(filename)
# Run the interactive simulation until the Irrlicht window is closed.
while (application.GetDevice().run()):
    application.BeginScene()
    application.DrawAll()
    # Draw a reference grid rotated by pi/2 about Z.
    chronoirr.ChIrrTools.drawGrid(application.GetVideoDriver(), 0.05, 0.05, 20, 20, chrono.ChCoordsysD(chrono.VNULL, chrono.CH_C_PI_2, chrono.VECT_Z),
                                  chronoirr.SColor(50, 90, 90, 90), True)
    application.DoStep()
    # Save output for the first 0.4 seconds
    #if (application.GetSystem().GetChTime() <= 0.4):
    #file_out1(application.GetSystem().GetChTime() + " " + node_mid.GetPos().z() + " " + node_mid.GetWvel_par().x() + "\n")
    application.EndScene()
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
sys.path.insert(0, os.getcwd())
import json
import platform
import subprocess
import sys
from glob import glob
VERSION = "v0.7"
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s %(filename)s:%(lineno)d %(levelname)s] %(message)s")
from code.common.system_list import system_list
def is_xavier():
    """Return True when running on an aarch64 host (treated as Jetson Xavier)."""
    arch = platform.processor()
    return arch == "aarch64"
def get_system_id():
    """Identify the current machine and return its system id string.

    On aarch64 only Jetson AGX Xavier / Xavier NX are recognized (read from
    the devicetree model file). Otherwise the GPU name and count are probed
    via pycuda, falling back to parsing ``nvidia-smi -L``, and matched
    against the entries of ``system_list`` (tuples of (id, gpu_name, count)).

    Raises:
        RuntimeError: if the device or GPU configuration is unrecognized.
    """
    arch = platform.processor()

    if is_xavier():
        # The only officially support aarch64 platform is Jetson Xavier
        with open("/sys/firmware/devicetree/base/model") as product_f:
            product_name = product_f.read()
        if "jetson" in product_name.lower():
            if "AGX" in product_name:
                return "AGX_Xavier"
            elif "NX" in product_name:
                return "Xavier_NX"
            else:
                raise RuntimeError("Unrecognized aarch64 device. Only AGX Xavier and Xavier NX are supported.")

    try:
        import pycuda.driver
        import pycuda.autoinit
        name = pycuda.driver.Device(0).name()
        count_actual = pycuda.driver.Device.count()
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; fall back to nvidia-smi on any pycuda failure.
        nvidia_smi_out = run_command("nvidia-smi -L", get_output=True, tee=False)
        # Strip empty lines
        tmp = [line for line in nvidia_smi_out if len(line) > 0]
        count_actual = len(tmp)
        if count_actual == 0:
            raise RuntimeError("nvidia-smi did not detect any GPUs:\n{:}".format(nvidia_smi_out))
        # Format: GPU #: <name> (UUID: <uuid>)
        name = tmp[0].split("(")[0].split(": ")[1].strip()

    # Pick the system_list entry whose GPU name matches and whose expected
    # GPU count is closest to the detected count; exact name matches win
    # over partial (substring) matches.
    system_id, matched, closest = ("", "", -1000)
    for system in system_list:
        if system[1] not in name:
            continue
        # Match exact name with higher priority than partial name
        if matched == name and system[1] != name:
            continue
        closer = (abs(count_actual - system[2]) < abs(count_actual - closest))
        if closer or (matched != name and system[1] == name):
            system_id, matched, closest = system
    if closest == -1000:
        raise RuntimeError("Cannot find valid configs for {:d}x {:}. Please pass in config path using --configs=<PATH>.".format(count_actual, name))
    elif closest != count_actual:
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning("Cannot find valid configs for {:d}x {:}. Using {:d}x {:} configs instead.".format(count_actual, name, closest, name))

    return system_id
class BENCHMARKS:
    """Registry of supported benchmark names and their aliases."""

    # Official names for benchmarks
    BERT = "bert"

    ALL = [BERT]

    # Whatever we might call it
    alias_map = {
        "BERT": BERT,
        "bert": BERT
    }

    @staticmethod
    def alias(name):
        """Map *name* to the official benchmark name.

        Declared as a staticmethod (it was a bare function before, which
        broke when called through an instance).

        Raises:
            ValueError: if *name* is not a known alias.
        """
        if name not in BENCHMARKS.alias_map:
            raise ValueError("Unknown benchmark: {:}".format(name))
        return BENCHMARKS.alias_map[name]
class SCENARIOS:
    """Registry of supported scenario names and their aliases."""

    # Official names for scenarios
    Offline = "Offline"

    ALL = [Offline]

    # Whatever we might call it
    alias_map = {
        "Offline": Offline,
        "offline": Offline
    }

    @staticmethod
    def alias(name):
        """Map *name* to the official scenario name.

        Declared as a staticmethod (it was a bare function before, which
        broke when called through an instance).

        Raises:
            ValueError: if *name* is not a known alias.
        """
        if name not in SCENARIOS.alias_map:
            raise ValueError("Unknown scenario: {:}".format(name))
        return SCENARIOS.alias_map[name]
def run_command(cmd, get_output=False, tee=True, custom_env=None):
    """
    Runs a command in a subshell.

    Args:
        cmd (str): The command to run.
        get_output (bool): If true, run_command will return the stdout output. Default: False.
        tee (bool): If true, captures output (if get_output is true) as well as prints output to stdout. Otherwise, does
                    not print to stdout.
        custom_env (dict): If not None, run the command with this environment instead of the inherited one.

    Returns:
        If get_output is true, the list of stdout lines (trailing newlines stripped);
        otherwise the return value of subprocess.check_call (0 on success).

    Raises:
        subprocess.CalledProcessError: If the command exits with a nonzero status.
    """
    logging.info("Running command: {:}".format(cmd))
    if not get_output:
        return subprocess.check_call(cmd, shell=True)

    if custom_env is not None:
        logging.info("Overriding Environment")
    # env=None means "inherit the current environment", so one Popen call covers both cases.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True, env=custom_env)
    output = []
    for line in iter(p.stdout.readline, b""):
        line = line.decode("utf-8")
        if tee:
            sys.stdout.write(line)
            sys.stdout.flush()
        output.append(line.rstrip("\n"))
    ret = p.wait()
    if ret != 0:
        raise subprocess.CalledProcessError(ret, cmd)
    return output
def args_to_string(d, blacklist=(), delimit=True, double_delimit=False):
    """
    Convert a dict of flag names to values into a "--flag=value ..." string.

    Args:
        d (dict): Maps flag name -> value. Entries whose value is None are skipped.
        blacklist (iterable): Flag names to omit. Default: empty. (Was a mutable
            list default; changed to a tuple so no shared state leaks across calls.)
        delimit (bool): If true, wrap non-numeric, non-bool values in double quotes.
        double_delimit (bool): If true, backslash-escape the quotes as well
            (for values that must survive a second level of shell parsing).

    Returns:
        str: Space-separated "--flag=value" tokens, in the dict's iteration order.
    """
    flags = []
    for flag, value in d.items():
        # Skip unset
        if value is None:
            continue
        # Skip blacklisted
        if flag in blacklist:
            continue
        if type(value) is bool:
            flags.append("--{:}={:}".format(flag, "true" if value else "false"))
        elif type(value) in [int, float] or not delimit:
            flags.append("--{:}={:}".format(flag, value))
        elif double_delimit:
            flags.append("--{:}=\\\"{:}\\\"".format(flag, value))
        else:
            flags.append("--{:}=\"{:}\"".format(flag, value))
    return " ".join(flags)
def flags_bool_to_int(d):
    """Replace every bool value in d with its int equivalent (True -> 1, False -> 0).

    Mutates d in place and returns it for convenience.
    """
    for key, value in d.items():
        if type(value) is bool:
            d[key] = int(value)
    return d
def dict_get(d, key, default=None):
    """Like dict.get, but also substitutes `default` when the stored value is None."""
    value = d.get(key)
    return default if value is None else value
def find_config_files(benchmarks, scenarios):
    """Return a comma-joined string of the existing config.json paths for every
    (benchmark, scenario) pair."""
    candidates = (
        "configs/{:}/{:}/config.json".format(benchmark, scenario)
        for scenario in scenarios
        for benchmark in benchmarks
    )
    # Only return existing files
    return ",".join(path for path in candidates if os.path.exists(path))
def load_configs(config_files):
    """Parse a comma-separated list of JSON config paths (glob patterns allowed).

    Returns:
        list of dict: One parsed JSON object per matched file.

    Raises:
        ValueError: If any pattern matches no files.
    """
    configs = []
    for pattern in config_files.split(","):
        matches = glob(pattern)
        if not matches:
            raise ValueError("Config file {:} cannot be found.".format(pattern))
        for path in matches:
            with open(path) as f:
                logging.info("Parsing config file {:} ...".format(path))
                configs.append(json.load(f))
    return configs
| |
import itertools
import warnings
from collections import Counter
from typing import Iterable, Sequence, Union
import pandas as pd
from . import dtypes
from .concat import concat
from .dataarray import DataArray
from .dataset import Dataset
from .merge import merge
from .utils import iterate_nested
def _infer_concat_order_from_positions(datasets):
    """Map each object in a nested list-of-lists to its N-D position tuple."""
    pairs = _infer_tile_ids_from_nested_list(datasets, ())
    return {tile_id: ds for tile_id, ds in pairs}
def _infer_tile_ids_from_nested_list(entry, current_pos):
"""
Given a list of lists (of lists...) of objects, returns a iterator
which returns a tuple containing the index of each object in the nested
list structure as the key, and the object. This can then be called by the
dict constructor to create a dictionary of the objects organised by their
position in the original nested list.
Recursively traverses the given structure, while keeping track of the
current position. Should work for any type of object which isn't a list.
Parameters
----------
entry : list[list[obj, obj, ...], ...]
List of lists of arbitrary depth, containing objects in the order
they are to be concatenated.
Returns
-------
combined_tile_ids : dict[tuple(int, ...), obj]
"""
if isinstance(entry, list):
for i, item in enumerate(entry):
yield from _infer_tile_ids_from_nested_list(item, current_pos + (i,))
else:
yield current_pos, entry
def _ensure_same_types(series, dim):
if series.dtype == object:
types = set(series.map(type))
if len(types) > 1:
try:
import cftime
cftimes = any(issubclass(t, cftime.datetime) for t in types)
except ImportError:
cftimes = False
types = ", ".join(t.__name__ for t in types)
error_msg = (
f"Cannot combine along dimension '{dim}' with mixed types."
f" Found: {types}."
)
if cftimes:
error_msg = (
f"{error_msg} If importing data directly from a file then "
f"setting `use_cftime=True` may fix this issue."
)
raise TypeError(error_msg)
def _infer_concat_order_from_coords(datasets):
    """Infer tile ids and concat dimensions from 1D dimension coordinates.

    For every dimension of the first dataset that has a coordinate whose
    values differ between datasets, orders the datasets by the coordinate's
    first value and appends that position to each dataset's tile id.

    Parameters
    ----------
    datasets : list of Dataset
        Assumed to all share the same set of variables (the caller groups
        them that way).

    Returns
    -------
    combined_ids : dict[tuple[int, ...], Dataset]
        Mapping of inferred N-D position to dataset.
    concat_dims : list
        Dimensions along which the datasets need concatenating.

    Raises
    ------
    ValueError
        If a varying dimension lacks an index on some dataset, if indexes
        are not all monotonic in the same direction, if any index is empty,
        or if more than one dataset was passed but no concat dimension found.
    """
    concat_dims = []
    tile_ids = [() for ds in datasets]
    # All datasets have same variables because they've been grouped as such
    ds0 = datasets[0]
    for dim in ds0.dims:
        # Check if dim is a coordinate dimension
        if dim in ds0:
            # Need to read coordinate values to do ordering
            indexes = [ds.xindexes.get(dim) for ds in datasets]
            if any(index is None for index in indexes):
                raise ValueError(
                    "Every dimension needs a coordinate for "
                    "inferring concatenation order"
                )
            # TODO (benbovy, flexible indexes): support flexible indexes?
            indexes = [index.to_pandas_index() for index in indexes]
            # If dimension coordinate values are same on every dataset then
            # should be leaving this dimension alone (it's just a "bystander")
            if not all(index.equals(indexes[0]) for index in indexes[1:]):
                # Infer order datasets should be arranged in along this dim
                concat_dims.append(dim)
                if all(index.is_monotonic_increasing for index in indexes):
                    ascending = True
                elif all(index.is_monotonic_decreasing for index in indexes):
                    ascending = False
                else:
                    raise ValueError(
                        "Coordinate variable {} is neither "
                        "monotonically increasing nor "
                        "monotonically decreasing on all datasets".format(dim)
                    )
                # Assume that any two datasets whose coord along dim starts
                # with the same value have the same coord values throughout.
                if any(index.size == 0 for index in indexes):
                    raise ValueError("Cannot handle size zero dimensions")
                # Only the first element of each index is compared for ordering
                first_items = pd.Index([index[0] for index in indexes])
                series = first_items.to_series()
                # ensure series does not contain mixed types, e.g. cftime calendars
                _ensure_same_types(series, dim)
                # Sort datasets along dim
                # We want rank but with identical elements given identical
                # position indices - they should be concatenated along another
                # dimension, not along this one
                rank = series.rank(
                    method="dense", ascending=ascending, numeric_only=False
                )
                order = rank.astype(int).values - 1
                # Append positions along extra dimension to structure which
                # encodes the multi-dimensional concatenation order
                tile_ids = [
                    tile_id + (position,) for tile_id, position in zip(tile_ids, order)
                ]
    if len(datasets) > 1 and not concat_dims:
        raise ValueError(
            "Could not find any dimension coordinates to use to "
            "order the datasets for concatenation"
        )
    combined_ids = dict(zip(tile_ids, datasets))
    return combined_ids, concat_dims
def _check_dimension_depth_tile_ids(combined_tile_ids):
"""
Check all tuples are the same length, i.e. check that all lists are
nested to the same depth.
"""
tile_ids = combined_tile_ids.keys()
nesting_depths = [len(tile_id) for tile_id in tile_ids]
if not nesting_depths:
nesting_depths = [0]
if set(nesting_depths) != {nesting_depths[0]}:
raise ValueError(
"The supplied objects do not form a hypercube because"
" sub-lists do not have consistent depths"
)
# return these just to be reused in _check_shape_tile_ids
return tile_ids, nesting_depths
def _check_shape_tile_ids(combined_tile_ids):
    """Check all lists along one dimension are same length.

    Raises
    ------
    ValueError
        If along any dimension some index occurs a different number of times
        than the others, i.e. the tile ids do not fill a complete hypercube.
        (Also fixes the error message, which previously ran the word
        "dimension" into the dimension number.)
    """
    tile_ids, nesting_depths = _check_dimension_depth_tile_ids(combined_tile_ids)
    for dim in range(nesting_depths[0]):
        indices_along_dim = [tile_id[dim] for tile_id in tile_ids]
        occurrences = Counter(indices_along_dim)
        # Every index value must occur equally often, otherwise some sub-list
        # is shorter or longer than its siblings along this dimension.
        if len(set(occurrences.values())) != 1:
            raise ValueError(
                "The supplied objects do not form a hypercube "
                "because sub-lists do not have consistent "
                "lengths along dimension " + str(dim)
            )
def _combine_nd(
    combined_ids,
    concat_dims,
    data_vars="all",
    coords="different",
    compat="no_conflicts",
    fill_value=dtypes.NA,
    join="outer",
    combine_attrs="drop",
):
    """
    Combine an N-dimensional structure of datasets into a single dataset by
    applying a sequence of concat/merge operations, one per dimension.

    No checks are performed on the consistency of the datasets, concat_dims
    or tile_IDs, because it is assumed that this has already been done.

    Parameters
    ----------
    combined_ids : Dict[Tuple[int, ...]], xarray.Dataset]
        Structure containing all datasets to be concatenated with "tile_IDs" as
        keys, which specify position within the desired final combined result.
    concat_dims : sequence of str
        The dimensions along which the datasets should be concatenated. Must be
        in order, and the length must match the length of the tuples used as
        keys in combined_ids. If the string is a dimension name then concat
        along that dimension, if it is None then merge.

    Returns
    -------
    combined_ds : xarray.Dataset
    """
    example_tile_id = next(iter(combined_ids))
    n_dims = len(example_tile_id)
    if len(concat_dims) != n_dims:
        raise ValueError(
            "concat_dims has length {} but the datasets "
            "passed are nested in a {}-dimensional structure".format(
                len(concat_dims), n_dims
            )
        )

    # Each pass combines along the first remaining dimension, shortening every
    # tile id tuple by one element, until a single dataset remains.
    for concat_dim in concat_dims:
        combined_ids = _combine_all_along_first_dim(
            combined_ids,
            dim=concat_dim,
            data_vars=data_vars,
            coords=coords,
            compat=compat,
            fill_value=fill_value,
            join=join,
            combine_attrs=combine_attrs,
        )
    # Exactly one value must remain; unpacking raises otherwise.
    [combined_ds] = combined_ids.values()
    return combined_ds
def _combine_all_along_first_dim(
    combined_ids,
    dim,
    data_vars,
    coords,
    compat,
    fill_value=dtypes.NA,
    join="outer",
    combine_attrs="drop",
):
    """Combine, along ``dim``, each group of datasets that shares a trailing
    tile id, returning a mapping keyed by those shortened tile ids."""
    # itertools.groupby only groups *consecutive* items, so sort by the
    # trailing tile id first.
    sorted_items = sorted(combined_ids.items(), key=_new_tile_id)
    grouped = itertools.groupby(sorted_items, key=_new_tile_id)

    new_combined_ids = {}
    for new_id, group in grouped:
        # Order the group's datasets by their full tile id before combining.
        ordered = dict(sorted(group))
        new_combined_ids[new_id] = _combine_1d(
            ordered.values(), dim, compat, data_vars, coords, fill_value,
            join, combine_attrs
        )
    return new_combined_ids
def _combine_1d(
    datasets,
    concat_dim,
    compat="no_conflicts",
    data_vars="all",
    coords="different",
    fill_value=dtypes.NA,
    join="outer",
    combine_attrs="drop",
):
    """
    Combine a 1D list of datasets: concatenate along ``concat_dim`` when it
    is given, otherwise merge them.
    """
    if concat_dim is None:
        # No dimension to concatenate along: the datasets hold different
        # variables and must be merged instead.
        return merge(
            datasets,
            compat=compat,
            fill_value=fill_value,
            join=join,
            combine_attrs=combine_attrs,
        )

    try:
        return concat(
            datasets,
            dim=concat_dim,
            data_vars=data_vars,
            coords=coords,
            compat=compat,
            fill_value=fill_value,
            join=join,
            combine_attrs=combine_attrs,
        )
    except ValueError as err:
        # concat raises this when the datasets don't all share the same
        # variables; point the user at the APIs that can handle that.
        if "encountered unexpected variable" in str(err):
            raise ValueError(
                "These objects cannot be combined using only "
                "xarray.combine_nested, instead either use "
                "xarray.combine_by_coords, or do it manually "
                "with xarray.concat, xarray.merge and "
                "xarray.align"
            )
        raise
def _new_tile_id(single_id_ds_pair):
tile_id, ds = single_id_ds_pair
return tile_id[1:]
def _nested_combine(
    datasets,
    concat_dims,
    compat,
    data_vars,
    coords,
    ids,
    fill_value=dtypes.NA,
    join="outer",
    combine_attrs="drop",
):
    """Order a nested list of datasets into a hypercube and combine it.

    ``ids`` falsy means the tile ids are inferred from the nesting structure
    of ``datasets``; otherwise ``ids`` supplies the pre-computed ordering.
    """
    if not datasets:
        return Dataset()

    # Arrange datasets for concatenation
    if ids:
        # Already sorted so just use the ids already passed
        combined_ids = dict(zip(ids, datasets))
    else:
        # Determine tile_IDs by structure of input in N-D
        # (i.e. ordering in list-of-lists)
        combined_ids = _infer_concat_order_from_positions(datasets)

    # Check that the inferred shape is combinable
    _check_shape_tile_ids(combined_ids)

    # Apply series of concatenate or merge operations along each dimension
    return _combine_nd(
        combined_ids,
        concat_dims,
        compat=compat,
        data_vars=data_vars,
        coords=coords,
        fill_value=fill_value,
        join=join,
        combine_attrs=combine_attrs,
    )
# Recursive type alias for a Dataset or an arbitrarily-nested list of lists
# of Datasets. Currently mypy cannot handle this recursion but other linters
# can (https://stackoverflow.com/a/53845083/3154101)
DATASET_HYPERCUBE = Union[Dataset, Iterable["DATASET_HYPERCUBE"]]  # type: ignore
def combine_nested(
    datasets: DATASET_HYPERCUBE,
    concat_dim: Union[
        str, DataArray, None, Sequence[Union[str, "DataArray", pd.Index, None]]
    ],
    compat: str = "no_conflicts",
    data_vars: str = "all",
    coords: str = "different",
    fill_value: object = dtypes.NA,
    join: str = "outer",
    combine_attrs: str = "drop",
) -> Dataset:
    """
    Explicitly combine an N-dimensional grid of datasets into one by using a
    succession of concat and merge operations along each dimension of the grid.

    Does not sort the supplied datasets under any circumstances, so the
    datasets must be passed in the order you wish them to be concatenated. It
    does align coordinates, but different variables on datasets can cause it to
    fail under some scenarios. In complex cases, you may need to clean up your
    data and use concat/merge explicitly.

    To concatenate along multiple dimensions the datasets must be passed as a
    nested list-of-lists, with a depth equal to the length of ``concat_dims``.
    ``combine_nested`` will concatenate along the top-level list first.

    Useful for combining datasets from a set of nested directories, or for
    collecting the output of a simulation parallelized along multiple
    dimensions.

    Parameters
    ----------
    datasets : list or nested list of Dataset
        Dataset objects to combine.
        If concatenation or merging along more than one dimension is desired,
        then datasets must be supplied in a nested list-of-lists.
    concat_dim : str, or list of str, DataArray, Index or None
        Dimensions along which to concatenate variables, as used by
        :py:func:`xarray.concat`.
        Set ``concat_dim=[..., None, ...]`` explicitly to disable concatenation
        and merge instead along a particular dimension.
        The position of ``None`` in the list specifies the dimension of the
        nested-list input along which to merge.
        Must be the same length as the depth of the list passed to
        ``datasets``.
    compat : {"identical", "equals", "broadcast_equals", \
              "no_conflicts", "override"}, optional
        String indicating how to compare variables of the same name for
        potential merge conflicts:

        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "equals": all values and dimensions must be the same.
        - "identical": all values, dimensions and attributes must be the
          same.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
    data_vars : {"minimal", "different", "all" or list of str}, optional
        Details are in the documentation of concat
    coords : {"minimal", "different", "all" or list of str}, optional
        Details are in the documentation of concat
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values.
    join : {"outer", "inner", "left", "right", "exact"}, optional
        String indicating how to combine differing indexes
        (excluding concat_dim) in objects

        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "drop"
        A callable or a string indicating how to combine attrs of the objects being
        merged:

        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.

        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.

    Returns
    -------
    combined : xarray.Dataset

    Examples
    --------

    A common task is collecting data from a parallelized simulation in which
    each process wrote out to a separate file. A domain which was decomposed
    into 4 parts, 2 each along both the x and y axes, requires organising the
    datasets into a doubly-nested list, e.g:

    >>> x1y1 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x1y1
    <xarray.Dataset>
    Dimensions:        (x: 2, y: 2)
    Dimensions without coordinates: x, y
    Data variables:
        temperature    (x, y) float64 1.764 0.4002 0.9787 2.241
        precipitation  (x, y) float64 1.868 -0.9773 0.9501 -0.1514
    >>> x1y2 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x2y1 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )
    >>> x2y2 = xr.Dataset(
    ...     {
    ...         "temperature": (("x", "y"), np.random.randn(2, 2)),
    ...         "precipitation": (("x", "y"), np.random.randn(2, 2)),
    ...     }
    ... )

    >>> ds_grid = [[x1y1, x1y2], [x2y1, x2y2]]
    >>> combined = xr.combine_nested(ds_grid, concat_dim=["x", "y"])
    >>> combined
    <xarray.Dataset>
    Dimensions:        (x: 4, y: 4)
    Dimensions without coordinates: x, y
    Data variables:
        temperature    (x, y) float64 1.764 0.4002 -0.1032 ... 0.04576 -0.1872
        precipitation  (x, y) float64 1.868 -0.9773 0.761 ... -0.7422 0.1549 0.3782

    ``combine_nested`` can also be used to explicitly merge datasets with
    different variables. For example if we have 4 datasets, which are divided
    along two times, and contain two different variables, we can pass ``None``
    to ``concat_dim`` to specify the dimension of the nested list over which
    we wish to use ``merge`` instead of ``concat``:

    >>> t1temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
    >>> t1temp
    <xarray.Dataset>
    Dimensions:      (t: 5)
    Dimensions without coordinates: t
    Data variables:
        temperature  (t) float64 -0.8878 -1.981 -0.3479 0.1563 1.23

    >>> t1precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})
    >>> t1precip
    <xarray.Dataset>
    Dimensions:        (t: 5)
    Dimensions without coordinates: t
    Data variables:
        precipitation  (t) float64 1.202 -0.3873 -0.3023 -1.049 -1.42

    >>> t2temp = xr.Dataset({"temperature": ("t", np.random.randn(5))})
    >>> t2precip = xr.Dataset({"precipitation": ("t", np.random.randn(5))})

    >>> ds_grid = [[t1temp, t1precip], [t2temp, t2precip]]
    >>> combined = xr.combine_nested(ds_grid, concat_dim=["t", None])
    >>> combined
    <xarray.Dataset>
    Dimensions:        (t: 10)
    Dimensions without coordinates: t
    Data variables:
        temperature    (t) float64 -0.8878 -1.981 -0.3479 ... -0.5097 -0.4381 -1.253
        precipitation  (t) float64 1.202 -0.3873 -0.3023 ... -0.2127 -0.8955 0.3869

    See also
    --------
    concat
    merge
    """
    has_datasets = any(isinstance(obj, Dataset) for obj in iterate_nested(datasets))
    has_unnamed_arrays = any(
        isinstance(obj, DataArray) and obj.name is None
        for obj in iterate_nested(datasets)
    )
    # Mixing Datasets with unnamed arrays is ambiguous, so refuse it outright.
    if has_datasets and has_unnamed_arrays:
        raise ValueError("Can't combine datasets with unnamed arrays.")

    # Normalise a single dimension (or None) to a one-element list.
    if concat_dim is None or isinstance(concat_dim, (str, DataArray)):
        concat_dim = [concat_dim]

    # The IDs argument tells _nested_combine that datasets aren't yet sorted
    return _nested_combine(
        datasets,
        concat_dims=concat_dim,
        compat=compat,
        data_vars=data_vars,
        coords=coords,
        ids=False,
        fill_value=fill_value,
        join=join,
        combine_attrs=combine_attrs,
    )
def vars_as_keys(ds):
    """Return a dataset's variable names, sorted, as a hashable grouping key."""
    names = sorted(ds)
    return tuple(names)
def _combine_single_variable_hypercube(
    datasets,
    fill_value=dtypes.NA,
    data_vars="all",
    coords="different",
    compat="no_conflicts",
    join="outer",
    combine_attrs="no_conflicts",
):
    """
    Attempt to combine a list of Datasets into a hypercube using their
    coordinates.

    All provided Datasets must belong to a single variable, ie. must be
    assigned the same variable name. This precondition is not checked by this
    function, so the caller is assumed to know what it's doing.

    This function is NOT part of the public API.
    """
    if not datasets:
        raise ValueError(
            "At least one Dataset is required to resolve variable names "
            "for combined hypercube."
        )

    combined_ids, concat_dims = _infer_concat_order_from_coords(list(datasets))

    if fill_value is None:
        # check that datasets form complete hypercube
        _check_shape_tile_ids(combined_ids)
    else:
        # check only that all datasets have same dimension depth for these vars
        _check_dimension_depth_tile_ids(combined_ids)

    # Concatenate along all of concat_dims one by one to create single ds
    concatenated = _combine_nd(
        combined_ids,
        concat_dims=concat_dims,
        data_vars=data_vars,
        coords=coords,
        compat=compat,
        fill_value=fill_value,
        join=join,
        combine_attrs=combine_attrs,
    )

    # Check the overall coordinates are monotonically increasing
    for dim in concat_dims:
        index = concatenated.indexes.get(dim)
        if not (index.is_monotonic_increasing or index.is_monotonic_decreasing):
            raise ValueError(
                "Resulting object does not have monotonic"
                " global indexes along dimension {}".format(dim)
            )

    return concatenated
# TODO remove empty list default param after version 0.21, see PR4696
def combine_by_coords(
    data_objects: Sequence[Union[Dataset, DataArray]] = [],
    compat: str = "no_conflicts",
    data_vars: str = "all",
    coords: str = "different",
    fill_value: object = dtypes.NA,
    join: str = "outer",
    combine_attrs: str = "no_conflicts",
    datasets: Sequence[Dataset] = None,
) -> Union[Dataset, DataArray]:
    """
    Attempt to auto-magically combine the given datasets (or data arrays)
    into one by using dimension coordinates.

    This function attempts to combine a group of datasets along any number of
    dimensions into a single entity by inspecting coords and metadata and using
    a combination of concat and merge.

    Will attempt to order the datasets such that the values in their dimension
    coordinates are monotonic along all dimensions. If it cannot determine the
    order in which to concatenate the datasets, it will raise a ValueError.
    Non-coordinate dimensions will be ignored, as will any coordinate
    dimensions which do not vary between each dataset.

    Aligns coordinates, but different variables on datasets can cause it
    to fail under some scenarios. In complex cases, you may need to clean up
    your data and use concat/merge explicitly (also see `combine_nested`).

    Works well if, for example, you have N years of data and M data variables,
    and each combination of a distinct time period and set of data variables is
    saved as its own dataset. Also useful for if you have a simulation which is
    parallelized in multiple dimensions, but has global coordinates saved in
    each file specifying the positions of points within the global domain.

    Parameters
    ----------
    data_objects : sequence of xarray.Dataset or sequence of xarray.DataArray
        Data objects to combine.
    compat : {"identical", "equals", "broadcast_equals", "no_conflicts", "override"}, optional
        String indicating how to compare variables of the same name for
        potential conflicts:

        - "broadcast_equals": all values must be equal when variables are
          broadcast against each other to ensure common dimensions.
        - "equals": all values and dimensions must be the same.
        - "identical": all values, dimensions and attributes must be the
          same.
        - "no_conflicts": only values which are not null in both datasets
          must be equal. The returned dataset then contains the combination
          of all non-null values.
        - "override": skip comparing and pick variable from first dataset
    data_vars : {"minimal", "different", "all" or list of str}, optional
        These data variables will be concatenated together:

        * "minimal": Only data variables in which the dimension already
          appears are included.
        * "different": Data variables which are not equal (ignoring
          attributes) across all datasets are also concatenated (as well as
          all for which dimension already appears). Beware: this option may
          load the data payload of data variables into memory if they are not
          already loaded.
        * "all": All data variables will be concatenated.
        * list of str: The listed data variables will be concatenated, in
          addition to the "minimal" data variables.

        If objects are DataArrays, `data_vars` must be "all".
    coords : {"minimal", "different", "all"} or list of str, optional
        As per the "data_vars" kwarg, but for coordinate variables.
    fill_value : scalar or dict-like, optional
        Value to use for newly missing values. If a dict-like, maps
        variable names to fill values. Use a data array's name to
        refer to its values. If None, raises a ValueError if
        the passed Datasets do not create a complete hypercube.
    join : {"outer", "inner", "left", "right", "exact"}, optional
        String indicating how to combine differing indexes in objects

        - "outer": use the union of object indexes
        - "inner": use the intersection of object indexes
        - "left": use indexes from the first object with each dimension
        - "right": use indexes from the last object with each dimension
        - "exact": instead of aligning, raise `ValueError` when indexes to be
          aligned are not equal
        - "override": if indexes are of same size, rewrite indexes to be
          those of the first object with that dimension. Indexes for the same
          dimension must have the same size in all objects.
    combine_attrs : {"drop", "identical", "no_conflicts", "drop_conflicts", \
                     "override"} or callable, default: "no_conflicts"
        A callable or a string indicating how to combine attrs of the objects being
        merged:

        - "drop": empty attrs on returned Dataset.
        - "identical": all attrs must be the same on every object.
        - "no_conflicts": attrs from all objects are combined, any that have
          the same name must also have the same value.
        - "drop_conflicts": attrs from all objects are combined, any that have
          the same name but different values are dropped.
        - "override": skip comparing and copy attrs from the first dataset to
          the result.

        If a callable, it must expect a sequence of ``attrs`` dicts and a context object
        as its only parameters.
    datasets : sequence of xarray.Dataset, optional
        Deprecated alias for ``data_objects``; passing it emits a warning and
        will raise an error from version 0.21 on.

    Returns
    -------
    combined : xarray.Dataset or xarray.DataArray
        Will return a Dataset unless all the inputs are unnamed DataArrays, in which case a
        DataArray will be returned.

    See also
    --------
    concat
    merge
    combine_nested

    Examples
    --------

    Combining two datasets using their common dimension coordinates. Notice
    they are concatenated based on the values in their dimension coordinates,
    not on their position in the list passed to `combine_by_coords`.

    >>> x1 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [0, 1], "x": [10, 20, 30]},
    ... )
    >>> x2 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [2, 3], "x": [10, 20, 30]},
    ... )
    >>> x3 = xr.Dataset(
    ...     {
    ...         "temperature": (("y", "x"), 20 * np.random.rand(6).reshape(2, 3)),
    ...         "precipitation": (("y", "x"), np.random.rand(6).reshape(2, 3)),
    ...     },
    ...     coords={"y": [2, 3], "x": [40, 50, 60]},
    ... )

    >>> x1
    <xarray.Dataset>
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 0 1
      * x              (x) int64 10 20 30
    Data variables:
        temperature    (y, x) float64 10.98 14.3 12.06 10.9 8.473 12.92
        precipitation  (y, x) float64 0.4376 0.8918 0.9637 0.3834 0.7917 0.5289

    >>> x2
    <xarray.Dataset>
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 2 3
      * x              (x) int64 10 20 30
    Data variables:
        temperature    (y, x) float64 11.36 18.51 1.421 1.743 0.4044 16.65
        precipitation  (y, x) float64 0.7782 0.87 0.9786 0.7992 0.4615 0.7805

    >>> x3
    <xarray.Dataset>
    Dimensions:        (y: 2, x: 3)
    Coordinates:
      * y              (y) int64 2 3
      * x              (x) int64 40 50 60
    Data variables:
        temperature    (y, x) float64 2.365 12.8 2.867 18.89 10.44 8.293
        precipitation  (y, x) float64 0.2646 0.7742 0.4562 0.5684 0.01879 0.6176

    >>> xr.combine_by_coords([x2, x1])
    <xarray.Dataset>
    Dimensions:        (y: 4, x: 3)
    Coordinates:
      * y              (y) int64 0 1 2 3
      * x              (x) int64 10 20 30
    Data variables:
        temperature    (y, x) float64 10.98 14.3 12.06 10.9 ... 1.743 0.4044 16.65
        precipitation  (y, x) float64 0.4376 0.8918 0.9637 ... 0.7992 0.4615 0.7805

    >>> xr.combine_by_coords([x3, x1])
    <xarray.Dataset>
    Dimensions:        (y: 4, x: 6)
    Coordinates:
      * y              (y) int64 0 1 2 3
      * x              (x) int64 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 10.98 14.3 12.06 nan ... nan 18.89 10.44 8.293
        precipitation  (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176

    >>> xr.combine_by_coords([x3, x1], join="override")
    <xarray.Dataset>
    Dimensions:        (y: 2, x: 6)
    Coordinates:
      * y              (y) int64 0 1
      * x              (x) int64 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 10.98 14.3 12.06 2.365 ... 18.89 10.44 8.293
        precipitation  (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176

    >>> xr.combine_by_coords([x1, x2, x3])
    <xarray.Dataset>
    Dimensions:        (y: 4, x: 6)
    Coordinates:
      * y              (y) int64 0 1 2 3
      * x              (x) int64 10 20 30 40 50 60
    Data variables:
        temperature    (y, x) float64 10.98 14.3 12.06 nan ... 18.89 10.44 8.293
        precipitation  (y, x) float64 0.4376 0.8918 0.9637 ... 0.5684 0.01879 0.6176

    You can also combine DataArray objects, but the behaviour will differ depending on
    whether or not the DataArrays are named. If all DataArrays are named then they will
    be promoted to Datasets before combining, and then the resultant Dataset will be
    returned, e.g.

    >>> named_da1 = xr.DataArray(
    ...     name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"
    ... )
    >>> named_da1
    <xarray.DataArray 'a' (x: 2)>
    array([1., 2.])
    Coordinates:
      * x        (x) int64 0 1

    >>> named_da2 = xr.DataArray(
    ...     name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x"
    ... )
    >>> named_da2
    <xarray.DataArray 'a' (x: 2)>
    array([3., 4.])
    Coordinates:
      * x        (x) int64 2 3

    >>> xr.combine_by_coords([named_da1, named_da2])
    <xarray.Dataset>
    Dimensions:  (x: 4)
    Coordinates:
      * x        (x) int64 0 1 2 3
    Data variables:
        a        (x) float64 1.0 2.0 3.0 4.0

    If all the DataArrays are unnamed, a single DataArray will be returned, e.g.

    >>> unnamed_da1 = xr.DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
    >>> unnamed_da2 = xr.DataArray(data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
    >>> xr.combine_by_coords([unnamed_da1, unnamed_da2])
    <xarray.DataArray (x: 4)>
    array([1., 2., 3., 4.])
    Coordinates:
      * x        (x) int64 0 1 2 3

    Finally, if you attempt to combine a mix of unnamed DataArrays with either named
    DataArrays or Datasets, a ValueError will be raised (as this is an ambiguous operation).
    """

    # TODO remove after version 0.21, see PR4696
    if datasets is not None:
        warnings.warn(
            "The datasets argument has been renamed to `data_objects`."
            " From 0.21 on passing a value for datasets will raise an error."
        )
        data_objects = datasets

    # Nothing to combine: return an empty Dataset.
    if not data_objects:
        return Dataset()

    # Unnamed DataArrays get special handling: they can only be combined with
    # each other (into one DataArray), never with named objects or Datasets.
    objs_are_unnamed_dataarrays = [
        isinstance(data_object, DataArray) and data_object.name is None
        for data_object in data_objects
    ]
    if any(objs_are_unnamed_dataarrays):
        if all(objs_are_unnamed_dataarrays):
            # Combine into a single larger DataArray
            temp_datasets = [
                unnamed_dataarray._to_temp_dataset()
                for unnamed_dataarray in data_objects
            ]

            combined_temp_dataset = _combine_single_variable_hypercube(
                temp_datasets,
                fill_value=fill_value,
                data_vars=data_vars,
                coords=coords,
                compat=compat,
                join=join,
                combine_attrs=combine_attrs,
            )
            return DataArray()._from_temp_dataset(combined_temp_dataset)
        else:
            # Must be a mix of unnamed dataarrays with either named dataarrays or with datasets
            # Can't combine these as we wouldn't know whether to merge or concatenate the arrays
            raise ValueError(
                "Can't automatically combine unnamed DataArrays with either named DataArrays or Datasets."
            )
    else:
        # Promote any named DataArrays to single-variable Datasets to simplify combining
        data_objects = [
            obj.to_dataset() if isinstance(obj, DataArray) else obj
            for obj in data_objects
        ]

        # Group by data vars
        sorted_datasets = sorted(data_objects, key=vars_as_keys)
        grouped_by_vars = itertools.groupby(sorted_datasets, key=vars_as_keys)

        # Perform the multidimensional combine on each group of data variables
        # before merging back together
        concatenated_grouped_by_data_vars = []
        for vars, datasets_with_same_vars in grouped_by_vars:
            concatenated = _combine_single_variable_hypercube(
                list(datasets_with_same_vars),
                fill_value=fill_value,
                data_vars=data_vars,
                coords=coords,
                compat=compat,
                join=join,
                combine_attrs=combine_attrs,
            )
            concatenated_grouped_by_data_vars.append(concatenated)

    return merge(
        concatenated_grouped_by_data_vars,
        compat=compat,
        fill_value=fill_value,
        join=join,
        combine_attrs=combine_attrs,
    )
| |
import tb_model
import data_processing
from scipy.stats import norm, beta
import numpy
import tool_kit
"""
This module provides an object-oriented structure for running model objects. Manual calibration and uncertainty are
provided as examples (being largely user-coded, with few dependencies for the main running processes).
"""
class ModelRunner:
    """
    Object to coordinate running of all the functions of the platform, such as manual scenario running and uncertainty.
    Stores model objects as attributes to itself.
    """

    def __init__(self, country, fixed_parameters, time_variant_parameters, mode='manual',
                 param_ranges_unc=None, epi_outputs_to_analyse=None, scenario_implementation=None,
                 uncertainty_accepted_runs=50, burn_in=5,
                 integration_times=None,
                 target=None,
                 additional_riskgroups=None):
        """
        Instantiation method for model runner.

        Args:
            country: String for country being simulated
            fixed_parameters: Dictionary of parameter set used to run manual calibration
            time_variant_parameters: Dictionary of time-variant parameter values passed to data processing
            mode: Whether scenario or uncertainty being run, set to either 'manual' or 'uncertainty'
            param_ranges_unc: List of dictionaries for the uncertainty parameters to be considered
            epi_outputs_to_analyse: List of strings for the epidemiological outcomes of interest (e.g. incidence)
            scenario_implementation: List of dictionaries for scenarios to be implemented as described in interface
            uncertainty_accepted_runs: How many accepted uncertainty runs are required
            burn_in: How many runs to be discarded as a burn in
            integration_times: Dictionary with 'start', 'finish' and 'step' keys for model integration
            target: Dictionary describing values for model to be calibrated against
            additional_riskgroups: Dictionary of additional risk-group stratifications passed to the model
        """

        # replace the former mutable default arguments ([], {}) with fresh
        # objects, so separate ModelRunner instances never share and silently
        # mutate the same default list/dictionary
        if param_ranges_unc is None:
            param_ranges_unc = []
        if epi_outputs_to_analyse is None:
            epi_outputs_to_analyse = []
        if scenario_implementation is None:
            scenario_implementation = []
        if integration_times is None:
            integration_times = {'start': 1900, 'finish': 2035, 'step': .05}
        if target is None:
            target = {'indicator': 'incidence', 'estimate': 150., 'sd': 30., 'year': 2016}
        if additional_riskgroups is None:
            additional_riskgroups = {}

        # convert arguments to attributes
        self.country = country
        self.fixed_parameters = fixed_parameters
        self.time_variant_parameters = time_variant_parameters
        self.mode = mode
        self.param_ranges_unc = param_ranges_unc
        self.epi_outputs_to_analyse = epi_outputs_to_analyse
        self.scenario_implementation = scenario_implementation
        # scenario 0 is baseline; one scenario per implementation dictionary
        self.scenarios_to_run = range(len(scenario_implementation))
        self.uncertainty_accepted_runs = uncertainty_accepted_runs
        self.burn_in = burn_in
        self.integration_times = integration_times
        self.target = target
        self.additional_riskgroups = additional_riskgroups

        # inputs obtained from spreadsheet reading and data processing
        self.inputs = data_processing.Inputs(self.country, self.fixed_parameters,
                                             self.time_variant_parameters, self.scenario_implementation)
        self.inputs.read_and_load_data()

        # dictionary for storing models, keyed by scenario number or 'uncertainty'
        self.model_dict = {}

        # output-related attributes
        self.epi_outputs = {}
        # starts as an empty list, replaced by a dictionary of stacked arrays
        # on the first call to store_uncertainty
        self.epi_outputs_uncertainty = []
        self.epi_outputs_uncertainty_centiles = {}

        # uncertainty-related attributes
        self.accepted_indices = []
        self.is_last_run_success = True

    ###############################################
    ### Master methods to run all other methods ###
    ###############################################

    def master_runner(self):
        """
        Calls methods to run model with either of the two fundamental approaches presented here.
        """
        if self.mode == 'manual':
            self.run_manual_calibration()
        elif self.mode == 'uncertainty':
            self.run_uncertainty()
            self.find_uncertainty_centiles()

    def run_manual_calibration(self):
        """
        Runs each of the scenarios a single time, starting from baseline scenario with fixed parameter values.
        """
        for scenario in self.scenarios_to_run:
            # name and initialise model
            self.model_dict[scenario] \
                = tb_model.TbModel(self.fixed_parameters, self.inputs, scenario, self.additional_riskgroups)

            # describe model and integrate
            print('Running scenario ' + str(scenario) + ' conditions for ' + self.country +
                  ' using single parameter set')
            self.model_dict[scenario].make_times(self.integration_times['start'],
                                                 self.integration_times['finish'],
                                                 self.integration_times['step'])
            self.model_dict[scenario].integrate(method='explicit')

            # find epidemiological model outputs for each scenario
            self.epi_outputs[scenario] = self.find_epi_outputs(scenario)

    ####################################
    ### Model interpretation methods ###
    ####################################

    def find_epi_outputs(self, scenario):
        """
        Method to extract requested epidemiological outputs from the models. Returns a data object (rather than adding
        data directly to self) in order that it can be used for both scenario running and uncertainty.
        """

        # first create a list of model times as de facto keys for the series of lists created below
        epi_outputs = {'times': self.model_dict[scenario].times}

        # initialise lists
        for output in self.epi_outputs_to_analyse:
            epi_outputs[output] = [0.] * len(epi_outputs['times'])

        # population: sum of all compartment sizes at each time point
        if 'population' in self.epi_outputs_to_analyse:
            for compartment in self.model_dict[scenario].compartments:
                epi_outputs['population'] \
                    = tool_kit.elementwise_list_addition(self.model_dict[scenario].get_compartment_soln(compartment),
                                                         epi_outputs['population'])
        total_denominator = tool_kit.prepare_denominator(epi_outputs['population'])

        # incidence (the 1e5 factor presumably expresses rates per 100,000 population)
        if 'incidence' in self.epi_outputs_to_analyse:

            # fixed flows
            for from_label, to_label, rate in self.model_dict[scenario].fixed_transfer_rate_flows:
                if 'latent' in from_label and 'active' in to_label:
                    incidence_increment \
                        = self.model_dict[scenario].get_compartment_soln(from_label) * rate / total_denominator * 1e5
                    epi_outputs['incidence'] \
                        = tool_kit.elementwise_list_addition(incidence_increment, epi_outputs['incidence'])

            # variable flows (note that there are currently none to which this is applicable, but could be)
            for from_label, to_label, rate in self.model_dict[scenario].var_transfer_rate_flows:
                if 'latent' in from_label and 'active' in to_label:
                    incidence_increment \
                        = self.model_dict[scenario].get_compartment_soln(from_label) \
                        * self.model_dict[scenario].get_var_soln(rate) / total_denominator * 1e5
                    epi_outputs['incidence'] \
                        = tool_kit.elementwise_list_addition(incidence_increment, epi_outputs['incidence'])

        # prevalence: all compartments except the susceptible and latent ones
        if 'prevalence' in self.epi_outputs_to_analyse:
            for label in self.model_dict[scenario].labels:
                if 'susceptible' not in label and 'latent' not in label:
                    prevalence_increment = self.model_dict[scenario].get_compartment_soln(label) \
                        / total_denominator * 1e5
                    epi_outputs['prevalence'] \
                        = tool_kit.elementwise_list_addition(prevalence_increment, epi_outputs['prevalence'])

        return epi_outputs

    def find_uncertainty_centiles(self):
        """
        Find percentiles from uncertainty dictionaries.
        """
        # discard burn-in runs, keeping only accepted runs after the burn-in index
        accepted_no_burn_in_indices = [i for i in self.accepted_indices if i > self.burn_in]
        for output in self.epi_outputs_to_analyse:
            # rows are accepted runs, columns are time points; 2.5/50/97.5 gives
            # the median and a 95% credible interval
            self.epi_outputs_uncertainty_centiles[output] \
                = numpy.percentile(self.epi_outputs_uncertainty[output][accepted_no_burn_in_indices, :],
                                   [2.5, 50., 97.5], axis=0)

    ###########################
    ### Uncertainty methods ###
    ###########################

    def run_uncertainty(self):
        """
        Main method to run all the uncertainty processes (Metropolis-style accept/reject loop).
        """

        # prepare for uncertainty loop
        print('Uncertainty analysis commenced')
        n_accepted = 0
        prev_log_likelihood = -1e10
        run = 0
        self.model_dict['uncertainty'] \
            = tb_model.TbModel(self.fixed_parameters, self.inputs, 0, self.additional_riskgroups)

        # find initial set of parameters
        new_param_list = []
        for i in range(len(self.param_ranges_unc)):
            new_param_list.append(self.param_ranges_unc[i]['start'])
        params = new_param_list

        # until a sufficient number of parameters are accepted
        while n_accepted < self.uncertainty_accepted_runs:

            # run baseline integration
            self.run_with_params(new_param_list)

            # store regardless of acceptance (provided model ran successfully)
            if self.is_last_run_success:

                # calculate uncertainty outputs and store results
                self.store_uncertainty()

                # calculate prior
                prior_log_likelihood = 0.
                for i in range(len(self.param_ranges_unc)):
                    param_val = new_param_list[i]
                    bound_low, bound_high \
                        = self.param_ranges_unc[i]['lower_bound'], self.param_ranges_unc[i]['upper_bound']

                    # normalise and find log PDF from appropriate distribution
                    if self.param_ranges_unc[i]['distribution'] == 'beta':
                        prior_log_likelihood += beta.logpdf((param_val - bound_low) / (bound_high - bound_low), 2., 2.)
                    elif self.param_ranges_unc[i]['distribution'] == 'uniform':
                        prior_log_likelihood += numpy.log(1. / (bound_high - bound_low))

                # calculate posterior: model output for the target indicator in the
                # target year, compared against the target estimate (computed once,
                # where previously the same expression was evaluated twice)
                epi_result = self.epi_outputs['uncertainty'][self.target['indicator']][
                    self.find_time_index(self.target['year'], 'uncertainty')]
                posterior_log_likelihood = norm.logpdf(epi_result, self.target['estimate'], self.target['sd'])

                # determine acceptance
                log_likelihood = prior_log_likelihood + posterior_log_likelihood
                accepted = numpy.random.binomial(n=1, p=min(1., numpy.exp(log_likelihood - prev_log_likelihood)))

                # update likelihood, parameters and record acceptance for next run
                if bool(accepted):
                    n_accepted += 1
                    prev_log_likelihood = log_likelihood
                    params = new_param_list
                    self.accepted_indices += [run]

                # update run number
                run += 1

                # report on progress
                print('run')
                print(run)
                print('accepted')
                print(accepted)
                print('incidence')
                print(epi_result)
                for i in range(len(self.param_ranges_unc)):
                    print(self.param_ranges_unc[i]['name'])
                    print(new_param_list[i])
                print('\n')

            # obtain a new parameter list for the next run (proposed from the
            # last *accepted* parameter set)
            new_param_list = self.update_params(params)

    def run_with_params(self, params):
        """
        Run the model with the proposed parameter set.

        Args:
            params: The parameters to be set in the model.
        """
        param_dict = self.convert_param_list_to_dict(params)

        # set parameters and run
        for key in param_dict:
            self.model_dict['uncertainty'].set_param(key, param_dict[key])
        self.is_last_run_success = True
        try:
            self.model_dict['uncertainty'].make_times(self.integration_times['start'],
                                                      self.integration_times['finish'],
                                                      self.integration_times['step'])
            self.model_dict['uncertainty'].integrate()
        except Exception:
            # a failed integration is expected for some proposed parameter sets,
            # so flag it rather than crashing the whole uncertainty loop
            # (previously a bare except and a Python 2 print statement)
            print('Warning: parameters=%s failed with model' % params)
            self.is_last_run_success = False

    def convert_param_list_to_dict(self, params):
        """
        Extract parameters from list into dictionary that can be used for setting in the model through the
        set_model_with_params method.

        Args:
            params: Ordered list of parameter values, aligned with self.param_ranges_unc.
        Returns:
            param_dict: The dictionary returned in appropriate format (name -> value).
        """
        return {self.param_ranges_unc[i]['name']: params[i] for i in range(len(self.param_ranges_unc))}

    def update_params(self, old_params):
        """
        Update all the parameter values being used in the uncertainty analysis.

        Args:
            old_params: The last accepted parameter list to propose from.
        Returns:
            new_params: The new parameters to be used in the next model run.
        """
        new_params = []

        # iterate through each uncertainty parameter
        for i in range(len(self.param_ranges_unc)):
            search_width = self.param_ranges_unc[i]['search_width']

            # rejection-sample a normal proposal until it falls within bounds
            # (-100. is a sentinel guaranteed to fail the first bounds check)
            random = -100.
            while random < self.param_ranges_unc[i]['lower_bound'] or random > self.param_ranges_unc[i]['upper_bound']:
                random = norm.rvs(loc=old_params[i], scale=search_width, size=1)
            new_params.append(float(random[0]))

        return new_params

    def store_uncertainty(self):
        """
        Add model results from one uncertainty run to the appropriate outputs dictionary, vertically stacking
        results on to the previous matrix.

        Updates:
            self.epi_outputs_uncertainty
        """

        # get outputs
        self.epi_outputs['uncertainty'] = self.find_epi_outputs('uncertainty')

        # initialise dictionaries if needed (first run only)
        if not self.epi_outputs_uncertainty:
            self.epi_outputs_uncertainty = {'times': self.epi_outputs['uncertainty']['times']}
            for output in self.epi_outputs_to_analyse:
                self.epi_outputs_uncertainty[output] \
                    = numpy.empty(shape=[0, len(self.epi_outputs['uncertainty']['times'])])

        # add uncertainty data to dictionaries, one row per run
        for output in self.epi_outputs_to_analyse:
            self.epi_outputs_uncertainty[output] \
                = numpy.vstack([self.epi_outputs_uncertainty[output], self.epi_outputs['uncertainty'][output]])

    def find_time_index(self, time, model):
        """
        General method to find first time point in times list at or after a certain specified time point.

        Args:
            time: Float for the time point of interest.
            model: Key of the model in self.model_dict whose times list is searched.
        Returns:
            Integer index into the model's times list.
        Raises:
            ValueError: if no time point at or after the requested time exists
                (previously this raise was unreachable dead code after the return,
                so an IndexError escaped instead).
        """
        for index, model_time in enumerate(self.model_dict[model].times):
            if model_time >= time:
                # NOTE(review): the "- 1" offset (returning the point *before*
                # the requested time) is preserved from the original — confirm
                # it is intentional rather than an off-by-one
                return index - 1
        raise ValueError('Time not found')
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
'''
This module contains functions to install optional components
into the current girder installation. Note that girder must
be restarted for these changes to take effect.
'''
import os
import urllib2
import tempfile
import tarfile
import shutil
import pip
from girder import constants
from girder.utility.plugin_utilities import getPluginDir
# API version of the local girder installation; used to select the matching
# release assets on Github below
version = constants.VERSION['apiVersion']
# Default download location for optional features
defaultSource = (
    'https://github.com/girder/girder/releases/download/v%s/' % version
)
def fix_path(path):
    '''
    Get an absolute path (while expanding ~).

    :param str path: a filesystem path
    :return: an absolute path
    :rtype: str
    '''
    # expand a leading ~ first, then normalise to an absolute path
    return os.path.abspath(os.path.expanduser(path))
def handle_source(src, dest):
    '''
    Stage a source specification into a temporary directory for processing.
    Returns False if unsuccessful.

    :param str src: source specification (filesystem or url)
    :param str dest: destination path
    :returns: True if success else False
    :rtype: bool
    '''
    # Optimistically treat ``src`` as a URL first; the except clause below
    # catches both unreachable URLs (URLError) and strings that are not URLs
    # at all (ValueError), in which case we fall through to filesystem handling.
    try: # pragma: no cover
        # Try to open as a url
        request = urllib2.urlopen(src)
        # Spool the download into a named temporary file so the tarball
        # handling below can open it by path.
        download = tempfile.NamedTemporaryFile(suffix='.tgz')
        download.file.write(request.read())
        download.file.flush()
        download.file.seek(0)
        # Point ``src`` at the downloaded copy; extraction happens before this
        # function returns, while the temp file still exists.
        src = download.name
    except (urllib2.URLError, ValueError):
        pass
    src = fix_path(src)
    if os.path.isdir(src):
        # This is already a directory, so copy it.
        # The directory's basename becomes the staged plugin name.
        pluginName = os.path.split(src)[1]
        dest = os.path.join(dest, pluginName)
        shutil.copytree(src, dest)
        return True
    if os.path.exists(src):
        # Try to open as a tarball.
        try:
            tgz = tarfile.open(src)
            tgz.extractall(dest)
            return True
        except tarfile.ReadError:
            # not a readable tar archive; fall through to failure
            pass
    # Nothing else to try
    return False
def install_web(source=None, force=False): # pragma: no cover
    '''
    Install the web client from the given source. If no source
    is present it will install from the current release package
    on Github.

    :param str source: source specification (filesystem or url)
    :param bool force: allow overwriting existing files
    :returns: the installed web root path on success, ``False`` when client
        files already exist and ``force`` is not set, otherwise the value
        returned by staging (``None``/``False`` on failure)
    :rtype: str or bool or None
    '''
    if source is None:
        source = defaultSource + 'girder-web-' + version + '.tar.gz'
    webRoot = os.path.join(constants.STATIC_ROOT_DIR, 'clients', 'web')
    clients = os.path.join(constants.PACKAGE_DIR, 'clients')
    result = None
    if os.path.isdir(clients):
        if force:
            # remove the existing install so copytree below cannot collide
            shutil.rmtree(clients)
        else:
            print constants.TerminalColor.warning(
                'Client files already exist at %s, use "force" to overwrite.' %
                constants.STATIC_ROOT_DIR
            )
            return False
    # stage the source into a temporary directory, then copy the clients
    # tree into the package directory
    tmp = tempfile.mkdtemp()
    try:
        result = handle_source(source, tmp)
        clients = os.path.join(tmp, 'clients')
        if result and os.path.isdir(clients):
            shutil.copytree(clients, os.path.join(
                constants.PACKAGE_DIR,
                'clients'
            ))
            result = webRoot
    finally:
        # always clean up the staging directory, even on failure
        shutil.rmtree(tmp)
    return result
def install_plugin(source=None, force=False):
    '''
    Install one or more plugins from the given source. If no
    source is given, it will install all plugins in the release
    package on Github. The source provided must be a directory
    or tarball containing one or more directories which
    will be installed as individual plugins.

    :param str source: source specification (filesystem or url)
    :param bool force: allow overwriting existing files
    :returns: a list of plugins that were installed
    :rtype: list
    '''
    if source is None: # pragma: no cover
        source = defaultSource + 'girder-plugins-' + version + '.tar.gz'
    found = []
    tmp = tempfile.mkdtemp()
    try:
        # stage the source; every top-level directory inside it is treated
        # as one plugin
        handle_source(source, tmp)
        plugins = []
        for pth in os.listdir(tmp):
            pth = os.path.join(tmp, pth)
            if os.path.isdir(pth):
                plugins.append(pth)
        for plugin in plugins:
            pluginName = os.path.split(plugin)[1]
            pluginTarget = os.path.join(getPluginDir(), pluginName)
            if os.path.exists(pluginTarget):
                if force:
                    shutil.rmtree(pluginTarget)
                else:
                    # skip this plugin (rather than abort) so the remaining
                    # plugins are still installed
                    print constants.TerminalColor.warning(
                        'A plugin already exists at %s, '
                        'use "force" to overwrite.' % pluginTarget
                    )
                    continue
            found.append(pluginName)
            shutil.copytree(plugin, pluginTarget)
            # install the plugin's python dependencies, if it declares any
            requirements = os.path.join(pluginTarget, 'requirements.txt')
            if os.path.exists(requirements): # pragma: no cover
                print constants.TerminalColor.info(
                    'Attempting to install requirements for %s.\n' % pluginName
                )
                # NOTE(review): a non-zero pip exit status only warns; the
                # plugin is still reported as installed
                if pip.main(['install', '-U', '-r', requirements]) != 0:
                    print constants.TerminalColor.error(
                        'Failed to install requirements for %s.' % pluginName
                    )
    finally:
        shutil.rmtree(tmp)
    return found
__all__ = ('install_plugin', 'install_web')
| |
"""
A base view with a number of mixins are provided here, as well as a simple
template view that provides an easy ``extra_context`` property.
"""
from django.db.models.base import ModelBase
from django.contrib import messages
from django.forms import Form
from django.views.generic.base import TemplateView
from django.shortcuts import redirect as redirect_shortcut
from django.utils import functional
class ObjectMixin(object):
    """
    A view mixin that makes it easier to work with single object views, where
    the title and breadcrumb trail might be influenced by a common factor
    """

    def get_object(self, request, **kwargs):
        """Hook returning the object this view operates on; None by default."""
        return None

    def get_form(self, request, **kwargs):
        """Bind ``form_class`` to POST data (if any) and the current object."""
        bound_data = request.POST or None
        return self.form_class(bound_data,
            instance = self.get_object(request, **kwargs)
        )

    # The following overrides simply drop the ``obj`` argument before
    # delegating to the next class in the MRO.
    def get_body_classes(self, obj, **kwargs):
        return super(ObjectMixin, self).get_body_classes(**kwargs)

    def get_menu_selection(self, obj, **kwargs):
        return super(ObjectMixin, self).get_menu_selection(**kwargs)

    def get_title_parts(self, obj, **kwargs):
        return super(ObjectMixin, self).get_title_parts(**kwargs)

    def get_breadcrumb_trail(self, obj, **kwargs):
        return super(ObjectMixin, self).get_breadcrumb_trail(**kwargs)

    def get_base_context(self, request, obj, **kwargs):
        """Build the template context, threading ``obj`` through each hook."""
        context = dict(
            body_classes=self.get_body_classes(obj, **kwargs),
            menu_selection=self.get_menu_selection(obj, **kwargs),
            title_parts=self.get_title_parts(obj, **kwargs),
            breadcrumb_trail=self.get_breadcrumb_trail(obj, **kwargs),
        )
        # A hack, to mitigate the need for a specific ObjectFormMixin
        if isinstance(self, FormMixin):
            context['form'] = self.get_form(request, **kwargs)
        return context

    def get(self, request, **kwargs):
        """Render the template with the base, standard and extra context."""
        obj = self.get_object(request, **kwargs)
        context = self.get_base_context(request, obj, **kwargs)
        context.update(self.get_context_data(**kwargs))
        context.update(self.get_extra_context(request, **kwargs))
        return self.render_to_response(context)
class BootstrapView(TemplateView):
    """
    A class-based view to be rendered via a Bootstrap template, providing
    ways to set up ``<body>`` tag classes, a formula for the ``<title>`` tag,
    the breadcrumb trail and a key indicating the selected main navigation
    item.
    """

    # The classes to add to the ``<body>`` tag
    body_classes = ()
    # A key indicating the selected menu item
    menu_selection = None
    # An iterable of phrases that are concatenated with a beam, to form the
    # ``<title>`` tag of a page
    title_parts = ()
    # An iterable of tuples, the first of the inner pair being the URL (or
    # relative path) and the second being the 'name' of the item. The first
    # item in the iterable should be the start of the breadcrumb trail
    breadcrumb_trail = ()

    def get_body_classes(self, **kwargs):
        return self.body_classes

    def get_menu_selection(self, **kwargs):
        return self.menu_selection

    def get_title_parts(self, **kwargs):
        return self.title_parts

    def get_breadcrumb_trail(self, **kwargs):
        return self.breadcrumb_trail

    def get_extra_context(self, request, **kwargs):
        """Hook for per-view extra context; empty by default."""
        return {}

    def get_base_context(self, request, **kwargs):
        """
        Sets up the base context for the templated view
        """
        context = {}
        context['body_classes'] = self.get_body_classes(**kwargs)
        context['menu_selection'] = self.get_menu_selection(**kwargs)
        context['title_parts'] = self.get_title_parts(**kwargs)
        context['breadcrumb_trail'] = self.get_breadcrumb_trail(**kwargs)
        return context

    def get(self, request, **kwargs):
        """Merge base, standard and extra context, then render."""
        context = self.get_base_context(request, **kwargs)
        for additions in (self.get_context_data(**kwargs),
                          self.get_extra_context(request, **kwargs)):
            context.update(additions)
        return self.render_to_response(context)

    def redirect(self, *args, **kwargs):
        """Convenience wrapper around Django's ``redirect`` shortcut."""
        return redirect_shortcut(*args, **kwargs)
class MessageMixin(object):
    """
    A view mixin that provides a simple way to implement
    ``django.contrib.messages``. You can define messages for various labels
    ('info', 'success', 'warning', 'error') and send them via a simple
    function.
    """

    # Mapping of label -> message text, intended to be overridden by subclasses
    messages = {}

    def message(self, request, key):
        """
        Sends a message to a user
        """
        # dispatch with the matching severity for known levels; anything
        # else falls back to an 'info'-level message
        if key not in ('success', 'info', 'warning', 'error'):
            dispatch = messages.info
        else:
            dispatch = getattr(messages, key)
        dispatch(request, self.messages[key])
class FormMixin(MessageMixin):
    """
    A view mixin that provides a form for saving data.
    """
    form_class = Form
    """The type of form (should inherit from ``django.forms.ModelForm`` or
    have a ``save()`` method)"""
    def get_form(self, request, **kwargs):
        """
        Instantiates the form, bound to POST data when the request has any
        """
        return self.form_class(request.POST or None)
    def get_base_context(self, request, **kwargs):
        """Extend the base context with the instantiated form."""
        context = super(FormMixin, self).get_base_context(request, **kwargs)
        context['form'] = self.get_form(request, **kwargs)
        return context
    def validate_form(self, request, form):
        """
        Checks that the form data is valid
        """
        return form.is_valid()
    def save_form(self, request, form):
        """
        Saves the form data and returns the saved object
        """
        return form.save()
    def redirect_success(self, request):
        """
        Redirects back to the currently-requested URL
        """
        return self.redirect('.')
    def post(self, request, **kwargs):
        """
        Validate and save the form; on success send the 'success' message and
        redirect, otherwise send the 'error' message (if defined) and re-render
        the GET view with the bound form.
        """
        form = self.get_form(request, **kwargs)
        if self.validate_form(request, form):
            obj = self.save_form(request, form)
            if 'success' in self.messages:
                self.message(request, 'success')
            # NOTE: ``unicode`` is the Python 2 builtin; this module predates
            # Python 3. A string result is treated as a redirect target, as is
            # any model with get_absolute_url.
            if isinstance(obj, (str, unicode)) or hasattr(obj, 'get_absolute_url'):
                return redirect_shortcut(obj)
            return self.redirect_success(request)
        elif 'error' in self.messages:
            self.message(request, 'error')
        # fall through: re-render the page (invalid form, or no error message)
        return self.get(request, **kwargs)
class DirectTemplateView(TemplateView):
    """
    This is similar to Django's old direct template generic view. It's handiest when used for 'static'
    pages like homepages (i.e.: where dynamic data may come from context processors so a standard view
    isn't needed). It supports context variables via the ``extra_context`` argument.
    """

    # Optional dict of extra template variables; callable values are invoked
    # at render time so each request sees a fresh value
    extra_context = None

    def get_context_data(self, **kwargs):
        """
        Merge ``extra_context`` into the standard template context.

        Bug fix: the original called ``super(self.__class__, self)``, which
        recurses infinitely as soon as this class is subclassed, because
        ``self.__class__`` is always the leaf class. Naming the class
        explicitly restores correct MRO traversal.
        """
        context = super(DirectTemplateView, self).get_context_data(**kwargs)
        if self.extra_context is not None:
            for key, value in self.extra_context.items():
                context[key] = value() if callable(value) else value
        return context
| |
import os
from django.utils.translation import ugettext_lazy as _
from horizon.utils import secret_key
from openstack_dashboard import exceptions
from openstack_dashboard.settings import HORIZON_CONFIG
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# WEBROOT is the location relative to Webserver root
# should end with a slash.
WEBROOT = '/'
#LOGIN_URL = WEBROOT + 'auth/login/'
#LOGOUT_URL = WEBROOT + 'auth/logout/'
#
# LOGIN_REDIRECT_URL can be used as an alternative for
# HORIZON_CONFIG.user_home, if user_home is not set.
# Do not set it to '/home/', as this will cause circular redirect loop
#LOGIN_REDIRECT_URL = WEBROOT
# Required for Django 1.5.
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['horizon.example.com',
'192.168.187.1'
]
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# https://docs.djangoproject.com/en/1.5/ref/settings/#secure-proxy-ssl-header
#SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# Versions specified here should be integers or floats, not strings.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be 2.0 or 3.
#OPENSTACK_API_VERSIONS = {
# "data-processing": 1.1,
# "identity": 3,
# "volume": 2,
#}
# Set this to True if running on multi-domain model. When this is enabled, it
# will require user to enter the Domain name in addition to username for login.
#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
#OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# Set Console type:
# valid options are "AUTO"(default), "VNC", "SPICE", "RDP", "SERIAL" or None
# Set to None explicitly if you want to deactivate the console.
#CONSOLE_TYPE = "AUTO"
# If provided, a "Report Bug" link will be displayed in the site header
# which links to the value of this setting (ideally a URL containing
# information on how to report issues).
#HORIZON_CONFIG["bug_url"] = "http://bug-report.example.com"
# Show backdrop element outside the modal, do not close the modal
# after clicking on backdrop.
#HORIZON_CONFIG["modal_backdrop"] = "static"
# Specify a regular expression to validate user passwords.
#HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements."),
#}
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
#HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for forms including the login form and
# the database creation workflow if so desired.
#HORIZON_CONFIG["password_autocomplete"] = "off"
# Setting this to True will disable the reveal button for password fields,
# including on the login form.
#HORIZON_CONFIG["disable_password_reveal"] = False
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# amount of Python WSGI workers (if used behind Apache+mod_wsgi): However,
# there may be situations where you would want to set this explicitly, e.g.
# when multiple dashboard instances are distributed on different machines
# (usually behind a load-balancer). Either you have to make sure that a session
# gets all requests routed to the same dashboard instance or you set the same
# SECRET_KEY for all of them.
SECRET_KEY = secret_key.generate_or_read_from_file(
os.path.join(LOCAL_PATH, '.secret_key_store'))
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to login again. To use
# memcached set CACHES to something like
#CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': '127.0.0.1:11211',
# }
#}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
#'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
#EMAIL_HOST = 'smtp.my-company.com'
#EMAIL_PORT = 25
#EMAIL_HOST_USER = 'djangomail'
#EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
#AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
#]
OPENSTACK_HOST = "127.0.0.1"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
# Enables keystone web single-sign-on if set to True.
#WEBSSO_ENABLED = False
# Determines which authentication choice to show as default.
#WEBSSO_INITIAL_CHOICE = "credentials"
# The list of authentication mechanisms which include keystone
# federation protocols and identity provider/federation protocol
# mapping keys (WEBSSO_IDP_MAPPING). Current supported protocol
# IDs are 'saml2' and 'oidc' which represent SAML 2.0, OpenID
# Connect respectively.
# Do not remove the mandatory credentials mechanism.
# Note: The last two tuples are sample mapping keys to a identity provider
# and federation protocol combination (WEBSSO_IDP_MAPPING).
#WEBSSO_CHOICES = (
# ("credentials", _("Keystone Credentials")),
# ("oidc", _("OpenID Connect")),
# ("saml2", _("Security Assertion Markup Language")),
# ("acme_oidc", "ACME - OpenID Connect"),
# ("acme_saml2", "ACME - SAML2")
#)
# A dictionary of specific identity provider and federation protocol
# combinations. From the selected authentication mechanism, the value
# will be looked up as keys in the dictionary. If a match is found,
# it will redirect the user to a identity provider and federation protocol
# specific WebSSO endpoint in keystone, otherwise it will use the value
# as the protocol_id when redirecting to the WebSSO by protocol endpoint.
# NOTE: The value is expected to be a tuple formatted as: (<idp_id>, <protocol_id>).
#WEBSSO_IDP_MAPPING = {
# "acme_oidc": ("acme", "oidc"),
# "acme_saml2": ("acme", "saml2")
#}
# Disable SSL certificate checks (useful for self-signed certificates):
#OPENSTACK_SSL_NO_VERIFY = True
# The CA certificate to use to verify SSL connections
#OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True,
}
# Setting this to True, will add a new "Retrieve Password" action on instance,
# allowing Admin session password retrieval/decryption.
#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
# The Launch Instance user experience has been significantly enhanced.
# You can choose whether to enable the new launch instance experience,
# the legacy experience, or both. The legacy experience will be removed
# in a future release, but is available as a temporary backup setting to ensure
# compatibility with existing deployments. Further development will not be
# done on the legacy experience. Please report any problems with the new
# experience via the Launchpad tracking system.
#
# Toggle LAUNCH_INSTANCE_LEGACY_ENABLED and LAUNCH_INSTANCE_NG_ENABLED to
# determine the experience to enable. Set them both to true to enable
# both.
#LAUNCH_INSTANCE_LEGACY_ENABLED = True
#LAUNCH_INSTANCE_NG_ENABLED = False
# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
# can_set_mount_point to True will add the option to set the mount point
# from the UI.
OPENSTACK_HYPERVISOR_FEATURES = {
    # Only Xen currently supports setting the volume mount point (see above).
    'can_set_mount_point': False,
    'can_set_password': False,
    'requires_keypair': False,
}
# The OPENSTACK_CINDER_FEATURES settings can be used to enable optional
# services provided by cinder that are not exposed by its extension API.
OPENSTACK_CINDER_FEATURES = {
    # Toggles the optional cinder volume-backup feature in the dashboard.
    'enable_backup': False,
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
# Feature toggles for the neutron-backed parts of the dashboard; optional
# services (LB, firewall, VPN, distributed/HA routers) default to disabled.
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': True,
    'enable_quotas': True,
    'enable_ipv6': True,
    # Distributed (DVR) and HA router UI options are turned off by default.
    'enable_distributed_router': False,
    'enable_ha_router': False,
    #'enable_lb': True,
    'enable_lb': False,
    #'enable_firewall': True,
    'enable_firewall': False,
    #'enable_vpn': True,
    'enable_vpn': False,
    #'enable_fip_topology_check': True,
    # Neutron can be configured with a default Subnet Pool to be used for IPv4
    # subnet-allocation. Specify the label you wish to display in the Address
    # pool selector on the create subnet step if you want to use this feature.
    'default_ipv4_subnet_pool_label': None,
    # Neutron can be configured with a default Subnet Pool to be used for IPv6
    # subnet-allocation. Specify the label you wish to display in the Address
    # pool selector on the create subnet step if you want to use this feature.
    # You must set this to enable IPv6 Prefix Delegation in a PD-capable
    # environment.
    'default_ipv6_subnet_pool_label': None,
    # The profile_support option is used to detect if an external router can be
    # configured via the dashboard. When using specific plugins the
    # profile_support can be turned on if needed.
    'profile_support': None,
    #'profile_support': 'cisco',
    # Set which provider network types are supported. Only the network types
    # in this list will be available to choose from when creating a network.
    # Network types include local, flat, vlan, gre, and vxlan.
    'supported_provider_types': ['*'],
    # Set which VNIC types are supported for port binding. Only the VNIC
    # types in this list will be available to choose from when creating a
    # port.
    # VNIC types include 'normal', 'macvtap' and 'direct'.
    # Set to empty list or None to disable VNIC type selection.
    'supported_vnic_types': ['*']
}
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
#OPENSTACK_IMAGE_BACKEND = {
# 'image_formats': [
# ('', _('Select format')),
# ('aki', _('AKI - Amazon Kernel Image')),
# ('ami', _('AMI - Amazon Machine Image')),
# ('ari', _('ARI - Amazon Ramdisk Image')),
# ('docker', _('Docker')),
# ('iso', _('ISO - Optical Disk Image')),
# ('ova', _('OVA - Open Virtual Appliance')),
# ('qcow2', _('QCOW2 - QEMU Emulator')),
# ('raw', _('Raw')),
# ('vdi', _('VDI - Virtual Disk Image')),
#    ('vhd', _('VHD - Virtual Hard Disk')),
# ('vmdk', _('VMDK - Virtual Machine Disk')),
# ]
#}
# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
# image custom property attributes that appear on image detail pages.
# Keys are glance image property names; values are translated column titles.
IMAGE_CUSTOM_PROPERTY_TITLES = {
    "architecture": _("Architecture"),
    "kernel_id": _("Kernel ID"),
    "ramdisk_id": _("Ramdisk ID"),
    "image_state": _("Euca2ools state"),
    "project_id": _("Project ID"),
    "image_type": _("Image Type"),
}
# The IMAGE_RESERVED_CUSTOM_PROPERTIES setting is used to specify which image
# custom properties should not be displayed in the Image Custom Properties
# table.
IMAGE_RESERVED_CUSTOM_PROPERTIES = []
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
#SECONDARY_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
# Number of items shown per page in paginated tables.
API_RESULT_PAGE_SIZE = 20
# The size of chunk in bytes for downloading objects from Swift
SWIFT_FILE_TRANSFER_CHUNK_SIZE = 512 * 1024
# Specify a maximum number of items to display in a dropdown.
DROPDOWN_MAX_ITEMS = 30
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
#CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
#}
# Set this to True to display an 'Admin Password' field on the Change Password
# form to verify that it is indeed the admin logged-in who wants to change
# the password.
#ENFORCE_PASSWORD_CHECK = False
# Modules that provide /auth routes that can be used to handle different types
# of user authentication. Add auth plugins that require extra route handling to
# this list.
#AUTHENTICATION_URLS = [
# 'openstack_auth.urls',
#]
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files.
# Please ensure that your identity policy file matches the one being used on
# your keystone servers. There is an alternate policy file that may be used
# in the Keystone v3 multi-domain case, policy.v3cloudsample.json.
# This file is not included in the Horizon repository by default but can be
# found at
# http://git.openstack.org/cgit/openstack/keystone/tree/etc/ \
# policy.v3cloudsample.json
# Having matching policy files on the Horizon and Keystone servers is essential
# for normal operation. This holds true for all services and their policy files.
#POLICY_FILES = {
# 'identity': 'keystone_policy.json',
# 'compute': 'nova_policy.json',
# 'volume': 'cinder_policy.json',
# 'image': 'glance_policy.json',
# 'orchestration': 'heat_policy.json',
# 'network': 'neutron_policy.json',
# 'telemetry': 'ceilometer_policy.json',
#}
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
#TROVE_ADD_USER_PERMS = []
#TROVE_ADD_DATABASE_PERMS = []
# Use database-backed sessions (stored via the DATABASES configuration).
# NOTE: the previous value, 'django.core.cache.backends.db.DatabaseCache',
# names a *cache backend class*, not a session engine; SESSION_ENGINE must
# be a module path providing a SessionStore, so that value breaks sessions.
SESSION_ENGINE = 'django.contrib.sessions.backends.db'
# MySQL connection used for sessions and all other ORM access.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'horizon',
        'USER': 'root',
        # NOTE(review): credentials are hard-coded here; consider loading
        # them from the environment or a secrets file in production.
        'PASSWORD': '123456',
        'HOST': 'localhost',
        'PORT': '3306',
        # Django only recognizes the key 'OPTIONS' (plural); the previous
        # 'OPTION' key was silently ignored, so autocommit was never set.
        'OPTIONS': {
            'autocommit': False,
        },
    }
}
# Change this path to the appropriate static directory containing
# two files: _variables.scss and _styles.scss
#CUSTOM_THEME_PATH = 'themes/default'
# Logging configuration consumed by Django's dictConfig.  Noisy library
# loggers are routed to a null handler; dashboard/auth/client loggers go
# to the console at DEBUG level.
LOGGING = {
    'version': 1,
    # When set to True this will disable all logging except
    # for loggers specified in this configuration dictionary. Note that
    # if nothing is specified here and disable_existing_loggers is True,
    # django.db.backends will still log unless it is disabled explicitly.
    'disable_existing_loggers': False,
    'handlers': {
        'null': {
            'level': 'DEBUG',
            # Use the stdlib NullHandler (Python >= 2.7). The previous
            # 'django.utils.log.NullHandler' is deprecated and removed in
            # newer Django releases.
            'class': 'logging.NullHandler',
        },
        'console': {
            # Set the level to "DEBUG" for verbose output logging.
            'level': 'INFO',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {},
}

# Logging from these libraries is VERY verbose; send it to null by default.
for _quiet_logger in ('django.db.backends', 'requests', 'iso8601', 'scss'):
    LOGGING['loggers'][_quiet_logger] = {
        'handlers': ['null'],
        'propagate': False,
    }

# Dashboard, auth, and OpenStack client loggers: console at DEBUG level.
for _console_logger in ('horizon', 'openstack_dashboard', 'novaclient',
                        'cinderclient', 'keystoneclient', 'glanceclient',
                        'neutronclient', 'heatclient', 'ceilometerclient',
                        'troveclient', 'swiftclient', 'openstack_auth',
                        'nose.plugins.manager', 'django'):
    LOGGING['loggers'][_console_logger] = {
        'handlers': ['console'],
        'level': 'DEBUG',
        'propagate': False,
    }
# 'direction' should not be specified for all_tcp/udp/icmp.
# It is specified in the form.
# Catch-all presets keep their translated display names; 'direction' is
# deliberately absent here (it is selected on the form).
SECURITY_GROUP_RULES = {
    'all_tcp': {
        'name': _('All TCP'),
        'ip_protocol': 'tcp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_udp': {
        'name': _('All UDP'),
        'ip_protocol': 'udp',
        'from_port': '1',
        'to_port': '65535',
    },
    'all_icmp': {
        'name': _('All ICMP'),
        'ip_protocol': 'icmp',
        'from_port': '-1',
        'to_port': '-1',
    },
}

# Well-known single-port TCP services: (key, display name, port).
_TCP_SERVICE_PRESETS = (
    ('ssh', 'SSH', '22'),
    ('smtp', 'SMTP', '25'),
    ('dns', 'DNS', '53'),
    ('http', 'HTTP', '80'),
    ('pop3', 'POP3', '110'),
    ('imap', 'IMAP', '143'),
    ('ldap', 'LDAP', '389'),
    ('https', 'HTTPS', '443'),
    ('smtps', 'SMTPS', '465'),
    ('imaps', 'IMAPS', '993'),
    ('pop3s', 'POP3S', '995'),
    ('ms_sql', 'MS SQL', '1433'),
    ('mysql', 'MYSQL', '3306'),
    ('rdp', 'RDP', '3389'),
)
for _key, _label, _port in _TCP_SERVICE_PRESETS:
    SECURITY_GROUP_RULES[_key] = {
        'name': _label,
        'ip_protocol': 'tcp',
        'from_port': _port,
        'to_port': _port,
    }
# Deprecation Notice:
#
# The setting FLAVOR_EXTRA_KEYS has been deprecated.
# Please load extra spec metadata into the Glance Metadata Definition Catalog.
#
# The sample quota definitions can be found in:
# <glance_source>/etc/metadefs/compute-quota.json
#
# The metadata definition catalog supports CLI and API:
# $glance --os-image-api-version 2 help md-namespace-import
# $glance-manage db_load_metadefs <directory_with_definition_files>
#
# See Metadata Definitions on: http://docs.openstack.org/developer/glance/
# Indicate to the Sahara data processing service whether or not
# automatic floating IP allocation is in effect. If it is not
# in effect, the user will be prompted to choose a floating IP
# pool for use in their cluster. False by default. You would want
# to set this to True if you were running Nova Networking with
# auto_assign_floating_ip = True.
#SAHARA_AUTO_IP_ALLOCATION_ENABLED = False
# The hash algorithm to use for authentication tokens. This must
# match the hash algorithm that the identity server and the
# auth_token middleware are using. Allowed values are the
# algorithms supported by Python's hashlib library.
#OPENSTACK_TOKEN_HASH_ALGORITHM = 'md5'
# Hashing tokens from Keystone keeps the Horizon session data smaller, but it
# doesn't work in some cases when using PKI tokens. Uncomment this value and
# set it to False if using PKI tokens and there are 401 errors due to token
# hashing.
#OPENSTACK_TOKEN_HASH_ENABLED = True
# AngularJS requires some settings to be made available to
# the client side. Some settings are required by in-tree / built-in horizon
# features. These settings must be added to REST_API_REQUIRED_SETTINGS in the
# form of ['SETTING_1','SETTING_2'], etc.
#
# You may remove settings from this list for security purposes, but do so at
# the risk of breaking a built-in horizon feature. These settings are required
# for horizon to function properly. Only remove them if you know what you
# are doing. These settings may in the future be moved to be defined within
# the enabled panel configuration.
# You should not add settings to this list for out of tree extensions.
# See: https://wiki.openstack.org/wiki/Horizon/RESTAPI
REST_API_REQUIRED_SETTINGS = ['OPENSTACK_HYPERVISOR_FEATURES']
# Additional settings can be made available to the client side for
# extensibility by specifying them in REST_API_ADDITIONAL_SETTINGS
# !! Please use extreme caution as the settings are transferred via HTTP/S
# and are not encrypted on the browser. This is an experimental API and
# may be deprecated in the future without notice.
#REST_API_ADDITIONAL_SETTINGS = []
# DISALLOW_IFRAME_EMBED can be used to prevent Horizon from being embedded
# within an iframe. Legacy browsers are still vulnerable to a Cross-Frame
# Scripting (XFS) vulnerability, so this option allows extra security hardening
# where iframes are not used in deployment. Default setting is True.
# For more information see:
# http://tinyurl.com/anticlickjack
#DISALLOW_IFRAME_EMBED = True
| |
"""
Lazy Evaluation for Python - main package with primary exports
Copyright (c) 2004, Georg Bauer <gb@murphy.bofh.ms>,
Copyright (c) 2011, Alexander Marshalov <alone.amper@gmail.com>,
except where the file explicitly names other copyright holders and licenses.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
import sys
import unittest
from lazypy import *
from lazypy.Utils import *
class MySpecialError(Exception):
    """Marker exception used to verify exception propagation in tests."""
def anton(a, b):
    """Return ``a + b`` (sum or concatenation); used as a lazy() target."""
    total = a + b
    return total
class ClassWithAttrs:
    """Empty attribute holder; tests assign arbitrary attributes to it."""
    pass
class ClassWithLazyMethod:
    """Class whose ``anton`` method is wrapped with lazy(); ``attr`` is
    therefore a Promise created at construction time."""
    def __init__(self):
        self.attr = self.anton(5,6)
    def anton(self, a, b):
        return a+b
    # Rebind anton AFTER its plain definition so all calls (including the
    # one in __init__) go through the lazy wrapper and return promises.
    anton = lazy(anton)
class MyPromise(object):
    """Custom promise used to check that __promiseclass__ is honoured.

    Mirrors lazypy's promise protocol: stores the deferred call and
    memoizes its result in __force__.
    """
    # NOTE: Python 2 metaclass syntax; ignored on Python 3.
    __metaclass__ = PromiseMetaClass
    def __init__(self, func, args, kw):
        self.__func = func
        self.__args = args
        self.__kw = kw
        # NoneSoFar is lazypy's sentinel for "not yet computed".
        self.__result = NoneSoFar
    def forced(self):
        """Return True once the promise has been evaluated."""
        return self.__result is not NoneSoFar
    def __force__(self):
        # Force arguments first, then evaluate once and memoize the result.
        if self.__result is NoneSoFar:
            args = [force(arg) for arg in self.__args]
            kw = dict([(k, force(v)) for (k, v) in self.__kw.items()])
            self.__result = self.__func(*args, **kw)
        return self.__result
class LazyClass(LazyEvaluated):
    """LazyEvaluated subclass whose method calls yield MyPromise objects."""
    # Direct lazypy to wrap method results in our custom promise class.
    __promiseclass__ = MyPromise
    def anton(self, a, b):
        return a+b
    def berta(self, a, b):
        return a*b
    def caesar(self):
        return 'blah'
    def detlef(self):
        # Nested attribute structure for attribute-access tests.
        res = ClassWithAttrs()
        res.blah = ClassWithAttrs()
        res.blah.blubb = 5
        return res
class TestCase100Simple(unittest.TestCase):
    """Promise basics: delayed calls, operators, and type coercions."""
    def testDelay(self):
        promise = delay(anton, (5, 6))
        self.assertTrue(isinstance(promise, Promise))
        # Comparing twice must keep returning the memoized result.
        self.assertEqual(promise, 11)
        self.assertEqual(promise, 11)
    def testDelayList(self):
        def berta(a,b):
            return range(a,b)
        promise = delay(berta, (0, 10))
        self.assertTrue(isinstance(promise, Promise))
        self.assertEqual(len(promise), 10)
        self.assertEqual(len(promise), 10)
    def testIntegers(self):
        funk = lazy(anton)
        self.assertTrue(isinstance(funk(5,6), Promise))
        self.assertEqual(funk(5,6), 11)
        self.assertEqual(str(funk(5,6)), '11')
        self.assertEqual(-funk(5,6), -11)
    def testBinary(self):
        # Bitwise operators must also force the promise.
        funk = lazy(anton)
        self.assertEqual(funk(5,6)|funk(9,7), 27)
        self.assertEqual(funk(5,6)&funk(9,7), 0)
    def testIntegerCombined(self):
        # Both left and right operand positions are exercised.
        funk = lazy(anton)
        self.assertEqual(funk(5,6)+11, 22)
        self.assertEqual(11+funk(5,6), 22)
        self.assertEqual(funk(5,6)*3, 33)
        self.assertEqual(3*funk(5,6), 33)
    def testBools(self):
        funk = lazy(anton)
        self.assertTrue(funk(5,6))
        self.assertFalse(funk(5,-5))
    def testFloats(self):
        funk = lazy(anton)
        self.assertTrue(isinstance(funk(5.1,6.2), Promise))
        self.assertEqual(funk(5.1,6.2), 11.3)
    def testLongs(self):
        funk = lazy(anton)
        self.assertTrue(isinstance(funk(5,6), Promise))
        self.assertEqual(funk(3333333333333,5555555555555), 8888888888888)
    def testStrings(self):
        funk = lazy(anton)
        self.assertTrue(isinstance(funk('anton','berta'), Promise))
        self.assertEqual(str(funk('anton', 'berta')), 'antonberta')
        self.assertEqual(funk('anton', 'berta'), 'antonberta')
        self.assertTrue(funk('anton', 'berta') > 'anton')
    def testStringConcat(self):
        funk = lazy(anton)
        self.assertEqual(funk('anton', 'berta') + 'blah', 'antonbertablah')
        self.assertEqual('blah' + funk('anton', 'berta'), 'blahantonberta')
    def testStringMult(self):
        funk = lazy(anton)
        self.assertEqual(funk('anton','berta')*2, 'antonbertaantonberta')
        self.assertEqual(2*funk('anton','berta'), 'antonbertaantonberta')
        self.assertEqual('blah'*funk(1,1), 'blahblah')
        self.assertEqual(funk(1,1)*'blah', 'blahblah')
    def testUnicode(self):
        # NOTE: uses the Python 2 unicode() builtin.
        funk = lazy(anton)
        self.assertTrue(isinstance(funk('anton','berta'), Promise))
        self.assertEqual(unicode(funk('anton', 'berta')), 'antonberta')
        self.assertEqual(funk('anton', 'berta'), 'antonberta')
    def testAttribute(self):
        funk = lazy(anton)
        # getattr with a default on a missing attribute returns the default.
        a = getattr(funk(5,6), 'isnich', 99)
        self.assertEqual(a, 99)
    def testStringInterpolation(self):
        funk = lazy(anton)
        self.assertEqual((funk('blah%s', 'blubb%d') % ('anton', 5)), 'blahantonblubb5')
class TestCase200LazyDicts(unittest.TestCase):
    """Promises wrapping dict results.

    NOTE(review): the local name ``hash`` shadows the hash() builtin;
    harmless here but worth renaming if these tests are touched again.
    """
    def setUp(self):
        def anton(a,b):
            return dict([(x,x) for x in range(a,b)])
        self.g = lazy(anton)
    def testCreate(self):
        hash = self.g(1,6)
        self.assertEqual(hash, {1:1, 2:2, 3:3, 4:4, 5:5})
    def testKeys(self):
        hash = self.g(1,6)
        l = force(hash).keys()
        l = sorted(l)
        self.assertEqual(l, [1,2,3,4,5])
    def testValues(self):
        hash = self.g(1,6)
        l = force(hash).values()
        l = sorted(l)
        self.assertEqual(l, [1,2,3,4,5])
    def testAccess(self):
        # Indexing forces the promise transparently.
        hash = self.g(1,6)
        self.assertEqual(hash[3], 3)
class TestCase300LazyClass(unittest.TestCase):
    """LazyEvaluated classes with a custom __promiseclass__ (MyPromise)."""
    def setUp(self):
        self.obj = LazyClass()
    def testPromises(self):
        self.assertTrue(isinstance(self.obj.anton(5,6), MyPromise))
    def testForcing(self):
        # forced() flips from False to True once the promise is compared.
        promise = self.obj.anton(5,6)
        self.assertTrue(isinstance(promise, MyPromise))
        self.assertFalse(promise.forced())
        self.assertEqual(promise, 11)
        self.assertTrue(promise.forced())
    def testDirectAccess(self):
        self.assertEqual(self.obj.anton(5,6), 11)
    def testBinaryOperator(self):
        self.assertEqual(self.obj.anton(5,6)+self.obj.berta(5,6), 41)
    def testMixedNumeric(self):
        self.assertEqual(self.obj.anton(5,6)+5, 16)
        self.assertEqual(5+self.obj.anton(5,6), 16)
    def testStringMultiply(self):
        self.assertEqual(self.obj.caesar()*3, 'blahblahblah')
    def testAttributeAccess(self):
        # Attribute chains must survive forcing of nested objects.
        self.assertEqual(force(self.obj.detlef()).blah.blubb, 5)
    def testInlineModification(self):
        # Augmented assignment on a promise rebinds to a plain value.
        anton = self.obj.anton(5,6)
        anton += 11
        self.assertEqual(anton, 22)
class TestCase400LazyLists(unittest.TestCase):
    """Promises wrapping list results.

    NOTE: comparisons against range(...) assume Python 2, where range
    returns a list.
    """
    def setUp(self):
        def berta(a,b):
            return range(a,b)
        self.func = lazy(berta)
    def testCreate(self):
        l = self.func(0,10)
        self.assertTrue(isinstance(l, Promise))
        self.assertEqual(l, range(0,10))
    def testAccess(self):
        self.assertEqual(self.func(0,10)[6], 6)
        self.assertEqual(self.func(0,10)[0], 0)
        self.assertEqual(self.func(0,10)[-1], 9)
    def testSlice(self):
        self.assertEqual(self.func(0,10)[6:8], [6,7])
        self.assertEqual(self.func(0,10)[:2], [0,1])
        self.assertEqual(self.func(0,10)[-2:], [8,9])
        self.assertEqual(self.func(0,10)[:], range(0,10))
    def testExtendedSlice(self):
        self.assertEqual(self.func(0,10)[1:5:2], [1,3])
    def testLen(self):
        self.assertEqual(len(self.func(0,5)), 5)
    def testIter(self):
        l = []
        for el in self.func(0,10):
            l.append(el)
        self.assertEqual(l, range(0,10))
    def testListComprehension(self):
        l = [int(x) for x in self.func(0,10)]
        self.assertEqual(l, range(0,10))
class TestCase500Futures(unittest.TestCase):
    """Thread-based futures created with spawn()/future()."""
    def testFastFuture(self):
        f = spawn(lambda : 5+6)
        self.assertTrue(isinstance(f, Future))
        self.assertEqual(f, 11)
    def testLongerFuture(self):
        # Future results must match the direct (synchronous) computation.
        def fib(n):
            if n in (0,1):
                return 1
            return fib(n-1) + fib(n-2)
        f = future(fib)
        for n in (5, 10, 20):
            self.assertEqual(f(n), fib(n))
class TestCase550ForkedFutures(unittest.TestCase):
    """Futures evaluated in a forked child process (fork()/forked())."""
    def testFastFuture(self):
        f = fork(lambda : 5+6)
        self.assertTrue(isinstance(f, ForkedFuture))
        # Repeated comparisons must keep returning the memoized result.
        self.assertEqual(f, 11)
        self.assertEqual(f, 11)
        self.assertEqual(f, 11)
    def testFutureWithException(self):
        def crasher():
            raise MySpecialError(55)
        f = fork(crasher)
        self.assertTrue(isinstance(f, ForkedFuture))
        # Force the future itself so the test exercises exception
        # propagation from the forked child; the original assertion only
        # invoked the plain function, leaving the future untested.
        self.assertRaises(MySpecialError, force, f)
    def testLongerFuture(self):
        def fib(n):
            if n in (0,1):
                return 1
            return fib(n-1) + fib(n-2)
        f = forked(fib)
        for n in (5, 10, 20, 30):
            self.assertEqual(f(n), fib(n))
class TestCase600LazyMethod(unittest.TestCase):
    """Promises produced by a lazy method stored on an instance attribute."""
    def testAttribute(self):
        o = ClassWithLazyMethod()
        self.assertTrue(isinstance(o.attr, Promise))
        self.assertEqual(o.attr, 11)
    def testNonZero(self):
        o = ClassWithLazyMethod()
        self.assertTrue(isinstance(o.attr, Promise))
        # Missing attribute on the promise falls back to the default.
        self.assertEqual(getattr(o.attr, 'blah', True), True)
    def testString(self):
        o = ClassWithLazyMethod()
        self.assertTrue(isinstance(o.attr, Promise))
        self.assertEqual(('a%sb' % o.attr), 'a11b')
    def testInteger(self):
        o = ClassWithLazyMethod()
        self.assertTrue(isinstance(o.attr, Promise))
        self.assertEqual(5+o.attr, 16)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import netaddr
from oslo.config import cfg
import webob.exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as ntn_exc
import neutron.common.test_lib as test_lib
from neutron import context
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import l3_ext_gw_mode
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as secgrp
from neutron import manager
from neutron.manager import NeutronManager
from neutron.openstack.common import uuidutils
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import sync
from neutron.plugins.nicira.dbexts import nicira_db
from neutron.plugins.nicira.dbexts import nicira_qos_db as qos_db
from neutron.plugins.nicira.extensions import distributedrouter as dist_router
from neutron.plugins.nicira.extensions import nvp_networkgw
from neutron.plugins.nicira.extensions import nvp_qos as ext_qos
from neutron.plugins.nicira import NeutronPlugin
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira.NvpApiClient import NVPVersion
from neutron.plugins.nicira import nvplib
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.nicira import fake_nvpapiclient
from neutron.tests.unit.nicira import get_fake_conf
from neutron.tests.unit.nicira import NVPAPI_NAME
from neutron.tests.unit.nicira import NVPEXT_PATH
from neutron.tests.unit.nicira import PLUGIN_NAME
from neutron.tests.unit.nicira import STUBS_PATH
import neutron.tests.unit.nicira.test_networkgw as test_l2_gw
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair
import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
import neutron.tests.unit.test_extension_portsecurity as psec
import neutron.tests.unit.test_extension_security_group as ext_sg
from neutron.tests.unit import test_extensions
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from neutron.tests.unit import testlib_api
from neutron.openstack.common import log
# Module-level logger, per project convention.
LOG = log.getLogger(__name__)
class NiciraPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base test case wiring the NVP plugin to a fake NVP API client."""
    def _create_network(self, fmt, name, admin_state_up,
                        arg_list=None, providernet_args=None, **kwargs):
        """Issue a network-create request, merging provider-net attributes.

        Attribute names containing ':' may be passed with '__' instead
        (e.g. router__external); providernet_args are folded into kwargs.
        """
        data = {'network': {'name': name,
                            'admin_state_up': admin_state_up,
                            'tenant_id': self._tenant_id}}
        # Fix to allow the router:external attribute and any other
        # attributes containing a colon to be passed with
        # a double underscore instead
        kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items())
        if external_net.EXTERNAL in kwargs:
            arg_list = (external_net.EXTERNAL, ) + (arg_list or ())
        attrs = kwargs
        if providernet_args:
            attrs.update(providernet_args)
        for arg in (('admin_state_up', 'tenant_id', 'shared') +
                    (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['network'][arg] = kwargs[arg]
        network_req = self.new_create_request('networks', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            network_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        return network_req.get_response(self.api)
    def setUp(self,
              plugin=PLUGIN_NAME,
              ext_mgr=None,
              service_plugins=None):
        """Start the fake NVP API client and stub out the synchronizer."""
        test_lib.test_config['config_files'] = [get_fake_conf('nvp.ini.test')]
        # mock nvp api client
        self.fc = fake_nvpapiclient.FakeClient(STUBS_PATH)
        self.mock_nvpapi = mock.patch(NVPAPI_NAME, autospec=True)
        self.mock_instance = self.mock_nvpapi.start()
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()
        def _fake_request(*args, **kwargs):
            # Route all NVP API calls to the fake client.
            return self.fc.fake_request(*args, **kwargs)
        # Emulate tests against NVP 2.x
        self.mock_instance.return_value.get_nvp_version.return_value = (
            NVPVersion("2.9"))
        self.mock_instance.return_value.request.side_effect = _fake_request
        plugin = plugin or PLUGIN_NAME
        super(NiciraPluginV2TestCase, self).setUp(plugin=plugin,
                                                  ext_mgr=ext_mgr)
        cfg.CONF.set_override('metadata_mode', None, 'NVP')
        self.addCleanup(self.fc.reset_all)
        self.addCleanup(mock.patch.stopall)
class TestNiciraBasicGet(test_plugin.TestBasicGet, NiciraPluginV2TestCase):
    """Run the standard basic-GET tests against the NVP plugin."""
    pass
class TestNiciraV2HTTPResponse(test_plugin.TestV2HTTPResponse,
                               NiciraPluginV2TestCase):
    """Run the standard HTTP-response tests against the NVP plugin."""
    pass
class TestNiciraProvidernet(NiciraPluginV2TestCase):
    """Provider-network attribute handling (vlan type, segmentation id)."""
    def test_create_provider_network_default_physical_net(self):
        # No physical_network supplied; the vlan type and segmentation id
        # must still be accepted and echoed back.
        data = {'network': {'name': 'net1',
                            'admin_state_up': True,
                            'tenant_id': 'admin',
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.SEGMENTATION_ID: 411}}
        network_req = self.new_create_request('networks', data, self.fmt)
        net = self.deserialize(self.fmt, network_req.get_response(self.api))
        self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
    def test_create_provider_network(self):
        data = {'network': {'name': 'net1',
                            'admin_state_up': True,
                            'tenant_id': 'admin',
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.SEGMENTATION_ID: 411,
                            pnet.PHYSICAL_NETWORK: 'physnet1'}}
        network_req = self.new_create_request('networks', data, self.fmt)
        net = self.deserialize(self.fmt, network_req.get_response(self.api))
        self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
        self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
class TestNiciraPortsV2(NiciraPluginV2TestCase,
test_plugin.TestPortsV2,
test_bindings.PortBindingsTestCase,
test_bindings.PortBindingsHostTestCaseMixin):
VIF_TYPE = portbindings.VIF_TYPE_OVS
HAS_PORT_FILTER = True
def test_exhaust_ports_overlay_network(self):
cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NVP')
with self.network(name='testnet',
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID)) as net:
with self.subnet(network=net) as sub:
with self.port(subnet=sub):
# creating another port should see an exception
self._create_port('json', net['network']['id'], 400)
def test_exhaust_ports_bridged_network(self):
cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NVP")
providernet_args = {pnet.NETWORK_TYPE: 'flat',
pnet.PHYSICAL_NETWORK: 'tzuuid'}
with self.network(name='testnet',
providernet_args=providernet_args,
arg_list=(pnet.NETWORK_TYPE,
pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID)) as net:
with self.subnet(network=net) as sub:
with self.port(subnet=sub):
with self.port(subnet=sub):
plugin = manager.NeutronManager.get_plugin()
ls = nvplib.get_lswitches(plugin.cluster,
net['network']['id'])
self.assertEqual(len(ls), 2)
def test_update_port_delete_ip(self):
# This test case overrides the default because the nvp plugin
# implements port_security/security groups and it is not allowed
# to remove an ip address from a port unless the security group
# is first removed.
with self.subnet() as subnet:
with self.port(subnet=subnet) as port:
data = {'port': {'admin_state_up': False,
'fixed_ips': [],
secgrp.SECURITYGROUPS: []}}
req = self.new_update_request('ports',
data, port['port']['id'])
res = self.deserialize('json', req.get_response(self.api))
self.assertEqual(res['port']['admin_state_up'],
data['port']['admin_state_up'])
self.assertEqual(res['port']['fixed_ips'],
data['port']['fixed_ips'])
def test_create_port_name_exceeds_40_chars(self):
name = 'this_is_a_port_whose_name_is_longer_than_40_chars'
with self.port(name=name) as port:
# Assert the neutron name is not truncated
self.assertEqual(name, port['port']['name'])
def _verify_no_orphan_left(self, net_id):
# Verify no port exists on net
# ie: cleanup on db was successful
query_params = "network_id=%s" % net_id
self._test_list_resources('port', [],
query_params=query_params)
# Also verify no orphan port was left on nvp
# no port should be there at all
self.assertFalse(self.fc._fake_lswitch_lport_dict)
def test_create_port_nvp_error_no_orphan_left(self):
with mock.patch.object(nvplib, 'create_lport',
side_effect=NvpApiClient.NvpApiException):
with self.network() as net:
net_id = net['network']['id']
self._create_port(self.fmt, net_id,
webob.exc.HTTPInternalServerError.code)
self._verify_no_orphan_left(net_id)
    def test_create_port_neutron_error_no_orphan_left(self):
        # A neutron-side failure (persisting the neutron<->nvp port
        # mapping) during port creation must roll back and leave no
        # orphan port behind, either in the DB or on the backend.
        with mock.patch.object(nicira_db, 'add_neutron_nvp_port_mapping',
                               side_effect=ntn_exc.NeutronException):
            with self.network() as net:
                net_id = net['network']['id']
                self._create_port(self.fmt, net_id,
                                  webob.exc.HTTPInternalServerError.code)
                self._verify_no_orphan_left(net_id)
    def test_create_port_maintenance_returns_503(self):
        # While the NVP backend is in maintenance mode, port creation
        # must be answered with 503 Service Unavailable.
        with self.network() as net:
            with mock.patch.object(nvplib, 'do_request',
                                   side_effect=nvp_exc.MaintenanceInProgress):
                data = {'port': {'network_id': net['network']['id'],
                                 'admin_state_up': False,
                                 'fixed_ips': [],
                                 'tenant_id': self._tenant_id}}
                plugin = manager.NeutronManager.get_plugin()
                # get_network is patched so the request reaches port
                # creation despite do_request being unusable.
                with mock.patch.object(plugin, 'get_network',
                                       return_value=net['network']):
                    port_req = self.new_create_request('ports', data, self.fmt)
                    res = port_req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
                                     res.status_int)
class TestNiciraNetworksV2(test_plugin.TestNetworksV2,
                           NiciraPluginV2TestCase):
    """Standard network API tests plus NVP-specific network behaviours."""
    def _test_create_bridge_network(self, vlan_id=None):
        # A vlan_id selects a 'vlan' provider network, otherwise 'flat'.
        net_type = vlan_id and 'vlan' or 'flat'
        name = 'bridge_net'
        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
                    ('status', 'ACTIVE'), ('shared', False),
                    (pnet.NETWORK_TYPE, net_type),
                    (pnet.PHYSICAL_NETWORK, 'tzuuid'),
                    (pnet.SEGMENTATION_ID, vlan_id)]
        providernet_args = {pnet.NETWORK_TYPE: net_type,
                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
        if vlan_id:
            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
        with self.network(name=name,
                          providernet_args=providernet_args,
                          arg_list=(pnet.NETWORK_TYPE,
                                    pnet.PHYSICAL_NETWORK,
                                    pnet.SEGMENTATION_ID)) as net:
            for k, v in expected:
                self.assertEqual(net['network'][k], v)
    def test_create_bridge_network(self):
        self._test_create_bridge_network()
    def test_create_bridge_vlan_network(self):
        self._test_create_bridge_network(vlan_id=123)
    def test_create_bridge_vlan_network_outofrange_returns_400(self):
        # 5000 is above the maximum VLAN id, so creation must fail.
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_bridge_network(vlan_id=5000)
        self.assertEqual(ctx_manager.exception.code, 400)
    def test_list_networks_filter_by_id(self):
        # We add this unit test to cover some logic specific to the
        # nvp plugin
        with contextlib.nested(self.network(name='net1'),
                               self.network(name='net2')) as (net1, net2):
            query_params = 'id=%s' % net1['network']['id']
            self._test_list_resources('network', [net1],
                                      query_params=query_params)
            query_params += '&id=%s' % net2['network']['id']
            self._test_list_resources('network', [net1, net2],
                                      query_params=query_params)
    # NOTE(review): 'subet' is a typo for 'subnet'; the method name is
    # kept as-is so the test id does not change.
    def test_delete_network_after_removing_subet(self):
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        fmt = 'json'
        # Create new network
        res = self._create_network(fmt=fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(fmt, res)
        subnet = self._make_subnet(fmt, network, gateway_ip,
                                   cidr, ip_version=4)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        sub_del_res = req.get_response(self.api)
        self.assertEqual(sub_del_res.status_int, 204)
        req = self.new_delete_request('networks', network['network']['id'])
        net_del_res = req.get_response(self.api)
        self.assertEqual(net_del_res.status_int, 204)
    def test_list_networks_with_shared(self):
        with self.network(name='net1'):
            with self.network(name='net2', shared=True):
                req = self.new_list_request('networks')
                res = self.deserialize('json', req.get_response(self.api))
                self.assertEqual(len(res['networks']), 2)
                req_2 = self.new_list_request('networks')
                req_2.environ['neutron.context'] = context.Context('',
                                                                   'somebody')
                res = self.deserialize('json', req_2.get_response(self.api))
                # tenant must see a single network
                self.assertEqual(len(res['networks']), 1)
    def test_create_network_name_exceeds_40_chars(self):
        name = 'this_is_a_network_whose_name_is_longer_than_40_chars'
        with self.network(name=name) as net:
            # Assert neutron name is not truncated
            self.assertEqual(net['network']['name'], name)
    def test_create_network_maintenance_returns_503(self):
        # Backend maintenance mode must surface as 503 on network create.
        data = {'network': {'name': 'foo',
                            'admin_state_up': True,
                            'tenant_id': self._tenant_id}}
        with mock.patch.object(nvplib, 'do_request',
                               side_effect=nvp_exc.MaintenanceInProgress):
            net_req = self.new_create_request('networks', data, self.fmt)
            res = net_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
                             res.status_int)
    def test_update_network_with_admin_false(self):
        # update_network must raise NotImplementedError when asked to set
        # admin_state_up=False (asserted below).
        data = {'network': {'admin_state_up': False}}
        with self.network() as net:
            plugin = manager.NeutronManager.get_plugin()
            self.assertRaises(NotImplementedError,
                              plugin.update_network,
                              context.get_admin_context(),
                              net['network']['id'], data)
class NiciraPortSecurityTestCase(psec.PortSecurityDBTestCase):
    """Port-security DB test fixture wired to a fake NVP API client."""
    def setUp(self):
        test_lib.test_config['config_files'] = [get_fake_conf('nvp.ini.test')]
        # mock nvp api client
        self.fc = fake_nvpapiclient.FakeClient(STUBS_PATH)
        self.mock_nvpapi = mock.patch(NVPAPI_NAME, autospec=True)
        instance = self.mock_nvpapi.start()
        instance.return_value.login.return_value = "the_cookie"
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()
        def _fake_request(*args, **kwargs):
            # Route every NVP API call to the fake client.
            return self.fc.fake_request(*args, **kwargs)
        instance.return_value.request.side_effect = _fake_request
        super(NiciraPortSecurityTestCase, self).setUp(PLUGIN_NAME)
        self.addCleanup(self.fc.reset_all)
        self.addCleanup(self.mock_nvpapi.stop)
        self.addCleanup(patch_sync.stop)
class TestNiciraPortSecurity(NiciraPortSecurityTestCase,
                             psec.TestPortSecurity):
    """Run the standard port-security test suite against the NVP plugin."""
    pass
class TestNiciraAllowedAddressPairs(test_addr_pair.TestAllowedAddressPairs,
                                    NiciraPluginV2TestCase):
    """Run the allowed-address-pairs test suite against the NVP plugin."""
    pass
class NiciraSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
    """Security-group DB test fixture wired to a fake NVP API client."""
    def setUp(self):
        test_lib.test_config['config_files'] = [get_fake_conf('nvp.ini.test')]
        # mock nvp api client
        fc = fake_nvpapiclient.FakeClient(STUBS_PATH)
        self.mock_nvpapi = mock.patch(NVPAPI_NAME, autospec=True)
        instance = self.mock_nvpapi.start()
        instance.return_value.login.return_value = "the_cookie"
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()
        def _fake_request(*args, **kwargs):
            # Route every NVP API call to the fake client.
            return fc.fake_request(*args, **kwargs)
        instance.return_value.request.side_effect = _fake_request
        self.addCleanup(self.mock_nvpapi.stop)
        self.addCleanup(patch_sync.stop)
        super(NiciraSecurityGroupsTestCase, self).setUp(PLUGIN_NAME)
class TestNiciraSecurityGroup(ext_sg.TestSecurityGroups,
                              NiciraSecurityGroupsTestCase):
    """Security-group API tests plus NVP-specific overrides."""
    def test_create_security_group_name_exceeds_40_chars(self):
        name = 'this_is_a_secgroup_whose_name_is_longer_than_40_chars'
        with self.security_group(name=name) as sg:
            # Assert Neutron name is not truncated
            self.assertEqual(sg['security_group']['name'], name)
    def test_create_security_group_rule_bad_input(self):
        """An invalid protocol/port-range combination is rejected (400)."""
        name = 'foo security group'
        description = 'foo description'
        with self.security_group(name, description) as sg:
            security_group_id = sg['security_group']['id']
            # Deliberately bad input: protocol number 200 together with a
            # port range is expected to fail validation.
            protocol = 200
            min_range = 32
            max_range = 4343
            rule = self._build_security_group_rule(
                security_group_id, 'ingress', protocol,
                min_range, max_range)
            res = self._create_security_group_rule(self.fmt, rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 400)
class TestNiciraL3ExtensionManager(object):
    """Extension manager simulating the NVP plugin's extended L3 API.

    Merges the ext-gw-mode and distributed-router attribute extensions
    into the stock L3 resource attribute map before exposing the L3
    resources, mirroring what the plugin does at extension-loading time.
    """
    def get_resources(self):
        # Simulate extension of L3 attribute map
        # First apply attribute extensions; iterate the map directly —
        # only the per-resource value dicts are mutated, never the keys,
        # so there is no need to materialize .keys() first.
        for key in l3.RESOURCE_ATTRIBUTE_MAP:
            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
                l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
                dist_router.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
        # Finally add l3 resources to the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            l3.RESOURCE_ATTRIBUTE_MAP)
        return l3.L3.get_resources()
    def get_actions(self):
        # This manager defines no additional actions.
        return []
    def get_request_extensions(self):
        # Nor any request extensions.
        return []
class NiciraL3NatTest(test_l3_plugin.L3BaseForIntTests,
                      NiciraPluginV2TestCase):
    """Base fixture for L3/NAT tests.

    Backs up the (mutated-in-place) L3 attribute map so it can be restored
    on cleanup, and records the loaded plugin's class and dotted name.
    """
    def _restore_l3_attribute_map(self):
        # Undo the in-place extension performed by the extension manager.
        l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk
    def setUp(self, plugin=None, ext_mgr=None, service_plugins=None):
        self._l3_attribute_map_bk = {}
        # The extension manager mutates the per-resource dicts in place,
        # so copy each of them individually before setUp runs.
        for item in l3.RESOURCE_ATTRIBUTE_MAP:
            self._l3_attribute_map_bk[item] = (
                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
        cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
        self.addCleanup(self._restore_l3_attribute_map)
        ext_mgr = ext_mgr or TestNiciraL3ExtensionManager()
        super(NiciraL3NatTest, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
        plugin_instance = NeutronManager.get_plugin()
        # Fully-qualified plugin name, used by tests that patch plugin
        # methods by dotted path.
        self._plugin_name = "%s.%s" % (
            plugin_instance.__module__,
            plugin_instance.__class__.__name__)
        self._plugin_class = plugin_instance.__class__
class TestNiciraL3NatTestCase(NiciraL3NatTest,
                              test_l3_plugin.L3NatDBIntTestCase,
                              NiciraPluginV2TestCase):
    """L3/NAT integration tests against the NVP plugin.

    Covers external (L3 gateway) provider networks, distributed routers,
    floating IPs and the NVP metadata-access network machinery.
    """
    def _create_l3_ext_network(self, vlan_id=None):
        # Build an external network backed by an NVP L3 gateway service.
        name = 'l3_ext_net'
        net_type = NeutronPlugin.NetworkTypes.L3_EXT
        providernet_args = {pnet.NETWORK_TYPE: net_type,
                            pnet.PHYSICAL_NETWORK: 'l3_gw_uuid'}
        if vlan_id:
            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
        return self.network(name=name,
                            router__external=True,
                            providernet_args=providernet_args,
                            arg_list=(pnet.NETWORK_TYPE,
                                      pnet.PHYSICAL_NETWORK,
                                      pnet.SEGMENTATION_ID))
    def _test_create_l3_ext_network(self, vlan_id=None):
        name = 'l3_ext_net'
        net_type = NeutronPlugin.NetworkTypes.L3_EXT
        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
                    ('status', 'ACTIVE'), ('shared', False),
                    (external_net.EXTERNAL, True),
                    (pnet.NETWORK_TYPE, net_type),
                    (pnet.PHYSICAL_NETWORK, 'l3_gw_uuid'),
                    (pnet.SEGMENTATION_ID, vlan_id)]
        with self._create_l3_ext_network(vlan_id) as net:
            for k, v in expected:
                self.assertEqual(net['network'][k], v)
    def _nvp_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id):
        """Verify data on fake NVP API client in order to validate
        plugin did set them properly
        """
        ports = [port for port in self.fc._fake_lrouter_lport_dict.values()
                 if (port['lr_uuid'] == router_id and
                     port['att_type'] == "L3GatewayAttachment")]
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid)
        self.assertEqual(ports[0].get('vlan_id'), vlan_id)
    def test_create_l3_ext_network_without_vlan(self):
        self._test_create_l3_ext_network()
    def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None,
                                                       validate_ext_gw=True):
        with self._create_l3_ext_network(vlan_id) as net:
            with self.subnet(network=net) as s:
                data = {'router': {'tenant_id': 'whatever'}}
                data['router']['name'] = 'router1'
                data['router']['external_gateway_info'] = {
                    'network_id': s['subnet']['network_id']}
                router_req = self.new_create_request('routers', data,
                                                     self.fmt)
                try:
                    res = router_req.get_response(self.ext_api)
                    router = self.deserialize(self.fmt, res)
                    self.assertEqual(
                        s['subnet']['network_id'],
                        (router['router']['external_gateway_info']
                         ['network_id']))
                    if validate_ext_gw:
                        self._nvp_validate_ext_gw(router['router']['id'],
                                                  'l3_gw_uuid', vlan_id)
                finally:
                    # NOTE(review): if get_response raises, 'router' is
                    # unbound here and the NameError masks the real error.
                    self._delete('routers', router['router']['id'])
    def test_router_create_with_gwinfo_and_l3_ext_net(self):
        self._test_router_create_with_gwinfo_and_l3_ext_net()
    def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self):
        self._test_router_create_with_gwinfo_and_l3_ext_net(444)
    def _test_router_create_with_distributed(self, dist_input, dist_expected,
                                             version='3.1', return_code=201):
        # Pretend the backend runs the given NVP version; router
        # distribution support depends on it (see the tests below).
        self.mock_instance.return_value.get_nvp_version.return_value = (
            NvpApiClient.NVPVersion(version))
        data = {'tenant_id': 'whatever'}
        data['name'] = 'router1'
        data['distributed'] = dist_input
        router_req = self.new_create_request(
            'routers', {'router': data}, self.fmt)
        try:
            res = router_req.get_response(self.ext_api)
            self.assertEqual(return_code, res.status_int)
            if res.status_int == 201:
                router = self.deserialize(self.fmt, res)
                self.assertIn('distributed', router['router'])
                self.assertEqual(dist_expected,
                                 router['router']['distributed'])
        finally:
            # NOTE(review): 'res' is unbound here if get_response raised.
            if res.status_int == 201:
                self._delete('routers', router['router']['id'])
    def test_router_create_distributed_with_3_1(self):
        self._test_router_create_with_distributed(True, True)
    def test_router_create_distributed_with_new_nvp_versions(self):
        with mock.patch.object(nvplib, 'create_explicit_route_lrouter'):
            self._test_router_create_with_distributed(True, True, '3.2')
            self._test_router_create_with_distributed(True, True, '4.0')
            self._test_router_create_with_distributed(True, True, '4.1')
    def test_router_create_not_distributed(self):
        self._test_router_create_with_distributed(False, False)
    def test_router_create_distributed_unspecified(self):
        self._test_router_create_with_distributed(None, False)
    def test_router_create_distributed_returns_400(self):
        # Distributed router creation on NVP 3.0 is expected to fail.
        self._test_router_create_with_distributed(True, None, '3.0', 400)
    def test_router_create_on_obsolete_platform(self):
        def obsolete_response(*args, **kwargs):
            # Strip the 'distributed' key to mimic old NVP replies.
            response = nvplib._create_implicit_routing_lrouter(*args, **kwargs)
            response.pop('distributed')
            return response
        with mock.patch.object(
            nvplib, 'create_lrouter', new=obsolete_response):
            self._test_router_create_with_distributed(None, False, '2.2')
    def test_router_create_nvp_error_returns_500(self, vlan_id=None):
        with mock.patch.object(nvplib,
                               'create_router_lport',
                               side_effect=NvpApiClient.NvpApiException):
            with self._create_l3_ext_network(vlan_id) as net:
                with self.subnet(network=net) as s:
                    data = {'router': {'tenant_id': 'whatever'}}
                    data['router']['name'] = 'router1'
                    data['router']['external_gateway_info'] = {
                        'network_id': s['subnet']['network_id']}
                    router_req = self.new_create_request(
                        'routers', data, self.fmt)
                    res = router_req.get_response(self.ext_api)
                    self.assertEqual(500, res.status_int)
    def test_router_add_gateway_invalid_network_returns_404(self):
        # NOTE(salv-orlando): This unit test has been overridden
        # as the nicira plugin supports the ext_gw_mode extension
        # which mandates a uuid for the external network identifier
        with self.router() as r:
            self._add_external_gateway_to_router(
                r['router']['id'],
                uuidutils.generate_uuid(),
                expected_code=webob.exc.HTTPNotFound.code)
    def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None,
                                                  validate_ext_gw=True):
        with self.router() as r:
            with self.subnet() as s1:
                with self._create_l3_ext_network(vlan_id) as net:
                    with self.subnet(network=net) as s2:
                        self._set_net_external(s1['subnet']['network_id'])
                        try:
                            self._add_external_gateway_to_router(
                                r['router']['id'],
                                s1['subnet']['network_id'])
                            body = self._show('routers', r['router']['id'])
                            net_id = (body['router']
                                      ['external_gateway_info']['network_id'])
                            self.assertEqual(net_id,
                                             s1['subnet']['network_id'])
                            # Plug network with external mapping
                            self._set_net_external(s2['subnet']['network_id'])
                            self._add_external_gateway_to_router(
                                r['router']['id'],
                                s2['subnet']['network_id'])
                            body = self._show('routers', r['router']['id'])
                            net_id = (body['router']
                                      ['external_gateway_info']['network_id'])
                            self.assertEqual(net_id,
                                             s2['subnet']['network_id'])
                            if validate_ext_gw:
                                self._nvp_validate_ext_gw(
                                    body['router']['id'],
                                    'l3_gw_uuid', vlan_id)
                        finally:
                            # Cleanup
                            self._remove_external_gateway_from_router(
                                r['router']['id'],
                                s2['subnet']['network_id'])
    def test_router_update_gateway_on_l3_ext_net(self):
        self._test_router_update_gateway_on_l3_ext_net()
    def test_router_update_gateway_on_l3_ext_net_with_vlan(self):
        self._test_router_update_gateway_on_l3_ext_net(444)
    def test_router_list_by_tenant_id(self):
        with contextlib.nested(self.router(tenant_id='custom'),
                               self.router(),
                               self.router()
                               ) as routers:
            self._test_list_resources('router', [routers[0]],
                                      query_params="tenant_id=custom")
    def test_create_l3_ext_network_with_vlan(self):
        self._test_create_l3_ext_network(666)
    def test_floatingip_with_assoc_fails(self):
        self._test_floatingip_with_assoc_fails(self._plugin_name)
    def test_floatingip_with_invalid_create_port(self):
        self._test_floatingip_with_invalid_create_port(self._plugin_name)
    def _nvp_metadata_setup(self):
        # Enable the 'access_network' metadata mode (dedicated network).
        cfg.CONF.set_override('metadata_mode', 'access_network', 'NVP')
    def _nvp_metadata_teardown(self):
        cfg.CONF.set_override('metadata_mode', None, 'NVP')
    def test_create_router_name_exceeds_40_chars(self):
        name = 'this_is_a_router_whose_name_is_longer_than_40_chars'
        with self.router(name=name) as rtr:
            # Assert Neutron name is not truncated
            self.assertEqual(rtr['router']['name'], name)
    def test_router_add_interface_subnet_with_metadata_access(self):
        self._nvp_metadata_setup()
        self.test_router_add_interface_subnet()
        self._nvp_metadata_teardown()
    def test_router_add_interface_port_with_metadata_access(self):
        self._nvp_metadata_setup()
        self.test_router_add_interface_port()
        self._nvp_metadata_teardown()
    def test_router_add_interface_dupsubnet_returns_400_with_metadata(self):
        self._nvp_metadata_setup()
        self.test_router_add_interface_dup_subnet1_returns_400()
        self._nvp_metadata_teardown()
    def test_router_add_interface_overlapped_cidr_returns_400_with(self):
        self._nvp_metadata_setup()
        self.test_router_add_interface_overlapped_cidr_returns_400()
        self._nvp_metadata_teardown()
    def test_router_remove_interface_inuse_returns_409_with_metadata(self):
        self._nvp_metadata_setup()
        self.test_router_remove_interface_inuse_returns_409()
        self._nvp_metadata_teardown()
    def test_router_remove_iface_wrong_sub_returns_400_with_metadata(self):
        self._nvp_metadata_setup()
        self.test_router_remove_interface_wrong_subnet_returns_400()
        self._nvp_metadata_teardown()
    def test_router_delete_with_metadata_access(self):
        self._nvp_metadata_setup()
        self.test_router_delete()
        self._nvp_metadata_teardown()
    def test_router_delete_with_port_existed_returns_409_with_metadata(self):
        self._nvp_metadata_setup()
        self.test_router_delete_with_port_existed_returns_409()
        self._nvp_metadata_teardown()
    # NOTE(review): 'metadatata' is a typo; the name is kept so the test
    # id does not change.
    def test_metadatata_network_created_with_router_interface_add(self):
        self._nvp_metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
                r_ports = self._list('ports')['ports']
                self.assertEqual(len(r_ports), 2)
                ips = []
                for port in r_ports:
                    ips.extend([netaddr.IPAddress(fixed_ip['ip_address'])
                                for fixed_ip in port['fixed_ips']])
                meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
                # One of the router ports must sit on the metadata network.
                self.assertTrue(any([ip in meta_cidr for ip in ips]))
                # Needed to avoid 409
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
        self._nvp_metadata_teardown()
    def test_metadata_network_create_rollback_on_create_subnet_failure(self):
        self._nvp_metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                # Raise a NeutronException (eg: NotFound)
                with mock.patch.object(self._plugin_class,
                                       'create_subnet',
                                       side_effect=ntn_exc.NotFound):
                    self._router_interface_action(
                        'add', r['router']['id'], s['subnet']['id'], None)
                # Ensure metadata network was removed
                nets = self._list('networks')['networks']
                self.assertEqual(len(nets), 1)
                # Needed to avoid 409
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
        self._nvp_metadata_teardown()
    def test_metadata_network_create_rollback_on_add_rtr_iface_failure(self):
        self._nvp_metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                # Raise a NeutronException when adding metadata subnet
                # to router
                # save function being mocked
                real_func = self._plugin_class.add_router_interface
                plugin_instance = manager.NeutronManager.get_plugin()
                def side_effect(*args):
                    if args[-1]['subnet_id'] == s['subnet']['id']:
                        # do the real thing
                        return real_func(plugin_instance, *args)
                    # otherwise raise
                    raise NvpApiClient.NvpApiException()
                with mock.patch.object(self._plugin_class,
                                       'add_router_interface',
                                       side_effect=side_effect):
                    self._router_interface_action(
                        'add', r['router']['id'], s['subnet']['id'], None)
                # Ensure metadata network was removed
                nets = self._list('networks')['networks']
                self.assertEqual(len(nets), 1)
                # Needed to avoid 409
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
        self._nvp_metadata_teardown()
    def test_metadata_network_removed_with_router_interface_remove(self):
        self._nvp_metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add', r['router']['id'],
                                              s['subnet']['id'], None)
                subnets = self._list('subnets')['subnets']
                self.assertEqual(len(subnets), 2)
                meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
                for subnet in subnets:
                    cidr = netaddr.IPNetwork(subnet['cidr'])
                    if meta_cidr == cidr or meta_cidr in cidr.supernet(16):
                        meta_sub_id = subnet['id']
                        meta_net_id = subnet['network_id']
                ports = self._list(
                    'ports',
                    query_params='network_id=%s' % meta_net_id)['ports']
                self.assertEqual(len(ports), 1)
                meta_port_id = ports[0]['id']
                self._router_interface_action('remove', r['router']['id'],
                                              s['subnet']['id'], None)
                # Metadata network, port and subnet must all be gone.
                self._show('networks', meta_net_id,
                           webob.exc.HTTPNotFound.code)
                self._show('ports', meta_port_id,
                           webob.exc.HTTPNotFound.code)
                self._show('subnets', meta_sub_id,
                           webob.exc.HTTPNotFound.code)
        self._nvp_metadata_teardown()
    def test_metadata_network_remove_rollback_on_failure(self):
        self._nvp_metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add', r['router']['id'],
                                              s['subnet']['id'], None)
                networks = self._list('networks')['networks']
                for network in networks:
                    if network['id'] != s['subnet']['network_id']:
                        meta_net_id = network['id']
                ports = self._list(
                    'ports',
                    query_params='network_id=%s' % meta_net_id)['ports']
                meta_port_id = ports[0]['id']
                # Raise a NeutronException when removing
                # metadata subnet from router
                # save function being mocked
                real_func = self._plugin_class.remove_router_interface
                plugin_instance = manager.NeutronManager.get_plugin()
                def side_effect(*args):
                    if args[-1].get('subnet_id') == s['subnet']['id']:
                        # do the real thing
                        return real_func(plugin_instance, *args)
                    # otherwise raise
                    raise NvpApiClient.NvpApiException()
                with mock.patch.object(self._plugin_class,
                                       'remove_router_interface',
                                       side_effect=side_effect):
                    self._router_interface_action('remove', r['router']['id'],
                                                  s['subnet']['id'], None)
                # Metadata network and subnet should still be there
                self._show('networks', meta_net_id,
                           webob.exc.HTTPOk.code)
                self._show('ports', meta_port_id,
                           webob.exc.HTTPOk.code)
        self._nvp_metadata_teardown()
    def test_metadata_dhcp_host_route(self):
        cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NVP')
        # NOTE(review): this initial listing is unused; it is overwritten
        # inside the 'with' block below.
        subnets = self._list('subnets')['subnets']
        with self.subnet() as s:
            with self.port(subnet=s, device_id='1234',
                           device_owner='network:dhcp'):
                subnets = self._list('subnets')['subnets']
                self.assertEqual(len(subnets), 1)
                self.assertEqual(subnets[0]['host_routes'][0]['nexthop'],
                                 '10.0.0.2')
                self.assertEqual(subnets[0]['host_routes'][0]['destination'],
                                 '169.254.169.254/32')
            subnets = self._list('subnets')['subnets']
            # Test that route is deleted after dhcp port is removed
            self.assertEqual(len(subnets[0]['host_routes']), 0)
    def test_floatingip_disassociate(self):
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.floatingip_no_assoc(private_sub) as fip:
                port_id = p['port']['id']
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip': {'port_id': port_id}})
                self.assertEqual(body['floatingip']['port_id'], port_id)
                # Disassociate
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip': {'port_id': None}})
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertIsNone(body['floatingip']['port_id'])
                self.assertIsNone(body['floatingip']['fixed_ip_address'])
    def test_create_router_maintenance_returns_503(self):
        # Backend maintenance mode must surface as 503 on router create.
        with self._create_l3_ext_network() as net:
            with self.subnet(network=net) as s:
                with mock.patch.object(
                    nvplib,
                    'do_request',
                    side_effect=nvp_exc.MaintenanceInProgress):
                    data = {'router': {'tenant_id': 'whatever'}}
                    data['router']['name'] = 'router1'
                    data['router']['external_gateway_info'] = {
                        'network_id': s['subnet']['network_id']}
                    router_req = self.new_create_request(
                        'routers', data, self.fmt)
                    res = router_req.get_response(self.ext_api)
                    self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
                                     res.status_int)
class NvpQoSTestExtensionManager(object):
    """Minimal extension manager exposing only the nvp-qos resources."""

    def get_resources(self):
        # Delegate straight to the nvp-qos extension definition.
        return ext_qos.Nvp_qos.get_resources()

    def get_actions(self):
        # This extension manager adds no custom actions.
        return []

    def get_request_extensions(self):
        # Nor any request extensions.
        return []
class TestNiciraQoSQueue(NiciraPluginV2TestCase):
def setUp(self, plugin=None):
cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
super(TestNiciraQoSQueue, self).setUp()
ext_mgr = NvpQoSTestExtensionManager()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
def _create_qos_queue(self, fmt, body, **kwargs):
qos_queue = self.new_create_request('qos-queues', body)
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
# create a specific auth context for this request
qos_queue.environ['neutron.context'] = context.Context(
'', kwargs['tenant_id'])
return qos_queue.get_response(self.ext_api)
@contextlib.contextmanager
def qos_queue(self, name='foo', min='0', max='10',
qos_marking=None, dscp='0', default=None, no_delete=False):
body = {'qos_queue': {'tenant_id': 'tenant',
'name': name,
'min': min,
'max': max}}
if qos_marking:
body['qos_queue']['qos_marking'] = qos_marking
if dscp:
body['qos_queue']['dscp'] = dscp
if default:
body['qos_queue']['default'] = default
res = self._create_qos_queue('json', body)
qos_queue = self.deserialize('json', res)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
try:
yield qos_queue
finally:
if not no_delete:
self._delete('qos-queues',
qos_queue['qos_queue']['id'])
def test_create_qos_queue(self):
with self.qos_queue(name='fake_lqueue', min=34, max=44,
qos_marking='untrusted', default=False) as q:
self.assertEqual(q['qos_queue']['name'], 'fake_lqueue')
self.assertEqual(q['qos_queue']['min'], 34)
self.assertEqual(q['qos_queue']['max'], 44)
self.assertEqual(q['qos_queue']['qos_marking'], 'untrusted')
self.assertFalse(q['qos_queue']['default'])
def test_create_trusted_qos_queue(self):
with mock.patch.object(qos_db.LOG, 'info') as log:
with mock.patch.object(nvplib, 'do_request',
return_value={"uuid": "fake_queue"}):
with self.qos_queue(name='fake_lqueue', min=34, max=44,
qos_marking='trusted', default=False) as q:
self.assertEqual(q['qos_queue']['dscp'], None)
self.assertTrue(log.called)
def test_create_qos_queue_name_exceeds_40_chars(self):
name = 'this_is_a_queue_whose_name_is_longer_than_40_chars'
with self.qos_queue(name=name) as queue:
# Assert Neutron name is not truncated
self.assertEqual(queue['qos_queue']['name'], name)
def test_create_qos_queue_default(self):
with self.qos_queue(default=True) as q:
self.assertTrue(q['qos_queue']['default'])
def test_create_qos_queue_two_default_queues_fail(self):
with self.qos_queue(default=True):
body = {'qos_queue': {'tenant_id': 'tenant',
'name': 'second_default_queue',
'default': True}}
res = self._create_qos_queue('json', body)
self.assertEqual(res.status_int, 409)
def test_create_port_with_queue(self):
with self.qos_queue(default=True) as q1:
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE,),
queue_id=q1['qos_queue']['id'])
net1 = self.deserialize('json', res)
self.assertEqual(net1['network'][ext_qos.QUEUE],
q1['qos_queue']['id'])
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
with self.port(device_id=device_id, do_delete=False) as p:
self.assertEqual(len(p['port'][ext_qos.QUEUE]), 36)
def test_create_shared_queue_networks(self):
with self.qos_queue(default=True, no_delete=True) as q1:
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE,),
queue_id=q1['qos_queue']['id'])
net1 = self.deserialize('json', res)
self.assertEqual(net1['network'][ext_qos.QUEUE],
q1['qos_queue']['id'])
res = self._create_network('json', 'net2', True,
arg_list=(ext_qos.QUEUE,),
queue_id=q1['qos_queue']['id'])
net2 = self.deserialize('json', res)
self.assertEqual(net1['network'][ext_qos.QUEUE],
q1['qos_queue']['id'])
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
res = self._create_port('json', net1['network']['id'],
device_id=device_id)
port1 = self.deserialize('json', res)
res = self._create_port('json', net2['network']['id'],
device_id=device_id)
port2 = self.deserialize('json', res)
self.assertEqual(port1['port'][ext_qos.QUEUE],
port2['port'][ext_qos.QUEUE])
self._delete('ports', port1['port']['id'])
self._delete('ports', port2['port']['id'])
def test_remove_queue_in_use_fail(self):
with self.qos_queue(no_delete=True) as q1:
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE,),
queue_id=q1['qos_queue']['id'])
net1 = self.deserialize('json', res)
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
res = self._create_port('json', net1['network']['id'],
device_id=device_id)
port = self.deserialize('json', res)
self._delete('qos-queues', port['port'][ext_qos.QUEUE], 409)
def test_update_network_new_queue(self):
with self.qos_queue() as q1:
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE,),
queue_id=q1['qos_queue']['id'])
net1 = self.deserialize('json', res)
with self.qos_queue() as new_q:
data = {'network': {ext_qos.QUEUE: new_q['qos_queue']['id']}}
req = self.new_update_request('networks', data,
net1['network']['id'])
res = req.get_response(self.api)
net1 = self.deserialize('json', res)
self.assertEqual(net1['network'][ext_qos.QUEUE],
new_q['qos_queue']['id'])
def test_update_port_adding_device_id(self):
with self.qos_queue(no_delete=True) as q1:
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE,),
queue_id=q1['qos_queue']['id'])
net1 = self.deserialize('json', res)
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
res = self._create_port('json', net1['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][ext_qos.QUEUE], None)
data = {'port': {'device_id': device_id}}
req = self.new_update_request('ports', data,
port['port']['id'])
res = req.get_response(self.api)
port = self.deserialize('json', res)
self.assertEqual(len(port['port'][ext_qos.QUEUE]), 36)
def test_get_port_with_qos_not_admin(self):
body = {'qos_queue': {'tenant_id': 'not_admin',
'name': 'foo', 'min': 20, 'max': 20}}
res = self._create_qos_queue('json', body, tenant_id='not_admin')
q1 = self.deserialize('json', res)
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE, 'tenant_id',),
queue_id=q1['qos_queue']['id'],
tenant_id="not_admin")
net1 = self.deserialize('json', res)
self.assertEqual(len(net1['network'][ext_qos.QUEUE]), 36)
res = self._create_port('json', net1['network']['id'],
tenant_id='not_admin', set_context=True)
port = self.deserialize('json', res)
self.assertEqual(ext_qos.QUEUE not in port['port'], True)
def test_dscp_value_out_of_range(self):
body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '64',
'name': 'foo', 'min': 20, 'max': 20}}
res = self._create_qos_queue('json', body)
self.assertEqual(res.status_int, 400)
def test_non_admin_cannot_create_queue(self):
body = {'qos_queue': {'tenant_id': 'not_admin',
'name': 'foo', 'min': 20, 'max': 20}}
res = self._create_qos_queue('json', body, tenant_id='not_admin',
set_context=True)
self.assertEqual(res.status_int, 403)
def test_update_port_non_admin_does_not_show_queue_id(self):
body = {'qos_queue': {'tenant_id': 'not_admin',
'name': 'foo', 'min': 20, 'max': 20}}
res = self._create_qos_queue('json', body, tenant_id='not_admin')
q1 = self.deserialize('json', res)
res = self._create_network('json', 'net1', True,
arg_list=(ext_qos.QUEUE,),
tenant_id='not_admin',
queue_id=q1['qos_queue']['id'])
net1 = self.deserialize('json', res)
res = self._create_port('json', net1['network']['id'],
tenant_id='not_admin', set_context=True)
port = self.deserialize('json', res)
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
data = {'port': {'device_id': device_id}}
neutron_context = context.Context('', 'not_admin')
port = self._update('ports', port['port']['id'], data,
neutron_context=neutron_context)
self.assertFalse(ext_qos.QUEUE in port['port'])
    def test_rxtx_factor(self):
        """The port's rxtx_factor scales the queue max rate: 10 * 2 == 20."""
        with self.qos_queue(max=10) as q1:
            res = self._create_network('json', 'net1', True,
                                       arg_list=(ext_qos.QUEUE,),
                                       queue_id=q1['qos_queue']['id'])
            net1 = self.deserialize('json', res)
            res = self._create_port('json', net1['network']['id'],
                                    arg_list=(ext_qos.RXTX_FACTOR,),
                                    rxtx_factor=2, device_id='1')
            port = self.deserialize('json', res)
            req = self.new_show_request('qos-queues',
                                        port['port'][ext_qos.QUEUE])
            res = req.get_response(self.ext_api)
            queue = self.deserialize('json', res)
            self.assertEqual(queue['qos_queue']['max'], 20)
class NiciraExtGwModeTestCase(NiciraPluginV2TestCase,
                              test_ext_gw_mode.ExtGwModeIntTestCase):
    """Runs the generic external-gateway-mode test suite against the NVP
    plugin; all cases are inherited unchanged."""
    pass
class NiciraNeutronNVPOutOfSync(NiciraPluginV2TestCase,
                                test_l3_plugin.L3NatTestCaseMixin):
    """Plugin behaviour when the NVP backend is out of sync with Neutron.

    Each test creates a resource through the API, wipes the corresponding
    fake-NVP dictionary to simulate a missing backend object, then verifies
    the API still behaves sanely (deletes succeed, shows report ERROR, ...).
    """

    def setUp(self):
        ext_mgr = test_l3_plugin.L3TestExtensionManager()
        test_lib.test_config['extension_manager'] = ext_mgr
        super(NiciraNeutronNVPOutOfSync, self).setUp()

    def test_delete_network_not_in_nvp(self):
        # Deleting a network missing from NVP still returns 204.
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        req = self.new_delete_request('networks', net1['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_show_network_not_in_nvp(self):
        # Showing a network missing from NVP reports ERROR status.
        res = self._create_network('json', 'net1', True)
        net = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        req = self.new_show_request('networks', net['network']['id'],
                                    fields=['id', 'status'])
        net = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(net['network']['status'],
                         constants.NET_STATUS_ERROR)

    def test_delete_port_not_in_nvp(self):
        # Deleting a port whose lport is gone from NVP still returns 204.
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_lport_dict.clear()
        req = self.new_delete_request('ports', port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_show_port_not_in_nvp(self):
        # Showing a port whose lport/status is gone reports ERROR status.
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_lport_dict.clear()
        self.fc._fake_lswitch_lportstatus_dict.clear()
        req = self.new_show_request('ports', port['port']['id'],
                                    fields=['id', 'status'])
        net = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(net['port']['status'],
                         constants.PORT_STATUS_ERROR)

    def test_create_port_on_network_not_in_nvp(self):
        # Creating a port on a missing backend network yields ERROR status.
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR)

    def test_update_port_not_in_nvp(self):
        # The DB update is still applied even though the backend object is
        # gone; the port ends up flagged with ERROR status.
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_lport_dict.clear()
        data = {'port': {'name': 'error_port'}}
        req = self.new_update_request('ports', data, port['port']['id'])
        port = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR)
        self.assertEqual(port['port']['name'], 'error_port')

    def test_delete_port_and_network_not_in_nvp(self):
        # Both the port and its network can be deleted when neither exists
        # on the backend anymore.
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        self.fc._fake_lswitch_lport_dict.clear()
        req = self.new_delete_request('ports', port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)
        req = self.new_delete_request('networks', net1['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_delete_router_not_in_nvp(self):
        # Deleting a router missing from NVP still returns 204.
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_delete_request('routers', router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 204)

    def test_show_router_not_in_nvp(self):
        # Showing a router missing from NVP reports ERROR status.
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_show_request('routers', router['router']['id'],
                                    fields=['id', 'status'])
        router = self.deserialize('json', req.get_response(self.ext_api))
        self.assertEqual(router['router']['status'],
                         constants.NET_STATUS_ERROR)

    def _create_network_and_subnet(self, cidr, external=False):
        # Helper returning (network_id, subnet_id); optionally marks the
        # network as external.
        net_res = self._create_network('json', 'ext_net', True)
        net = self.deserialize('json', net_res)
        net_id = net['network']['id']
        if external:
            self._update('networks', net_id,
                         {'network': {external_net.EXTERNAL: True}})
        sub_res = self._create_subnet('json', net_id, cidr)
        sub = self.deserialize('json', sub_res)
        return net_id, sub['subnet']['id']

    def test_clear_gateway_nat_rule_not_in_nvp(self):
        # Create external network and subnet
        ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0]
        # Create internal network and subnet
        int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1]
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        # Add interface to router (needed to generate NAT rule)
        req = self.new_action_request(
            'routers',
            {'subnet_id': int_sub_id},
            router['router']['id'],
            "add_router_interface")
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)
        # Set gateway for router
        req = self.new_update_request(
            'routers',
            {'router': {'external_gateway_info':
                        {'network_id': ext_net_id}}},
            router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)
        # Delete NAT rule from NVP, clear gateway
        # and verify operation still succeeds
        self.fc._fake_lrouter_nat_dict.clear()
        req = self.new_update_request(
            'routers',
            {'router': {'external_gateway_info': {}}},
            router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)

    def test_update_router_not_in_nvp(self):
        # Updating a missing router returns 500 and flags it as ERROR.
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_update_request(
            'routers',
            {'router': {'name': 'goo'}},
            router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 500)
        req = self.new_show_request('routers', router['router']['id'])
        router = self.deserialize('json', req.get_response(self.ext_api))
        self.assertEqual(router['router']['status'],
                         constants.NET_STATUS_ERROR)
class TestNiciraNetworkGateway(test_l2_gw.NetworkGatewayDbTestCase,
                               NiciraPluginV2TestCase):
    """L2 network-gateway tests run against the NVP plugin, plus NVP-specific
    cases (name truncation on the backend, backend error mapping)."""

    def setUp(self):
        cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
        super(TestNiciraNetworkGateway, self).setUp()

    def test_create_network_gateway_name_exceeds_40_chars(self):
        # NVP display names are limited to 40 chars, but the Neutron-side
        # name must be stored untruncated.
        name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars'
        with self._network_gateway(name=name) as nw_gw:
            # Assert Neutron name is not truncated
            self.assertEqual(nw_gw[self.resource]['name'], name)

    def test_update_network_gateway_with_name_calls_backend(self):
        # Renames must be propagated to the NVP backend.
        with mock.patch.object(
            nvplib, 'update_l2_gw_service') as mock_update_gw:
            with self._network_gateway(name='cavani') as nw_gw:
                nw_gw_id = nw_gw[self.resource]['id']
                self._update(nvp_networkgw.COLLECTION_NAME, nw_gw_id,
                             {self.resource: {'name': 'higuain'}})
                mock_update_gw.assert_called_once_with(
                    mock.ANY, nw_gw_id, 'higuain')

    def test_update_network_gateway_without_name_does_not_call_backend(self):
        # Updates that do not change the name skip the backend call.
        with mock.patch.object(
            nvplib, 'update_l2_gw_service') as mock_update_gw:
            with self._network_gateway(name='something') as nw_gw:
                nw_gw_id = nw_gw[self.resource]['id']
                self._update(nvp_networkgw.COLLECTION_NAME, nw_gw_id,
                             {self.resource: {}})
                self.assertEqual(mock_update_gw.call_count, 0)

    def test_update_network_gateway_name_exceeds_40_chars(self):
        new_name = 'this_is_a_gateway_whose_name_is_longer_than_40_chars'
        with self._network_gateway(name='something') as nw_gw:
            nw_gw_id = nw_gw[self.resource]['id']
            self._update(nvp_networkgw.COLLECTION_NAME, nw_gw_id,
                         {self.resource: {'name': new_name}})
            req = self.new_show_request(nvp_networkgw.COLLECTION_NAME,
                                        nw_gw_id)
            res = self.deserialize('json', req.get_response(self.ext_api))
            # Assert Neutron name is not truncated
            self.assertEqual(new_name, res[self.resource]['name'])
            # Assert NVP name is truncated
            self.assertEqual(
                new_name[:40],
                self.fc._fake_gatewayservice_dict[nw_gw_id]['display_name'])

    def test_create_network_gateway_nvp_error_returns_500(self):
        # Generic backend failures map to HTTP 500.
        def raise_nvp_api_exc(*args, **kwargs):
            raise NvpApiClient.NvpApiException

        with mock.patch.object(nvplib,
                               'create_l2_gw_service',
                               new=raise_nvp_api_exc):
            res = self._create_network_gateway(
                self.fmt, 'xxx', name='yyy',
                devices=[{'id': uuidutils.generate_uuid()}])
            self.assertEqual(500, res.status_int)

    def test_create_network_gateway_nvp_error_returns_409(self):
        # Backend conflicts map to HTTP 409.
        with mock.patch.object(nvplib,
                               'create_l2_gw_service',
                               side_effect=NvpApiClient.Conflict):
            res = self._create_network_gateway(
                self.fmt, 'xxx', name='yyy',
                devices=[{'id': uuidutils.generate_uuid()}])
            self.assertEqual(409, res.status_int)

    def test_list_network_gateways(self):
        with self._network_gateway(name='test-gw-1') as gw1:
            with self._network_gateway(name='test_gw_2') as gw2:
                req = self.new_list_request(nvp_networkgw.COLLECTION_NAME)
                res = self.deserialize('json', req.get_response(self.ext_api))
                # We expect the default gateway too
                key = self.resource + 's'
                self.assertEqual(len(res[key]), 3)
                self.assertEqual(res[key][0]['default'],
                                 True)
                self.assertEqual(res[key][1]['name'],
                                 gw1[self.resource]['name'])
                self.assertEqual(res[key][2]['name'],
                                 gw2[self.resource]['name'])

    def test_list_network_gateway_with_multiple_connections(self):
        self._test_list_network_gateway_with_multiple_connections(
            expected_gateways=2)

    def test_delete_network_gateway(self):
        # The default gateway must still be there
        self._test_delete_network_gateway(1)
class TestNiciraMultiProviderNetworks(NiciraPluginV2TestCase):
    """Tests for provider and multi-provider (multi-segment) networks."""

    def setUp(self, plugin=None):
        cfg.CONF.set_override('api_extensions_path', NVPEXT_PATH)
        super(TestNiciraMultiProviderNetworks, self).setUp()

    def _assert_segments_match(self, actual_segments, expected_segments):
        """Compare each segment returned by the API against the request
        payload, field by field."""
        for actual, expected in zip(actual_segments, expected_segments):
            for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                          pnet.SEGMENTATION_ID]:
                self.assertEqual(actual.get(field), expected.get(field))

    def test_create_network_provider(self):
        """A single-segment provider network exposes flat provider fields."""
        data = {'network': {'name': 'net1',
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.PHYSICAL_NETWORK: 'physnet1',
                            pnet.SEGMENTATION_ID: 1,
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
        self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1)
        self.assertNotIn(mpnet.SEGMENTS, network['network'])

    def test_create_network_single_multiple_provider(self):
        """A one-segment multiprovider request hides flat provider fields."""
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1}],
                            'tenant_id': 'tenant_one'}}
        net_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt, net_req.get_response(self.api))
        for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                               pnet.SEGMENTATION_ID]:
            self.assertNotIn(provider_field, network['network'])
        tz = network['network'][mpnet.SEGMENTS][0]
        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
        # Tests get_network()
        net_req = self.new_show_request('networks', network['network']['id'])
        network = self.deserialize(self.fmt, net_req.get_response(self.api))
        tz = network['network'][mpnet.SEGMENTS][0]
        self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
        self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
        self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)

    def test_create_network_multprovider(self):
        """Segments round-trip through create_network and get_network."""
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1},
                             {pnet.NETWORK_TYPE: 'stt',
                              pnet.PHYSICAL_NETWORK: 'physnet1'}],
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        network = self.deserialize(self.fmt,
                                   network_req.get_response(self.api))
        # Bug fix: the original loop reused the variable `tz` for both the
        # response and the request segments, so each segment was compared
        # with itself and the assertions were vacuous.
        self._assert_segments_match(network['network'][mpnet.SEGMENTS],
                                    data['network'][mpnet.SEGMENTS])
        # Tests get_network()
        net_req = self.new_show_request('networks', network['network']['id'])
        network = self.deserialize(self.fmt, net_req.get_response(self.api))
        self._assert_segments_match(network['network'][mpnet.SEGMENTS],
                                    data['network'][mpnet.SEGMENTS])

    def test_create_network_with_provider_and_multiprovider_fail(self):
        """Mixing flat provider attributes with segments is rejected (400)."""
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1}],
                            pnet.NETWORK_TYPE: 'vlan',
                            pnet.PHYSICAL_NETWORK: 'physnet1',
                            pnet.SEGMENTATION_ID: 1,
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        res = network_req.get_response(self.api)
        self.assertEqual(res.status_int, 400)

    def test_create_network_duplicate_segments(self):
        """Duplicate segments within one request are rejected (400)."""
        data = {'network': {'name': 'net1',
                            mpnet.SEGMENTS:
                            [{pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1},
                             {pnet.NETWORK_TYPE: 'vlan',
                              pnet.PHYSICAL_NETWORK: 'physnet1',
                              pnet.SEGMENTATION_ID: 1}],
                            'tenant_id': 'tenant_one'}}
        network_req = self.new_create_request('networks', data)
        res = network_req.get_response(self.api)
        self.assertEqual(res.status_int, 400)
| |
# -*- coding: utf-8 -*-
"""
engineauth.models
====================================
Auth related models.
:copyright: 2011 by Rodrigo Moraes.
:license: Apache Software License, see LICENSE for details.
:copyright: 2011 by tipfy.org.
:license: Apache Software License, see LICENSE for details.
"""
from engineauth import config
from google.appengine.ext import ndb
from webapp2_extras import securecookie
from webapp2_extras import security
class Error(Exception):
    """Base exception for engineauth model errors."""
class DuplicatePropertyError(Error):
    """Raised when a unique value already belongs to another entity.

    :param value:
        List of property names (e.g. ``['email']``) found to be duplicated.
    """
    def __init__(self, value):
        # Bug fix: the base Exception.__init__ was never called, so
        # ``e.args`` was empty and ``str(e)`` returned ''.
        super(DuplicatePropertyError, self).__init__(value)
        self.values = value
        # Fixed message typo ("properties(s)").
        self.msg = u'duplicate property(s) were found.'
class UserProfile(ndb.Expando):
    """
    ``ndb.Expando`` is used to store the user_info object as well as
    any additional information specific to a strategy.
    """
    _default_indexed = False

    # Raw provider user-info payload (stored as compressed JSON).
    user_info = ndb.JsonProperty(indexed=False, compressed=True)
    # Provider credentials object, pickled (e.g. OAuth tokens).
    credentials = ndb.PickleProperty(indexed=False)

    @classmethod
    def get_or_create(cls, auth_id, user_info, **kwargs):
        """Fetch the profile stored under *auth_id*, creating it if missing.

        The profile's ``user_info`` and any extra ``kwargs`` are (re)applied
        and the entity saved on every call, so existing profiles are
        refreshed as a side effect.
        """
        profile = cls.get_by_id(auth_id)
        if profile is None:
            profile = cls(id=auth_id)
        profile.user_info = user_info
        profile.populate(**kwargs)
        profile.put()
        return profile
class UserToken(ndb.Model):
    """Stores validation tokens for users."""

    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)
    # String form of the owning user's unique id.
    user = ndb.StringProperty(required=True, indexed=False)
    subject = ndb.StringProperty(required=True)
    token = ndb.StringProperty(required=True)

    @classmethod
    def get_key(cls, user, subject, token):
        """Returns a token key.

        :param user:
            User unique ID.
        :param subject:
            The subject of the key. Examples:

            - 'auth'
            - 'signup'
        :param token:
            Randomly generated token.
        :returns:
            ``model.Key`` containing a string id in the following format:
            ``{user_id}.{subject}.{token}``
        """
        return ndb.Key(cls, '%s.%s.%s' % (str(user), subject, token))

    @classmethod
    def create(cls, user, subject, token=None):
        """Creates a new token for the given user.

        :param user:
            User unique ID.
        :param subject:
            The subject of the key. Examples:

            - 'auth'
            - 'signup'
        :param token:
            Optionally an existing token may be provided.
            If None, a random token will be generated.
        :returns:
            The newly created :class:`UserToken`.
        """
        user = str(user)
        token = token or security.generate_random_string(entropy=128)
        key = cls.get_key(user, subject, token)
        entity = cls(key=key, user=user, subject=subject, token=token)
        entity.put()
        return entity

    @classmethod
    def get(cls, user=None, subject=None, token=None):
        """Fetches a user token.

        :param user:
            User unique ID.
        :param subject:
            The subject of the key. Examples:

            - 'auth'
            - 'signup'
        :param token:
            The existing token needing verified.
        :returns:
            A :class:`UserToken` or None if the token does not exist.
        """
        if user and subject and token:
            # All three parts known: cheap direct key lookup.
            return cls.get_key(user, subject, token).get()
        assert subject and token, \
            u'subject and token must be provided to UserToken.get().'
        return cls.query(cls.subject == subject, cls.token == token).get()
class UserEmail(ndb.Model):
    """Email address belonging to a :class:`User`.

    The lower-cased address doubles as the entity id, which enforces global
    uniqueness of addresses across users.
    """
    # String form of the owning User's key id.
    user_id = ndb.StringProperty(indexed=True)
    # The lower-cased address; duplicated from the key so it can be queried.
    value = ndb.StringProperty(indexed=True)
    # Free-form label, e.g. 'home' or 'work'.
    type = ndb.StringProperty(indexed=False)
    primary = ndb.BooleanProperty(default=False, indexed=False)
    verified = ndb.BooleanProperty(default=False, indexed=True)

    @classmethod
    def create(cls, address, user_id, primary=None, verified=None, type=None):
        """Create (or overwrite) the email entity for *address*.

        :raises DuplicatePropertyError:
            If the address is already registered to a different user.
        :returns:
            The saved :class:`UserEmail` entity.
        """
        address = address.lower()
        email = cls.get_by_id(address)
        if email is not None and email.user_id != user_id:
            raise DuplicatePropertyError(['email'])
        email = cls(id=address,
                    value=address,
                    user_id=user_id,
                    primary=primary,
                    verified=verified,
                    type=type)
        email.put()
        # Bug fix: previously returned ``cls`` (the class object) instead of
        # the entity that was just created and saved.
        return email

    @classmethod
    def get_by_user(cls, user_id):
        """Return up to 25 email entities owned by *user_id*."""
        user_id = str(user_id)
        return cls.query(cls.user_id == user_id).fetch(25)

    @classmethod
    def get_by_emails(cls, addresses):
        """Return entities matching any of *addresses*, or None if none."""
        assert isinstance(addresses, list), 'Email addresses must be a list'
        if not addresses:
            return None
        results = cls.query(cls.value.IN(addresses)).fetch(25)
        return results or None
class User(ndb.Expando):
    """Stores user authentication credentials or authorization ids."""

    # Model class used for this user's email addresses.
    email_model = UserEmail

    created = ndb.DateTimeProperty(auto_now_add=True)
    updated = ndb.DateTimeProperty(auto_now=True)
    # ID for third party authentication, e.g. 'google:username'. UNIQUE.
    auth_ids = ndb.StringProperty(repeated=True)
    # Primary email address for the user.
    email = ndb.StringProperty(indexed=False)
    authenticated = ndb.BooleanProperty(default=False)

    def get_id(self):
        """Returns this user's unique ID, which can be an integer or string."""
        return str(self.key.id())

    @staticmethod
    def generate_auth_id(provider, uid, subprovider=None):
        """Standardized generator for auth_ids

        :param provider:
            A String representing the provider of the id.
            E.g.

            - 'google'
            - 'facebook'
            - 'appengine_openid'
            - 'twitter'
        :param uid:
            A String representing a unique id generated by the Provider.
            I.e. a user id.
        :param subprovider:
            An Optional String representing a more granular subdivision of a provider.
            i.e. a appengine_openid has subproviders for Google, Yahoo, AOL etc.
        :return:
            A concatenated String in the following form:
            '{provider}#{subprovider}:{uid}'
            E.g.

            - 'facebook:1111111111'
            - 'twitter:1111111111'
            - 'appengine_google#yahoo:1111111111'
            - 'appengine_google#google:1111111111'
        """
        if subprovider is not None:
            provider = '{0}#{1}'.format(provider, subprovider)
        return '{0}:{1}'.format(provider, uid)

    def _add_auth_id(self, auth_id):
        """A helper method to add additional auth ids to a User

        :param auth_id:
            String representing a unique id for the user. Examples:

            - own:username
            - google:username
        :returns:
            The user entity (``self``); the id is saved if it was new.
        :raises DuplicatePropertyError:
            If the ``auth_id`` already belongs to a different user.
        """
        # If the auth_id is already in the list return True
        if auth_id in self.auth_ids:
            return self
        if self.__class__.get_by_auth_id(auth_id):
            raise DuplicatePropertyError(value=['auth_id'])
        else:
            self.auth_ids.append(auth_id)
            self.put()
            return self

    @classmethod
    def _get_by_auth_id(cls, auth_id):
        """Returns a user object based on a auth_id.

        :param auth_id:
            String representing a unique id for the user. Examples:

            - own:username
            - google:username
        :returns:
            A user object.
        """
        return cls.query(cls.auth_ids == auth_id).get()

    # Public alias for the lookup above.
    get_by_auth_id = _get_by_auth_id

    def get_emails(self):
        """Return the UserEmail entities owned by this user."""
        return self.email_model.get_by_user(self.get_id())

    def add_email(self, value, primary=None, verified=None, type=None):
        """Create a UserEmail for this user (see UserEmail.create)."""
        return self.email_model.create(value, self.get_id(), primary=primary,
                                       verified=verified, type=type)

    # def _has_email(self, email):
    #     """Convenience method that checks if a User has the provided email.
    #
    #     :param email:
    #         A String representing the email to check for
    #     :return:
    #         True if email is present, else False
    #     """
    #     for e in self.emails:
    #         if e.value == email:
    #             return True
    #     return False
    #
    # def _add_email(self, value, type=u'home', primary=False, verified=False):
    #     """Adds and email address to User
    #
    #     :param value:
    #         A String representing the email address
    #     :param type:
    #         A String representing the type of email.
    #         E.g.
    #         - 'home'
    #         - 'work'
    #         - 'other'
    #         default: 'home'
    #     :param primary:
    #         A Boolean indicting weather or not the email should be
    #         used for communication
    #         default: False
    #     :param verified:
    #         A Boolean indicting weather or not the email has been
    #         verified to be an active address owned by the User
    #         default: False
    #     :return:
    #         User object if the add succeeds
    #     :raise:
    #         ExistingAccountError is raised if the email address is
    #         already in the system user a different User account
    #     """
    #     if not value:
    #         return self
    #     value = value.lower()
    #     # check if the user has already added the address
    #     if self._has_email(value):
    #         return self
    #     # check for accounts using address
    #     if self.__class__().get_by_email(value):
    #         raise DuplicatePropertyError(value=['email'])
    #     email = self.email_model(value=value, type=type,
    #                              primary=primary, verified=verified)
    #     self.emails.append(email)
    ##     self.put()
    #     return self
    #
    # def _add_emails(self, emails):
    #     assert isinstance(emails, list), 'Emails must be a list'
    #     for email in emails:
    #         pass
    #
    # @classmethod
    # def _get_by_emails(cls, emails):
    #     """Returns the first User by email address
    #
    #     :param emails:
    #         List of email addresses to search by
    #     :return:
    #         A User object
    #     """
    #     assert isinstance(emails, list), 'Emails must be a list'
    #     email = emails.lower()
    #     return cls.query(cls.emails.value == email).get()

    @classmethod
    def _find_user(cls, auth_id, emails=None):
        """Find User by auth_id and optionally email address

        :param auth_id:
            A String representing a unique id to find the user by
        :param emails:
            Optional, list of email addresses to search by if auth_id
            returns None
        :return: A User by auth_id and optionally email
        """
        user = cls.get_by_auth_id(auth_id)
        if user is None and emails:
            # TODO: email should only be trusted if it is verified.
            assert isinstance(emails, list), 'Emails must be a list'
            address = [e['value'] for e in emails]
            # NOTE(review): get_by_emails() returns UserEmail entities (or a
            # list of them), not a User -- callers appear to expect a User
            # here; confirm intended behaviour.
            user = cls.email_model.get_by_emails(address)
        return user

    @classmethod
    def _create_user(cls, auth_ids, **user_values):
        """Creates a new user.

        :param auth_ids:
            A string (or list of strings) that is unique to the user. Users
            may have multiple auth ids. Example auth ids:

            - own:username
            - own:email@example.com
            - google:username
            - yahoo:username

            The value of each ``auth_id`` must be unique.
        :param user_values:
            Keyword arguments to create a new user entity. Since the model is
            an ``Expando``, any provided custom properties will be saved.
        :returns:
            The newly created user entity.
        :raises DuplicatePropertyError:
            If any of the ``auth_ids`` already exists.
        """
        if not isinstance(auth_ids, list):
            auth_ids = [auth_ids]
        user_values['auth_ids'] = auth_ids
        for auth_id in user_values['auth_ids']:
            if cls.get_by_auth_id(auth_id):
                raise DuplicatePropertyError(value=['auth_id'])
        user = cls(**user_values)
        user.put()
        return user

    # Public alias for user creation.
    create_user = _create_user

    @classmethod
    def _get_or_create(cls, auth_id, emails, **kwarg):
        """Return the user for *auth_id*/*emails*, creating one if needed."""
        assert isinstance(emails, list), 'Emails must be a list'
        user = cls._find_user(auth_id, emails)
        # if user and emails is not None:
        #     user._add_emails(emails)
        if user is None:
            user = cls._create_user(auth_id, **kwarg)
        return user

    @classmethod
    def get_or_create_by_profile(cls, profile):
        """Return (or create) the User associated with a UserProfile."""
        assert isinstance(profile, UserProfile), \
            'You must pass an instance of type engineauth.models.UserProfile.'
        emails = profile.user_info.get('info').get('emails') or []
        return cls._get_or_create(profile.key.id(), emails)

    def add_profile(self, profile):
        """Attach an additional UserProfile (auth id) to this user."""
        assert isinstance(profile, UserProfile),\
            'You must pass an instance of type engineauth.models.UserProfile.'
        return self._add_auth_id(profile.key.id())
class Session(ndb.Model):
    """Server-side session; keyed by a random sid, or by user id once the
    session is upgraded to a logged-in user session."""

    session_id = ndb.StringProperty()
    user_id = ndb.StringProperty()
    updated = ndb.DateTimeProperty(auto_now=True)
    # Arbitrary session payload, pickled.
    data = ndb.PickleProperty(compressed=True, default={})

    @staticmethod
    def _generate_sid():
        # 128 bits of entropy => unguessable session ids.
        return security.generate_random_string(entropy=128)

    @staticmethod
    def _serializer():
        # Signs/verifies the '_eauth' cookie with the configured secret key.
        engineauth_config = config.load_config()
        return securecookie.SecureCookieSerializer(engineauth_config['secret_key'])

    def hash(self):
        """
        Creates a unique hash from the session.
        This will be used to check for session changes.

        :return: A unique hash for the session
        """
        # NOTE(review): Python's hash()/str() are not guaranteed stable
        # across processes or interpreter runs; only reliable for change
        # detection within a single request.
        return hash(str(self))

    def serialize(self):
        """Return the signed cookie value carrying session_id and user_id."""
        values = self.to_dict(include=['session_id', 'user_id'])
        return self._serializer().serialize('_eauth', values)

    @classmethod
    def deserialize(cls, value):
        """Decode a signed '_eauth' cookie value into a dict (or None)."""
        return cls._serializer().deserialize('_eauth', value)

    @classmethod
    def get_by_value(cls, value):
        """Return the Session referenced by a cookie value, or None."""
        v = cls.deserialize(value)
        sid = v.get('session_id')
        return cls.get_by_sid(sid) if sid else None

    @classmethod
    def get_by_sid(cls, sid):
        """Return the Session stored under session id *sid*."""
        return cls.get_by_id(sid)

    @classmethod
    def upgrade_to_user_session(cls, session_id, user_id):
        """Replace an anonymous session with one keyed by *user_id*,
        carrying the old session's data over."""
        old_session = cls.get_by_sid(session_id)
        new_session = cls.create(user_id=user_id, data=old_session.data)
        old_session.key.delete()
        return new_session

    @classmethod
    def get_by_user_id(cls, user_id):
        # TODO: make sure that the user doesn't have multiple sessions
        user_id = str(user_id)
        return cls.query(cls.user_id == user_id).get()

    @classmethod
    def create(cls, user_id=None, **kwargs):
        """Create and save a session; anonymous sessions get a random sid,
        user sessions are keyed by the user id itself."""
        if user_id is None:
            session_id = cls._generate_sid()
        else:
            session_id = user_id = str(user_id)
        session = cls(id=session_id, session_id=session_id,
                      user_id=user_id, **kwargs)
        session.put()
        return session

    @classmethod
    def remove_inactive(cls, days_ago=30, now=None):
        """Delete sessions not updated within the last *days_ago* days."""
        import datetime
        # for testing we want to be able to pass a value for now.
        now = now or datetime.datetime.now()
        dtd = now + datetime.timedelta(-days_ago)
        for s in cls.query(cls.updated < dtd).fetch():
            s.key.delete()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import os
import shutil
import string
import logging
import subprocess
from subprocess import Popen, PIPE
from hashlib import sha1
from cStringIO import StringIO
from datetime import datetime
import tempfile
from shutil import rmtree
import tg
import pysvn
from paste.deploy.converters import asbool
from pymongo.errors import DuplicateKeyError
from pylons import tmpl_context as c, app_globals as g
from ming.base import Object
from ming.orm import Mapper, FieldProperty
from ming.utils import LazyProperty
from allura import model as M
from allura.lib import helpers as h
from allura.model.auth import User
from allura.model.repository import zipdir
log = logging.getLogger(__name__)
class Repository(M.Repository):
    """SVN-backed implementation of the Allura repository model."""
    tool_name = 'SVN'
    repo_id = 'svn'
    type_s = 'SVN Repository'

    class __mongometa__:
        name = 'svn-repository'

    branches = FieldProperty([dict(name=str, object_id=str)])
    _refresh_precompute = False

    @LazyProperty
    def _impl(self):
        # Backend implementation object, created on first access.
        return SVNImplementation(self)

    def clone_command(self, category, username=''):
        '''Return a string suitable for copy/paste that would clone this repo locally
           category is one of 'ro' (read-only), 'rw' (read/write), or 'https' (read/write via https)
        '''
        if not username and c.user not in (None, User.anonymous()):
            username = c.user.username
        # Category-specific template first, falling back to the generic one.
        tpl = string.Template(tg.config.get('scm.clone.%s.%s' % (category, self.tool)) or
                              tg.config.get('scm.clone.%s' % self.tool))
        return tpl.substitute(dict(username=username,
                                   source_url=self.clone_url(
                                       category, username) + c.app.config.options.get(
                                       'checkout_url'),
                                   dest_path=self.suggested_clone_dest_path()))

    def compute_diffs(self):
        # Diffs are computed on demand for SVN; nothing to precompute.
        return

    def latest(self, branch=None):
        """Return the HEAD commit; *branch* is ignored (SVN has one line of
        history through HEAD)."""
        if self._impl is None:
            # Defensive: _impl is a LazyProperty and normally never None.
            return None
        return self._impl.commit('HEAD')

    def tarball_filename(self, revision, path=None):
        # Append the in-repo path (slashes -> dashes) to the base filename.
        fn = super(Repository, self).tarball_filename(revision, path)
        path = self._impl._path_to_root(path, revision)
        fn += ('-' + '-'.join(path.split('/'))) if path else ''
        return fn

    def rev_to_commit_id(self, rev):
        """Map a revision spec (e.g. 'r123' or 'HEAD') to a commit id."""
        return self._impl.rev_parse(rev)
class SVNCalledProcessError(Exception):
    """Raised when an svn subprocess exits with a non-zero status.

    Carries the command, exit code, and captured stdout/stderr so the
    failure can be reported in full.
    """

    def __init__(self, cmd, returncode, stdout, stderr):
        self.cmd, self.returncode = cmd, returncode
        self.stdout, self.stderr = stdout, stderr

    def __str__(self):
        details = (self.cmd, self.returncode, self.stdout, self.stderr)
        return ("Command: '%s' returned non-zero exit status %s\n"
                "STDOUT: %s\nSTDERR: %s" % details)
def svn_path_exists(path, rev=None):
    """Return True if *path* exists in the repository.

    :param path: repository URL/path to probe.
    :param rev: optional revision number; falsy values mean HEAD.
    """
    client = SVNLibWrapper(pysvn.Client())
    if rev:
        revision = pysvn.Revision(pysvn.opt_revision_kind.number, rev)
    else:
        revision = pysvn.Revision(pysvn.opt_revision_kind.head)
    try:
        client.info2(path, revision=revision, recurse=False)
    except pysvn.ClientError:
        return False
    return True
class SVNLibWrapper(object):
    """Instrumentation-friendly facade over a ``pysvn.Client``.

    The commonly-used operations are spelled out as explicit methods so call
    sites can be hooked/instrumented; any other attribute falls through to
    the underlying client unchanged.
    """

    def __init__(self, client):
        self.client = client

    def checkout(self, *a, **kw):
        return self.client.checkout(*a, **kw)

    def add(self, *a, **kw):
        return self.client.add(*a, **kw)

    def checkin(self, *a, **kw):
        return self.client.checkin(*a, **kw)

    def info2(self, *a, **kw):
        return self.client.info2(*a, **kw)

    def log(self, *a, **kw):
        return self.client.log(*a, **kw)

    def cat(self, *a, **kw):
        return self.client.cat(*a, **kw)

    def list(self, *a, **kw):
        return self.client.list(*a, **kw)

    def __getattr__(self, name):
        # Only invoked for attributes not defined on the wrapper itself.
        return getattr(self.client, name)
class SVNImplementation(M.RepositoryImplementation):
post_receive_template = string.Template(
'#!/bin/bash\n'
'# The following is required for site integration, do not remove/modify.\n'
'# Place user hook code in post-commit-user and it will be called from here.\n'
'curl -s $url\n'
'\n'
'DIR="$$(dirname "$${BASH_SOURCE[0]}")"\n'
'if [ -x $$DIR/post-commit-user ]; then'
' exec $$DIR/post-commit-user "$$@"\n'
'fi')
    def __init__(self, repo):
        # The Repository model instance this implementation backs.
        self._repo = repo
    @LazyProperty
    def _svn(self):
        """Lazily-created per-instance pysvn client (wrapped for hooks)."""
        return SVNLibWrapper(pysvn.Client())
    @LazyProperty
    def _url(self):
        """``file://`` URL of the on-disk repository."""
        return 'file://%s%s' % (self._repo.fs_path, self._repo.name)
    def shorthand_for_commit(self, oid):
        """Return the short display form of a commit, e.g. ``[r42]``."""
        return '[r%d]' % self._revno(self.rev_parse(oid))
    def url_for_commit(self, commit, url_type=None):
        """Return the web URL for *commit* (a commit object, a revision id,
        or the default branch name); *url_type* is accepted for interface
        compatibility and unused here."""
        if hasattr(commit, '_id'):
            object_id = commit._id
        elif commit == self._repo.app.default_branch_name:
            object_id = commit
        else:
            object_id = self.rev_parse(commit)
        # Ids of the form '<prefix>:<rev>' are reduced to the bare revno.
        if ':' in object_id:
            object_id = str(self._revno(object_id))
        return os.path.join(self._repo.url(), object_id) + '/'
    def init(self, default_dirs=False, skip_special_files=False):
        """Create a fresh svn repository on disk with `svnadmin create`.

        If default_dirs is True, also make an initial commit adding the
        conventional trunk/tags/branches directories.
        """
        fullname = self._setup_paths()
        log.info('svn init %s', fullname)
        # Start from a clean slate if a repo already exists at this path.
        if os.path.exists(fullname):
            shutil.rmtree(fullname)
        subprocess.call(['svnadmin', 'create', self._repo.name],
                        stdin=subprocess.PIPE,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        cwd=self._repo.fs_path)
        if not skip_special_files:
            self._setup_special_files()
        self._repo.set_status('ready')
        # make first commit with dir structure
        if default_dirs:
            # Check out into a temp dir, add the three standard dirs,
            # commit, then throw the working copy away.
            tmp_working_dir = tempfile.mkdtemp(prefix='allura-svn-r1-',
                                               dir=tg.config.get('scm.svn.tmpdir', g.tmpdir))
            log.info('tmp dir = %s', tmp_working_dir)
            self._repo._impl._svn.checkout(
                'file://' + fullname, tmp_working_dir)
            os.mkdir(tmp_working_dir + '/trunk')
            os.mkdir(tmp_working_dir + '/tags')
            os.mkdir(tmp_working_dir + '/branches')
            self._repo._impl._svn.add(tmp_working_dir + '/trunk')
            self._repo._impl._svn.add(tmp_working_dir + '/tags')
            self._repo._impl._svn.add(tmp_working_dir + '/branches')
            self._repo._impl._svn.checkin([tmp_working_dir + '/trunk',
                                           tmp_working_dir + '/tags',
                                           tmp_working_dir + '/branches'],
                                          'Initial commit')
            shutil.rmtree(tmp_working_dir)
            log.info('deleted %s', tmp_working_dir)
    def can_hotcopy(self, source_url):
        """True when `svnadmin hotcopy` can be used to clone `source_url`:
        hotcopy is enabled in config, the source is a local file:// URL,
        and the installed svn is version 1.7 or later."""
        if not (asbool(tg.config.get('scm.svn.hotcopy', True)) and
                source_url.startswith('file://')):
            return False
        # check for svn version 1.7 or later
        stdout, stderr = self.check_call(['svn', '--version'])
        pattern = r'version (?P<maj>\d+)\.(?P<min>\d+)'
        m = re.search(pattern, stdout)
        return m and (int(m.group('maj')) * 10 + int(m.group('min'))) >= 17

    def check_call(self, cmd):
        """Run `cmd`; raise SVNCalledProcessError on nonzero exit.

        Returns (stdout, stderr).  'p\\n' is fed on stdin -- presumably to
        answer an interactive svn prompt (accept permanently); confirm.
        """
        p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate(input='p\n')
        if p.returncode != 0:
            # Reset status so the repo isn't left stuck in 'importing'.
            self._repo.set_status('ready')
            raise SVNCalledProcessError(cmd, p.returncode, stdout, stderr)
        return stdout, stderr
    def clone_from(self, source_url):
        '''Initialize a repo as a clone of another using svnsync'''
        self.init(skip_special_files=True)

        def set_hook(hook_name):
            # Install a no-op executable hook; svnsync requires
            # pre-revprop-change to exist and succeed.
            fn = os.path.join(self._repo.fs_path, self._repo.name,
                              'hooks', hook_name)
            with open(fn, 'wb') as fp:
                fp.write('#!/bin/sh\n')
            os.chmod(fn, 0755)

        def clear_hook(hook_name):
            # Remove a hook previously installed by set_hook.
            fn = os.path.join(self._repo.fs_path, self._repo.name,
                              'hooks', hook_name)
            os.remove(fn)

        self._repo.set_status('importing')
        log.info('Initialize %r as a clone of %s',
                 self._repo, source_url)
        if self.can_hotcopy(source_url):
            log.info('... cloning %s via hotcopy', source_url)
            # src repo is on the local filesystem - use hotcopy (faster)
            source_path, dest_path = source_url[7:], self._url[7:]
            fullname = os.path.join(self._repo.fs_path, self._repo.name)
            # hotcopy expects dest dir to not exist yet
            if os.path.exists(fullname):
                shutil.rmtree(fullname)
            self.check_call(['svnadmin', 'hotcopy', source_path, dest_path])
            # make sure new repo has a pre-revprop-change hook,
            # otherwise the sync will fail
            set_hook('pre-revprop-change')
            self.check_call(
                ['svnsync', '--non-interactive', '--allow-non-empty',
                 'initialize', self._url, source_url])
            clear_hook('pre-revprop-change')
        else:
            # Remote (or hotcopy-disabled) source: full svnsync replay.
            set_hook('pre-revprop-change')
            self.check_call(['svnsync', 'init', self._url, source_url])
            self.check_call(
                ['svnsync', '--non-interactive', 'sync', self._url])
            clear_hook('pre-revprop-change')
        log.info('... %r cloned', self._repo)
        self.update_checkout_url()
        self._setup_special_files(source_url)
    def update_checkout_url(self):
        """Validate the current ``checkout_url`` against the on-disk repo,
        and change it if necessary.

        If ``checkout_url`` is valid and not '', no changes are made.
        If ``checkout_url`` is invalid or '':

        - Set it to 'trunk' if repo has a top-level trunk directory
        - Else, set it to ''
        """
        opts = self._repo.app.config.options
        if not svn_path_exists('file://{0}{1}/{2}'.format(self._repo.fs_path,
                                                          self._repo.name, opts['checkout_url'])):
            opts['checkout_url'] = ''
        if (not opts['checkout_url'] and
                svn_path_exists(
                    'file://{0}{1}/trunk'.format(self._repo.fs_path,
                                                 self._repo.name))):
            opts['checkout_url'] = 'trunk'
    def commit(self, rev):
        """Return the Commit model object for `rev`, bound to this repo,
        or None when the commit isn't in the database."""
        oid = self.rev_parse(rev)
        result = M.repo.Commit.query.get(_id=oid)
        if result:
            result.set_context(self._repo)
        return result

    def rev_parse(self, rev):
        """Normalize a revision spec ('HEAD', None, revno, or full oid) to
        an oid string '<repo_id>:<revno>'."""
        if rev in ('HEAD', None):
            return self._oid(self.head)
        elif isinstance(rev, int) or rev.isdigit():
            return self._oid(rev)
        else:
            # already an oid
            return rev
    def all_commit_ids(self):
        """Return a list of commit ids, starting with the head (most recent
        commit) and ending with the root (first commit).
        """
        head_revno = self.head
        return map(self._oid, range(head_revno, 0, -1))

    def new_commits(self, all_commits=False):
        """Return oids not yet stored in the database, oldest first (or all
        oids for this repo when all_commits is True)."""
        head_revno = self.head
        oids = [self._oid(revno) for revno in range(1, head_revno + 1)]
        if all_commits:
            return oids
        # Find max commit id -- everything greater than that will be "unknown"
        prefix = self._oid('')
        q = M.repo.Commit.query.find(
            dict(
                type='commit',
                _id={'$gt': prefix},
            ),
            dict(_id=True)
        )
        seen_oids = set()
        for d in q.ming_cursor.cursor:
            oid = d['_id']
            # NOTE(review): the early break relies on the cursor returning
            # ids in ascending order -- confirm an index guarantees this.
            if not oid.startswith(prefix):
                break
            seen_oids.add(oid)
        return [
            oid for oid in oids if oid not in seen_oids]
    def refresh_commit_info(self, oid, seen_object_ids, lazy=True):
        """(Re)build the CommitDoc and DiffInfoDoc for commit `oid`.

        Returns False without doing work when the commit is already stored
        and `lazy` is True; returns True after a refresh.
        """
        from allura.model.repo import CommitDoc, DiffInfoDoc
        ci_doc = CommitDoc.m.get(_id=oid)
        if ci_doc and lazy:
            return False
        revno = self._revno(oid)
        rev = self._revision(oid)
        try:
            log_entry = self._svn.log(
                self._url,
                revision_start=rev,
                limit=1,
                discover_changed_paths=True)[0]
        except pysvn.ClientError:
            log.info('ClientError processing %r %r, treating as empty',
                     oid, self._repo, exc_info=True)
            log_entry = Object(date='', message='', changed_paths=[])
        log_date = None
        if hasattr(log_entry, 'date'):
            log_date = datetime.utcfromtimestamp(log_entry.date)
        # svn has no separate author/committer; the same user is used for both
        user = Object(
            name=h.really_unicode(log_entry.get('author', '--none--')),
            email='',
            date=log_date)
        args = dict(
            tree_id=None,
            committed=user,
            authored=user,
            message=h.really_unicode(log_entry.get("message", "--none--")),
            parent_ids=[],
            child_ids=[])
        # svn history is linear: parent is simply the previous revision
        if revno > 1:
            args['parent_ids'] = [self._oid(revno - 1)]
        if ci_doc:
            ci_doc.update(**args)
            ci_doc.m.save()
        else:
            ci_doc = CommitDoc(dict(args, _id=oid))
            try:
                ci_doc.m.insert(safe=True)
            except DuplicateKeyError:
                # Another process inserted it concurrently.
                if lazy:
                    return False
        # Save diff info
        di = DiffInfoDoc.make(dict(_id=ci_doc._id, differences=[]))
        for path in log_entry.changed_paths:
            if path.action in ('A', 'M', 'R'):
                try:
                    rhs_info = self._svn.info2(
                        self._url + h.really_unicode(path.path),
                        revision=self._revision(ci_doc._id),
                        recurse=False)[0][1]
                    rhs_id = self._obj_oid(ci_doc._id, rhs_info)
                except pysvn.ClientError, e:
                    # pysvn will sometimes misreport deleted files (D) as
                    # something else (like A), causing info2() to raise a
                    # ClientError since the file doesn't exist in this
                    # revision. Set lrhs_id = None to treat like a deleted file
                    log.info('This error was handled gracefully and logged '
                             'for informational purposes only:\n' + str(e))
                    rhs_id = None
            else:
                rhs_id = None
            if ci_doc.parent_ids and path.action in ('D', 'M', 'R'):
                try:
                    lhs_info = self._svn.info2(
                        self._url + h.really_unicode(path.path),
                        revision=self._revision(ci_doc.parent_ids[0]),
                        recurse=False)[0][1]
                    lhs_id = self._obj_oid(ci_doc._id, lhs_info)
                except pysvn.ClientError, e:
                    # pysvn will sometimes report new files as 'M'odified,
                    # causing info2() to raise ClientError since the file
                    # doesn't exist in the parent revision. Set lhs_id = None
                    # to treat like a newly added file.
                    log.info('This error was handled gracefully and logged '
                             'for informational purposes only:\n' + str(e))
                    lhs_id = None
            else:
                lhs_id = None
            di.differences.append(dict(
                name=h.really_unicode(path.path),
                lhs_id=lhs_id,
                rhs_id=rhs_id))
        di.m.save()
        return True
    def compute_tree_new(self, commit, tree_path='/'):
        """Create (if missing) the Tree document for `tree_path` at `commit`.

        Returns the tree id, or None when svn cannot list the path.
        """
        from allura.model import repo as RM
        # always leading slash, never trailing
        tree_path = '/' + tree_path.strip('/')
        tree_id = self._tree_oid(commit._id, tree_path)
        tree = RM.Tree.query.get(_id=tree_id)
        if tree:
            return tree_id
        log.debug('Computing tree for %s: %s',
                  self._revno(commit._id), tree_path)
        rev = self._revision(commit._id)
        try:
            infos = self._svn.info2(
                self._url + tree_path,
                revision=rev,
                depth=pysvn.depth.immediates)
        except pysvn.ClientError:
            log.exception('Error computing tree for: %s: %s(%s)',
                          self._repo, commit, tree_path)
            return None
        log.debug('Compute tree for %d paths', len(infos))
        tree_ids = []
        blob_ids = []
        lcd_entries = []
        # infos[0] is the directory itself; the rest are immediate children.
        for path, info in infos[1:]:
            if info.kind == pysvn.node_kind.dir:
                tree_ids.append(Object(
                    id=self._tree_oid(commit._id, path),
                    name=path))
            elif info.kind == pysvn.node_kind.file:
                # NOTE(review): files get _tree_oid ids here, while _obj_oid
                # uses _blob_oid for files -- confirm this is intentional.
                blob_ids.append(Object(
                    id=self._tree_oid(commit._id, path),
                    name=path))
            else:
                assert False
            lcd_entries.append(dict(
                name=path,
                commit_id=self._oid(info.last_changed_rev.number),
            ))
        tree, is_new = RM.Tree.upsert(tree_id,
                                      tree_ids=tree_ids,
                                      blob_ids=blob_ids,
                                      other_ids=[],
                                      )
        if is_new:
            commit_id = self._oid(infos[0][1].last_changed_rev.number)
            path = tree_path.strip('/')
            RM.TreesDoc.m.update_partial(
                {'_id': commit._id},
                {'$addToSet': {'tree_ids': tree_id}},
                upsert=True)
            RM.LastCommitDoc.m.update_partial(
                {'commit_id': commit_id, 'path': path},
                {'commit_id': commit_id, 'path':
                 path, 'entries': lcd_entries},
                upsert=True)
        return tree_id
    def _tree_oid(self, commit_id, path):
        """Deterministic id for the tree at `path` in `commit_id`."""
        data = 'tree\n%s\n%s' % (commit_id, h.really_unicode(path))
        return sha1(data.encode('utf-8')).hexdigest()

    def _blob_oid(self, commit_id, path):
        """Deterministic id for the blob at `path` in `commit_id`."""
        data = 'blob\n%s\n%s' % (commit_id, h.really_unicode(path))
        return sha1(data.encode('utf-8')).hexdigest()

    def _obj_oid(self, commit_id, info):
        """Tree or blob id for a pysvn info entry, based on its node kind."""
        # info.URL is absolute; strip the repository root to get the path.
        path = info.URL[len(info.repos_root_URL):]
        if info.kind == pysvn.node_kind.dir:
            return self._tree_oid(commit_id, path)
        else:
            return self._blob_oid(commit_id, path)
    def log(self, revs=None, path=None, exclude=None, id_only=True, page_size=25, **kw):
        """
        Returns a generator that returns information about commits reachable
        by revs.

        revs can be None or a list or tuple of identifiers, each of which
        can be anything parsable by self.commit().  If revs is None, the
        default head will be used.

        If path is not None, only commits which modify files under path
        will be included.

        Exclude can be None or a list or tuple of identifiers, each of which
        can be anything parsable by self.commit().  If not None, then any
        revisions reachable by any of the revisions in exclude will not be
        included.

        If id_only is True, returns only the commit ID, otherwise it returns
        detailed information about each commit.

        Since pysvn doesn't have a generator version of log, this tries to
        balance pulling too much data from SVN with calling SVN too many
        times by pulling in pages of page_size at a time.
        """
        if revs is None:
            revno = self.head
        else:
            revno = max([self._revno(self.rev_parse(r)) for r in revs])
        if exclude is None:
            exclude = 0
        else:
            exclude = max([self._revno(self.rev_parse(r)) for r in exclude])
        if path is None:
            url = self._url
        else:
            url = '/'.join([self._url, path.strip('/')])
        while revno > exclude:
            rev = pysvn.Revision(pysvn.opt_revision_kind.number, revno)
            try:
                logs = self._svn.log(
                    url, revision_start=rev, peg_revision=rev, limit=page_size,
                    discover_changed_paths=True)
            except pysvn.ClientError as e:
                if 'Unable to connect' in e.message:
                    raise  # repo error
                return  # no (more) history for this path
            for ci in logs:
                if ci.revision.number <= exclude:
                    return
                if id_only:
                    yield ci.revision.number
                else:
                    yield self._map_log(ci, url, path)
            if len(logs) < page_size:
                # we didn't get a full page, don't bother calling SVN again
                return
            # continue paging from just below the oldest revision seen
            revno = ci.revision.number - 1
def _check_changed_path(self, changed_path, path):
if (changed_path['copyfrom_path'] and
changed_path['path'] and
path and
(len(changed_path['path']) < len(path)) and
path.startswith(changed_path['path'])):
changed_path['copyfrom_path'] = changed_path['copyfrom_path'] + \
path[len(changed_path['path']):]
changed_path['path'] = path
return changed_path
    def _map_log(self, ci, url, path=None):
        """Convert a pysvn log entry `ci` into the detailed commit dict
        yielded by log(id_only=False)."""
        revno = ci.revision.number
        rev = pysvn.Revision(pysvn.opt_revision_kind.number, revno)
        try:
            size = int(
                self._svn.list(url, revision=rev, peg_revision=rev)[0][0].size)
        except pysvn.ClientError:
            size = None
        rename_details = {}
        changed_paths = ci.get('changed_paths', [])
        for changed_path in changed_paths:
            changed_path = self._check_changed_path(changed_path, path)
            # An 'A'dd with copyfrom info for our path is a rename/copy.
            if changed_path['copyfrom_path'] and changed_path['path'] == path and changed_path['action'] == 'A':
                rename_details['path'] = changed_path['copyfrom_path']
                rename_details['commit_url'] = self._repo.url_for_commit(
                    changed_path['copyfrom_revision'].number
                )
                break
        return {
            'id': revno,
            'message': h.really_unicode(ci.get('message', '--none--')),
            # svn has no separate author/committer; report the same user twice
            'authored': {
                'name': h.really_unicode(ci.get('author', '--none--')),
                'email': '',
                'date': datetime.utcfromtimestamp(ci.date),
            },
            'committed': {
                'name': h.really_unicode(ci.get('author', '--none--')),
                'email': '',
                'date': datetime.utcfromtimestamp(ci.date),
            },
            'refs': ['HEAD'] if revno == self.head else [],
            'parents': [revno - 1] if revno > 1 else [],
            'size': size,
            'rename_details': rename_details,
        }
    def open_blob(self, blob):
        """Return a file-like object with `blob`'s contents at its commit."""
        data = self._svn.cat(
            self._url + blob.path(),
            revision=self._revision(blob.commit._id))
        return StringIO(data)

    def blob_size(self, blob):
        """Return `blob`'s size in bytes at its commit; 0 on any error."""
        try:
            rev = self._revision(blob.commit._id)
            data = self._svn.list(
                self._url + blob.path(),
                revision=rev,
                peg_revision=rev,
                dirent_fields=pysvn.SVN_DIRENT_SIZE)
        except pysvn.ClientError:
            log.info('ClientError getting filesize %r %r, returning 0',
                     blob.path(), self._repo, exc_info=True)
            return 0
        try:
            size = data[0][0]['size']
        except (IndexError, KeyError):
            log.info(
                'Error getting filesize: bad data from svn client %r %r, returning 0',
                blob.path(), self._repo, exc_info=True)
            size = 0
        return size
    def _setup_hooks(self, source_path=None):
        'Set up the post-commit and pre-revprop-change hooks'
        # setup a post-commit hook to notify Allura of changes to the repo
        # the hook should also call the user-defined post-commit-user hook
        text = self.post_receive_template.substitute(
            url=self._repo.refresh_url())
        fn = os.path.join(self._repo.fs_path, self._repo.name,
                          'hooks', 'post-commit')
        with open(fn, 'wb') as fp:
            fp.write(text)
        # hook must be executable for svn to run it
        os.chmod(fn, 0755)
    def _revno(self, oid):
        """Extract the integer revno from an oid ('<repo_id>:<revno>')."""
        return int(oid.split(':')[1])

    def _revision(self, oid):
        """Return a pysvn Revision for the revno encoded in `oid`."""
        return pysvn.Revision(
            pysvn.opt_revision_kind.number,
            self._revno(oid))

    def _oid(self, revno):
        """Build the commit oid '<repo_id>:<revno>' for this repo."""
        return '%s:%s' % (self._repo._id, revno)
    def last_commit_ids(self, commit, paths):
        '''
        Return a mapping {path: commit_id} of the _id of the last
        commit to touch each path, starting from the given commit.

        Since SVN Diffs are computed on-demand, we can't walk the
        commit tree to find these.  However, we can ask SVN for it
        with a single call, so it shouldn't be too expensive.

        NB: This assumes that all paths are direct children of a
        single common parent path (i.e., you are only asking for
        a subset of the nodes of a single tree, one level deep).
        '''
        if len(paths) == 1:
            tree_path = '/' + os.path.dirname(paths[0].strip('/'))
        else:
            # always leading slash, never trailing
            tree_path = '/' + os.path.commonprefix(paths).strip('/')
        paths = [path.strip('/') for path in paths]
        rev = self._revision(commit._id)
        try:
            infos = self._svn.info2(
                self._url + tree_path,
                revision=rev,
                depth=pysvn.depth.immediates)
        except pysvn.ClientError:
            log.exception('Error computing tree for: %s: %s(%s)',
                          self._repo, commit, tree_path)
            return None
        entries = {}
        # infos[0] is the parent directory itself; children follow.
        for path, info in infos[1:]:
            path = os.path.join(tree_path, path).strip('/')
            if path in paths:
                entries[path] = self._oid(info.last_changed_rev.number)
        return entries
    def get_changes(self, oid):
        """Return the list of paths changed by commit `oid` (empty on svn
        errors)."""
        rev = self._revision(oid)
        try:
            log_entry = self._svn.log(
                self._url,
                revision_start=rev,
                limit=1,
                discover_changed_paths=True)[0]
        except pysvn.ClientError:
            log.info('ClientError processing %r %r, treating as empty',
                     oid, self._repo, exc_info=True)
            log_entry = Object(date='', message='', changed_paths=[])
        return [p.path for p in log_entry.changed_paths]
def _path_to_root(self, path, rev=None):
'''Return tag/branch/trunk root for given path inside svn repo'''
if path:
path = path.strip('/').split('/')
idx = None
if 'tags' in path:
idx = path.index('tags')
elif 'branches' in path:
idx = path.index('branches')
# e.g. path/tags/tag-1.0/...
if idx is not None and idx < len(path) - 1:
return '/'.join(path[:idx + 2]) # path/tags/tag-1.0
if 'trunk' in path:
idx = path.index('trunk')
return '/'.join(path[:idx + 1]) # path/trunk
# no tag/brach/trunk in path
trunk_exists = svn_path_exists(
'file://%s%s/%s' % (self._repo.fs_path, self._repo.name, 'trunk'), rev)
if trunk_exists:
return 'trunk'
return ''
    def tarball(self, commit, path=None):
        """Export `commit` (restricted to `path`'s tag/branch/trunk root)
        and zip it into the repo's tarball directory.

        NOTE(review): `commit` is used as a numeric revision here (passed to
        pysvn.Revision) -- confirm callers pass a revno, not an oid.
        """
        path = self._path_to_root(path, commit)
        if not os.path.exists(self._repo.tarball_path):
            os.makedirs(self._repo.tarball_path)
        archive_name = self._repo.tarball_filename(commit, path)
        dest = os.path.join(self._repo.tarball_path, archive_name)
        filename = os.path.join(self._repo.tarball_path, '%s%s' %
                                (archive_name, '.zip'))
        tmpfilename = os.path.join(self._repo.tarball_path, '%s%s' %
                                   (archive_name, '.tmp'))
        rmtree(dest, ignore_errors=True)
        path = os.path.join(self._url, path)
        try:
            # need to set system locale to handle all symbols in filename
            import locale
            locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
            self._svn.export(path,
                             dest,
                             revision=pysvn.Revision(
                                 pysvn.opt_revision_kind.number, commit),
                             ignore_externals=True)
            # zip to a temp name, then rename so readers never see a
            # half-written archive
            zipdir(dest, tmpfilename)
            os.rename(tmpfilename, filename)
        finally:
            rmtree(dest, ignore_errors=True)
            if os.path.exists(tmpfilename):
                os.remove(tmpfilename)
    def is_empty(self):
        """True when the repository has no revisions yet."""
        return self.head == 0

    def is_file(self, path, rev=None):
        """True when `path` is a file (not a directory) at revision `rev`."""
        url = '/'.join([self._url, path.strip('/')])
        rev = pysvn.Revision(pysvn.opt_revision_kind.number,
                             self._revno(self.rev_parse(rev)))
        try:
            info = self._svn.list(
                url, revision=rev, peg_revision=rev, dirent_fields=pysvn.SVN_DIRENT_KIND)[0][0]
            return info.kind == pysvn.node_kind.file
        except pysvn.ClientError:
            # path doesn't exist at that revision
            return False

    def symbolics_for_commit(self, commit):
        """svn models no per-commit branch/tag refs; always ([], [])."""
        return [], []
    @LazyProperty
    def head(self):
        """Latest revision number of the repo; 0 for missing/empty repos."""
        try:
            return int(self._svn.revpropget('revision', url=self._url)[0].number)
        except pysvn.ClientError as e:
            # repo not on disk (or unreachable) yet -- treat as empty
            if str(e).startswith("Unable to connect") or \
                    str(e).startswith("Unable to open"):
                return 0
            else:
                raise

    @LazyProperty
    def heads(self):
        """Single pseudo-head pointing at the latest revision."""
        return [Object(name=None, object_id=self._oid(self.head))]

    @LazyProperty
    def branches(self):
        # svn branches are plain directories; none are modeled here
        return []

    @LazyProperty
    def tags(self):
        # svn tags are plain directories; none are modeled here
        return []
# Compile all registered mappers now that the model classes above exist.
Mapper.compile_all()
| |
import os
import socket
from os import path
from uuid import uuid4
from cattle.utils import memoize
# Runtime configuration overrides (set via the Config.set_* helpers); these
# take precedence over environment variables in default_value().
CONFIG_OVERRIDE = {}

try:
    import eventlet  # NOQA
except ImportError:
    # eventlet is optional; Config.is_eventlet() checks whether it loaded.
    # (Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt.)
    pass


def default_value(name, default):
    """Return the configuration value for `name`.

    Priority: explicit override in CONFIG_OVERRIDE, then the CATTLE_<name>
    environment variable, then `default`.
    """
    if name in CONFIG_OVERRIDE:
        return CONFIG_OVERRIDE[name]
    return os.environ.get('CATTLE_%s' % name, default)
_SCHEMAS = '/schemas'
def _strip_schemas(url):
if url is None:
return None
if url.endswith(_SCHEMAS):
return url[0:len(url)-len(_SCHEMAS)]
return url
class Config:
    """Static accessors for agent configuration.

    Each value comes from (in priority order) CONFIG_OVERRIDE, the
    CATTLE_* environment, then a hard-coded default -- see default_value().
    """

    def __init__(self):
        pass

    @staticmethod
    @memoize
    def _get_uuid_from_file(uuid_file):
        """Read a uuid from `uuid_file`; generate and persist one if the
        file is missing or empty."""
        uuid = None
        if path.exists(uuid_file):
            with open(uuid_file) as f:
                uuid = f.read().strip()
            if len(uuid) == 0:
                uuid = None
        if uuid is None:
            uuid = str(uuid4())
            with open(uuid_file, 'w') as f:
                f.write(uuid)
        return uuid

    @staticmethod
    def physical_host_uuid_file():
        def_value = '{0}/.physical_host_uuid'.format(Config.home())
        return default_value('PHYSICAL_HOST_UUID_FILE', def_value)

    @staticmethod
    def physical_host_uuid():
        return Config.get_uuid_from_file('PHYSICAL_HOST_UUID',
                                         Config.physical_host_uuid_file())

    @staticmethod
    def setup_logger():
        return default_value('LOGGER', 'true') == 'true'

    @staticmethod
    def do_ping():
        return default_value('PING_ENABLED', 'true') == 'true'

    @staticmethod
    def get_uuid_from_file(env_name, uuid_file):
        """Return the uuid from env/override `env_name` if set, otherwise
        from (or persisted into) `uuid_file`."""
        uuid = default_value(env_name, None)
        if uuid is not None:
            return uuid
        return Config._get_uuid_from_file(uuid_file)

    @staticmethod
    def hostname():
        return default_value('HOSTNAME', socket.gethostname())

    @staticmethod
    def workers():
        return int(default_value('WORKERS', '50'))

    @staticmethod
    def set_secret_key(value):
        CONFIG_OVERRIDE['SECRET_KEY'] = value

    @staticmethod
    def secret_key():
        return default_value('SECRET_KEY', 'adminpass')

    @staticmethod
    def set_access_key(value):
        CONFIG_OVERRIDE['ACCESS_KEY'] = value

    @staticmethod
    def access_key():
        return default_value('ACCESS_KEY', 'admin')

    @staticmethod
    def set_api_url(value):
        CONFIG_OVERRIDE['URL'] = value

    @staticmethod
    def api_url(default=None):
        return _strip_schemas(default_value('URL', default))

    @staticmethod
    def api_auth():
        return Config.access_key(), Config.secret_key()

    @staticmethod
    def config_url():
        ret = default_value('CONFIG_URL', None)
        if ret is None:
            return Config.api_url()
        else:
            return ret

    @staticmethod
    def is_multi_proc():
        return Config.multi_style() == 'proc'

    @staticmethod
    def is_multi_thread():
        return Config.multi_style() == 'thread'

    @staticmethod
    def is_eventlet():
        """True when eventlet imported successfully at module load and
        AGENT_MULTI is unset or explicitly 'eventlet'."""
        if 'eventlet' not in globals():
            return False
        setting = default_value('AGENT_MULTI', None)
        if setting is None or setting == 'eventlet':
            return True
        return False

    @staticmethod
    def multi_style():
        return default_value('AGENT_MULTI', 'proc')

    @staticmethod
    def queue_depth():
        return int(default_value('QUEUE_DEPTH', 5))

    @staticmethod
    def stop_timeout():
        return int(default_value('STOP_TIMEOUT', 60))

    @staticmethod
    def log():
        return default_value('AGENT_LOG_FILE', 'agent.log')

    @staticmethod
    def debug():
        return default_value('DEBUG', 'false') == 'true'

    @staticmethod
    def home():
        return default_value('HOME', '/var/lib/cattle')

    @staticmethod
    def agent_ip():
        return default_value('AGENT_IP', None)

    @staticmethod
    def agent_port():
        return default_value('AGENT_PORT', None)

    @staticmethod
    def config_sh():
        return default_value('CONFIG_SCRIPT',
                             '{0}/config.sh'.format(Config.home()))

    @staticmethod
    def physical_host():
        """Dict describing this physical host for registration."""
        return {
            'uuid': Config.physical_host_uuid(),
            'type': 'physicalHost',
            'kind': 'physicalHost',
            'name': Config.hostname()
        }

    @staticmethod
    def api_proxy_listen_port():
        return int(default_value('API_PROXY_LISTEN_PORT', '9342'))

    @staticmethod
    def api_proxy_listen_host():
        return default_value('API_PROXY_LISTEN_HOST', '0.0.0.0')

    @staticmethod
    def agent_instance_cattle_home():
        return default_value('AGENT_INSTANCE_CATTLE_HOME', '/var/lib/cattle')

    @staticmethod
    def lock_dir():
        return default_value('LOCK_DIR', os.path.join(Config.home(), 'locks'))

    @staticmethod
    def stamp():
        return default_value('STAMP_FILE', os.path.join(Config.home(),
                                                        '.pyagent-stamp'))

    @staticmethod
    def config_update_pyagent():
        return default_value('CONFIG_UPDATE_PYAGENT', 'true') == 'true'

    @staticmethod
    def max_dropped_requests():
        return int(default_value('MAX_DROPPED_REQUESTS', '1000'))

    @staticmethod
    def max_dropped_ping():
        return int(default_value('MAX_DROPPED_PING', '10'))

    @staticmethod
    def cadvisor_port():
        return int(default_value('CADVISOR_PORT', '9344'))

    @staticmethod
    def cadvisor_ip():
        return default_value('CADVISOR_IP', '127.0.0.1')

    @staticmethod
    def host_api_ip():
        return default_value('HOST_API_IP', '0.0.0.0')

    @staticmethod
    def host_api_port():
        return int(default_value('HOST_API_PORT', '9345'))

    @staticmethod
    def console_agent_port():
        return int(default_value('CONSOLE_AGENT_PORT', '9346'))

    @staticmethod
    def console_agent_main():
        return default_value('CONSOLE_AGENT_MAIN',
                             os.path.join(Config.home(), 'console-agent',
                                          'agent.js'))

    @staticmethod
    def jwt_public_key_file():
        value = os.path.join(Config.home(), 'etc', 'cattle', 'api.crt')
        # BUG FIX: this previously read the 'CONSOLE_AGENT_PORT' env key
        # (copy-paste from console_agent_port above), which made this
        # setting impossible to override; use its own key.
        return default_value('JWT_PUBLIC_KEY_FILE', value)

    @staticmethod
    def host_api_config_file():
        default_path = os.path.join(Config.home(), 'etc', 'cattle',
                                    'host-api.conf')
        return default_value('HOST_API_CONFIG_FILE', default_path)

    @staticmethod
    def event_read_timeout():
        return int(default_value('EVENT_READ_TIMEOUT', '60'))

    @staticmethod
    def eventlet_backdoor():
        """Backdoor debug port as int, or None when unset/empty."""
        val = default_value('EVENTLET_BACKDOOR', None)
        if val:
            return int(val)
        else:
            return None
| |
#!/usr/bin/env python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This tool creates an html visualization of a TensorFlow Lite graph.
Example usage:
python visualize.py foo.tflite foo.html
"""
import json
import os
import re
import sys
import numpy as np
# pylint: disable=g-import-not-at-top
if not os.path.splitext(__file__)[0].endswith(
os.path.join("tflite_runtime", "visualize")):
# This file is part of tensorflow package.
from tensorflow.lite.python import schema_py_generated as schema_fb
else:
# This file is part of tflite_runtime package.
from tflite_runtime import schema_py_generated as schema_fb
# A CSS description for making the visualizer
_CSS = """
<html>
<head>
<style>
body {font-family: sans-serif; background-color: #fa0;}
table {background-color: #eca;}
th {background-color: black; color: white;}
h1 {
background-color: ffaa00;
padding:5px;
color: black;
}
svg {
margin: 10px;
border: 2px;
border-style: solid;
border-color: black;
background: white;
}
div {
border-radius: 5px;
background-color: #fec;
padding:5px;
margin:5px;
}
.tooltip {color: blue;}
.tooltip .tooltipcontent {
visibility: hidden;
color: black;
background-color: yellow;
padding: 5px;
border-radius: 4px;
position: absolute;
z-index: 1;
}
.tooltip:hover .tooltipcontent {
visibility: visible;
}
.edges line {
stroke: #333;
}
text {
font-weight: bold;
}
.nodes text {
color: black;
pointer-events: none;
font-family: sans-serif;
font-size: 11px;
}
</style>
<script src="https://d3js.org/d3.v4.min.js"></script>
</head>
<body>
"""
_D3_HTML_TEMPLATE = """
<script>
function buildGraph() {
// Build graph data
var graph = %s;
var svg = d3.select("#subgraph%d")
var width = svg.attr("width");
var height = svg.attr("height");
// Make the graph scrollable.
svg = svg.call(d3.zoom().on("zoom", function() {
svg.attr("transform", d3.event.transform);
})).append("g");
var color = d3.scaleOrdinal(d3.schemeDark2);
var simulation = d3.forceSimulation()
.force("link", d3.forceLink().id(function(d) {return d.id;}))
.force("charge", d3.forceManyBody())
.force("center", d3.forceCenter(0.5 * width, 0.5 * height));
var edge = svg.append("g").attr("class", "edges").selectAll("line")
.data(graph.edges).enter().append("path").attr("stroke","black").attr("fill","none")
// Make the node group
var node = svg.selectAll(".nodes")
.data(graph.nodes)
.enter().append("g")
.attr("x", function(d){return d.x})
.attr("y", function(d){return d.y})
.attr("transform", function(d) {
return "translate( " + d.x + ", " + d.y + ")"
})
.attr("class", "nodes")
.call(d3.drag()
.on("start", function(d) {
if(!d3.event.active) simulation.alphaTarget(1.0).restart();
d.fx = d.x;d.fy = d.y;
})
.on("drag", function(d) {
d.fx = d3.event.x; d.fy = d3.event.y;
})
.on("end", function(d) {
if (!d3.event.active) simulation.alphaTarget(0);
d.fx = d.fy = null;
}));
// Within the group, draw a box for the node position and text
// on the side.
var node_width = 150;
var node_height = 30;
node.append("rect")
.attr("r", "5px")
.attr("width", node_width)
.attr("height", node_height)
.attr("rx", function(d) { return d.group == 1 ? 1 : 10; })
.attr("stroke", "#000000")
.attr("fill", function(d) { return d.group == 1 ? "#dddddd" : "#000000"; })
node.append("text")
.text(function(d) { return d.name; })
.attr("x", 5)
.attr("y", 20)
.attr("fill", function(d) { return d.group == 1 ? "#000000" : "#eeeeee"; })
// Setup force parameters and update position callback
var node = svg.selectAll(".nodes")
.data(graph.nodes);
// Bind the links
var name_to_g = {}
node.each(function(data, index, nodes) {
console.log(data.id)
name_to_g[data.id] = this;
});
function proc(w, t) {
return parseInt(w.getAttribute(t));
}
edge.attr("d", function(d) {
function lerp(t, a, b) {
return (1.0-t) * a + t * b;
}
var x1 = proc(name_to_g[d.source],"x") + node_width /2;
var y1 = proc(name_to_g[d.source],"y") + node_height;
var x2 = proc(name_to_g[d.target],"x") + node_width /2;
var y2 = proc(name_to_g[d.target],"y");
var s = "M " + x1 + " " + y1
+ " C " + x1 + " " + lerp(.5, y1, y2)
+ " " + x2 + " " + lerp(.5, y1, y2)
+ " " + x2 + " " + y2
return s;
});
}
buildGraph()
</script>
"""
def TensorTypeToName(tensor_type):
  """Converts a numerical enum to a readable tensor type."""
  matches = (name for name, value in schema_fb.TensorType.__dict__.items()
             if value == tensor_type)
  return next(matches, None)
def BuiltinCodeToName(code):
  """Converts a builtin op code enum to a readable name."""
  return next(
      (name for name, value in schema_fb.BuiltinOperator.__dict__.items()
       if value == code), None)
def NameListToString(name_list):
  """Converts a list of integers to the equivalent ASCII string.

  Strings pass through unchanged; None and empty lists yield "".
  """
  if isinstance(name_list, str):
    return name_list
  if name_list is None:
    return ""
  # join() is linear; the original `result = result + chr(...)` loop was
  # quadratic in the name length.
  return "".join(chr(int(val)) for val in name_list)
class OpCodeMapper(object):
  """Maps an opcode index to an op name."""

  def __init__(self, data):
    # Map operator_codes index -> readable name.  CUSTOM ops are shown by
    # their custom_code string instead of the generic "CUSTOM" label.
    self.code_to_name = {}
    for idx, d in enumerate(data["operator_codes"]):
      self.code_to_name[idx] = BuiltinCodeToName(d["builtin_code"])
      if self.code_to_name[idx] == "CUSTOM":
        self.code_to_name[idx] = NameListToString(d["custom_code"])

  def __call__(self, x):
    """Return 'NAME (idx)' for opcode index `x`; '<UNKNOWN>' when absent."""
    if x not in self.code_to_name:
      s = "<UNKNOWN>"
    else:
      s = self.code_to_name[x]
    return "%s (%d)" % (s, x)
class DataSizeMapper(object):
  """For buffers, report the number of bytes."""

  def __call__(self, x):
    """Return '<N> bytes' for a sized object, or '--' for None."""
    if x is None:
      return "--"
    return "%d bytes" % len(x)
class TensorMapper(object):
  """Maps a list of tensor indices to a tooltip hoverable indicator of more."""

  def __init__(self, subgraph_data):
    # Subgraph dict; only the "tensors" list is consulted.
    self.data = subgraph_data

  def __call__(self, x):
    """Render tensor indices `x` as HTML: the repr of the list, with a
    hover tooltip showing each tensor's index, name, type and shape."""
    html = ""
    if x is None:
      return html

    html += "<span class='tooltip'><span class='tooltipcontent'>"
    for i in x:
      tensor = self.data["tensors"][i]
      html += str(i) + " "
      html += NameListToString(tensor["name"]) + " "
      html += TensorTypeToName(tensor["type"]) + " "
      html += (repr(tensor["shape"]) if "shape" in tensor else "[]")
      html += (repr(tensor["shape_signature"])
               if "shape_signature" in tensor else "[]") + "<br>"
    html += "</span>"
    html += repr(x)
    html += "</span>"
    return html
def GenerateGraph(subgraph_idx, g, opcode_mapper):
  """Produces the HTML required to have a d3 visualization of the dag."""

  def TensorName(idx):
    # d3 node id for tensor `idx`
    return "t%d" % idx

  def OpName(idx):
    # d3 node id for operator `idx`
    return "o%d" % idx

  edges = []
  nodes = []
  # Initial (y, x) placement for each tensor: `first` records where it is
  # first consumed, `second` where it is first produced.
  first = {}
  second = {}
  pixel_mult = 200  # TODO(aselle): multiplier for initial placement
  width_mult = 170  # TODO(aselle): multiplier for initial placement
  for op_index, op in enumerate(g["operators"] or []):
    if op["inputs"] is not None:
      for tensor_input_position, tensor_index in enumerate(op["inputs"]):
        if tensor_index not in first:
          first[tensor_index] = ((op_index - 0.5 + 1) * pixel_mult,
                                 (tensor_input_position + 1) * width_mult)
        edges.append({
            "source": TensorName(tensor_index),
            "target": OpName(op_index)
        })
    if op["outputs"] is not None:
      for tensor_output_position, tensor_index in enumerate(op["outputs"]):
        if tensor_index not in second:
          second[tensor_index] = ((op_index + 0.5 + 1) * pixel_mult,
                                  (tensor_output_position + 1) * width_mult)
        edges.append({
            "target": TensorName(tensor_index),
            "source": OpName(op_index)
        })
    nodes.append({
        "id": OpName(op_index),
        "name": opcode_mapper(op["opcode_index"]),
        "group": 2,
        "x": pixel_mult,
        "y": (op_index + 1) * pixel_mult
    })
  for tensor_index, tensor in enumerate(g["tensors"]):
    initial_y = (
        first[tensor_index] if tensor_index in first else
        second[tensor_index] if tensor_index in second else (0, 0))

    nodes.append({
        "id": TensorName(tensor_index),
        "name": "%r (%d)" % (getattr(tensor, "shape", []), tensor_index),
        "group": 1,
        "x": initial_y[1],
        "y": initial_y[0]
    })
  graph_str = json.dumps({"nodes": nodes, "edges": edges})

  html = _D3_HTML_TEMPLATE % (graph_str, subgraph_idx)
  return html
def GenerateTableHtml(items, keys_to_print, display_index=True):
  """Given a list of object values and keys to print, make an HTML table.

  Args:
    items: Items to print an array of dicts.
    keys_to_print: (key, display_fn). `key` is a key in the object. i.e.
      items[0][key] should exist. display_fn is the mapping function on
      display. i.e. the displayed html cell will have the string returned by
      `mapping_fn(items[0][key])`.
    display_index: add a column which is the index of each row in `items`.

  Returns:
    An html table.
  """
  html = ""
  # Header row.
  # BUG FIX: the original emitted "<table><tr>" AND another "<tr>", leaving a
  # stray, never-closed row before the header and unbalanced markup.
  html += "<table>\n"
  html += "<tr>\n"
  if display_index:
    html += "<th>index</th>"
  for h, mapper in keys_to_print:
    html += "<th>%s</th>" % h
  html += "</tr>\n"
  # One row per item; missing keys render as None.
  for idx, tensor in enumerate(items):
    html += "<tr>\n"
    if display_index:
      html += "<td>%d</td>" % idx
    for h, mapper in keys_to_print:
      val = tensor[h] if h in tensor else None
      val = val if mapper is None else mapper(val)
      html += "<td>%s</td>\n" % val
    html += "</tr>\n"
  html += "</table>\n"
  return html
def CamelCaseToSnakeCase(camel_case_input):
  """Converts an identifier in CamelCase to snake_case."""
  # Split "xWord" boundaries first, then "ABBRWord" boundaries, then lowercase.
  partial = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_case_input)
  return re.sub("([a-z0-9])([A-Z])", r"\1_\2", partial).lower()
def FlatbufferToDict(fb, preserve_as_numpy):
  """Converts a hierarchy of FB objects into a nested dict.

  We avoid transforming big parts of the flat buffer into python arrays. This
  speeds conversion from ten minutes to a few seconds on big graphs.

  Args:
    fb: a flat buffer structure. (i.e. ModelT)
    preserve_as_numpy: true if all downstream np.arrays should be preserved.
      false if all downstream np.array should become python arrays
  Returns:
    A dictionary representing the flatbuffer rather than a flatbuffer object.
  """
  if isinstance(fb, (int, float, str)):
    return fb
  if hasattr(fb, "__dict__"):
    converted = {}
    for attribute_name in dir(fb):
      attribute = fb.__getattribute__(attribute_name)
      # Skip methods and private/dunder attributes.
      if callable(attribute) or attribute_name.startswith("_"):
        continue
      # Keep raw buffer payloads as numpy so huge models convert quickly.
      keep_numpy = True if attribute_name == "buffers" else preserve_as_numpy
      converted[CamelCaseToSnakeCase(attribute_name)] = FlatbufferToDict(
          attribute, keep_numpy)
    return converted
  if isinstance(fb, np.ndarray):
    return fb if preserve_as_numpy else fb.tolist()
  if hasattr(fb, "__len__"):
    return [FlatbufferToDict(entry, preserve_as_numpy) for entry in fb]
  return fb
def CreateDictFromFlatbuffer(buffer_data):
  """Parses a serialized TFLite flatbuffer into a plain nested dict."""
  root = schema_fb.Model.GetRootAsModel(buffer_data, 0)
  return FlatbufferToDict(
      schema_fb.ModelT.InitFromObj(root), preserve_as_numpy=False)
def create_html(tflite_input, input_is_filepath=True):  # pylint: disable=invalid-name
  """Returns html description with the given tflite model.

  Args:
    tflite_input: TFLite flatbuffer model path or model object.
    input_is_filepath: Tells if tflite_input is a model path or a model object.

  Returns:
    Dump of the given tflite model in HTML format.

  Raises:
    RuntimeError: If the input is not valid.
  """
  # Load the model into a plain dict: from a serialized flatbuffer
  # (.tflite/.bin), a JSON dump, or an in-memory model object.
  if input_is_filepath:
    if not os.path.exists(tflite_input):
      raise RuntimeError("Invalid filename %r" % tflite_input)
    if tflite_input.endswith(".tflite") or tflite_input.endswith(".bin"):
      with open(tflite_input, "rb") as file_handle:
        file_data = bytearray(file_handle.read())
      data = CreateDictFromFlatbuffer(file_data)
    elif tflite_input.endswith(".json"):
      # BUG FIX: json.load(open(...)) leaked the file handle.
      with open(tflite_input) as json_handle:
        data = json.load(json_handle)
    else:
      raise RuntimeError("Input file was not .tflite or .json")
  else:
    data = CreateDictFromFlatbuffer(tflite_input)

  html = ""
  html += _CSS
  # BUG FIX: the heading was opened with <h1> but closed with </h2>.
  html += "<h1>TensorFlow Lite Model</h1>"
  data["filename"] = tflite_input  # Avoid special case
  toplevel_stuff = [("filename", None), ("version", None),
                    ("description", None)]

  html += "<table>\n"
  for key, mapping in toplevel_stuff:
    if not mapping:
      mapping = lambda x: x
    html += "<tr><th>%s</th><td>%s</td></tr>\n" % (key, mapping(data.get(key)))
  html += "</table>\n"

  # Spec on what keys to display
  buffer_keys_to_display = [("data", DataSizeMapper())]
  operator_keys_to_display = [("builtin_code", BuiltinCodeToName),
                              ("custom_code", NameListToString),
                              ("version", None)]

  # Update builtin code fields.
  for d in data["operator_codes"]:
    d["builtin_code"] = max(d["builtin_code"], d["deprecated_builtin_code"])

  for subgraph_idx, g in enumerate(data["subgraphs"]):
    # Subgraph local specs on what to display
    html += "<div class='subgraph'>"
    tensor_mapper = TensorMapper(g)
    opcode_mapper = OpCodeMapper(data)
    op_keys_to_display = [("inputs", tensor_mapper), ("outputs", tensor_mapper),
                          ("builtin_options", None),
                          ("opcode_index", opcode_mapper)]
    tensor_keys_to_display = [("name", NameListToString),
                              ("type", TensorTypeToName), ("shape", None),
                              ("shape_signature", None), ("buffer", None),
                              ("quantization", None)]

    html += "<h2>Subgraph %d</h2>\n" % subgraph_idx

    # Inputs and outputs.
    html += "<h3>Inputs/Outputs</h3>\n"
    html += GenerateTableHtml([{
        "inputs": g["inputs"],
        "outputs": g["outputs"]
    }], [("inputs", tensor_mapper), ("outputs", tensor_mapper)],
                              display_index=False)

    # Print the tensors.
    html += "<h3>Tensors</h3>\n"
    html += GenerateTableHtml(g["tensors"], tensor_keys_to_display)

    # Print the ops.
    if g["operators"]:
      html += "<h3>Ops</h3>\n"
      html += GenerateTableHtml(g["operators"], op_keys_to_display)

    # Visual graph.
    html += "<svg id='subgraph%d' width='1600' height='900'></svg>\n" % (
        subgraph_idx,)
    html += GenerateGraph(subgraph_idx, g, opcode_mapper)
    html += "</div>"

  # Buffers have no data, but maybe in the future they will
  html += "<h2>Buffers</h2>\n"
  html += GenerateTableHtml(data["buffers"], buffer_keys_to_display)

  # Operator codes
  html += "<h2>Operator Codes</h2>\n"
  html += GenerateTableHtml(data["operator_codes"], operator_keys_to_display)

  html += "</body></html>\n"
  return html
def main(argv):
  """CLI entry point: render a .tflite/.json model to an HTML file."""
  if len(argv) < 3:
    print("Usage: %s <input tflite> <output html>" % (argv[0]))
  else:
    html = create_html(argv[1])
    with open(argv[2], "w") as output_file:
      output_file.write(html)


if __name__ == "__main__":
  main(sys.argv)
| |
# -*- coding: utf-8 -*-
from . import Pipeline, Converter, Options, Accessor, accessors
from .. import xlplatform
from ..main import Range
import datetime
try:
import numpy as np
except ImportError:
np = None
# Maps the 'dates' option to a factory that builds the date object from the
# (year, month, day, hour, ...) components of an Excel date cell.
# datetime.date drops the time-of-day components via **kwargs.
_date_handlers = {
    datetime.datetime: datetime.datetime,
    datetime.date: lambda year, month, day, **kwargs: datetime.date(year, month, day)
}

# Maps the 'numbers' option to a converter applied to numeric cell values
# (Excel hands numbers over as floats; `int` rounds before converting).
_number_handlers = {
    int: lambda x: int(round(x)),
    'raw int': int,
}
class ExpandRangeStage(object):
    """Pipeline stage applying the 'expand' option to the context's range."""

    def __init__(self, options):
        self.expand = options.get('expand', None)

    def __call__(self, c):
        if not c.range:
            return
        # auto-expand the range
        if self.expand:
            c.range = c.range.expand(self.expand)
class ClearExpandedRangeStage(object):
    """Pipeline stage clearing leftover cells from a previously larger
    expanded write, before the new (possibly smaller) value is written."""

    def __init__(self, options):
        self.expand = options.get('expand', None)
        skip = options.get('_skip_tl_cells', None)
        self.skip = (0, 0) if skip is None else skip

    def __call__(self, ctx):
        if not (ctx.range and self.expand):
            return
        from ..expansion import expanders
        expander = expanders.get(self.expand, self.expand)
        nrows = len(ctx.value)
        ncols = len(ctx.value[0]) if nrows else 0
        expander.clear(
            ctx.range,
            skip=self.skip,
            vshape=(nrows, ncols),
        )
class WriteValueToRangeStage(object):
    """Pipeline stage writing ctx.value (a list of lists) into ctx.range.

    Args:
        options: conversion options; only '_skip_tl_cells' (top-left
            rows/cols already written, e.g. headers) is read. Defaults to
            None. BUG FIX: `options` used to be a required positional
            argument, but RawValueAccessor.writer() constructs this stage as
            WriteValueToRangeStage(raw=True), which raised TypeError.
        raw: if True, write ctx.value to the range as-is, with no resizing
            or skip handling.
    """

    def __init__(self, options=None, raw=False):
        self.skip = options.get('_skip_tl_cells', None) if options else None
        self.raw = raw

    def _write_value(self, rng, value, scalar):
        # Only write to live ranges and non-empty values.
        if rng.api and value:
            # it is assumed by this stage that value is a list of lists
            if scalar:
                value = value[0][0]
            else:
                rng = rng.resize(len(value), len(value[0]))
            rng.raw_value = value

    def __call__(self, ctx):
        if ctx.range and ctx.value:
            if self.raw:
                ctx.range.raw_value = ctx.value
                return
            scalar = ctx.meta.get('scalar', False)
            if not scalar:
                ctx.range = ctx.range.resize(len(ctx.value), len(ctx.value[0]))
            if self.skip:
                r, c = self.skip
                if scalar:
                    # Write the scalar to both sub-ranges around the skipped
                    # top-left block.
                    self._write_value(ctx.range[:r, c:], ctx.value, True)
                    self._write_value(ctx.range[r:, :], ctx.value, True)
                else:
                    # Split the value so the skipped top-left cells are not
                    # overwritten.
                    self._write_value(ctx.range[:r, c:], [x[c:] for x in ctx.value[:r]], False)
                    self._write_value(ctx.range[r:, :], ctx.value[r:], False)
            else:
                self._write_value(ctx.range, ctx.value, scalar)
class ReadValueFromRangeStage(object):
    """Pipeline stage pulling the raw cell values out of the context's range."""

    def __call__(self, c):
        if not c.range:
            return
        c.value = c.range.raw_value
class CleanDataFromReadStage(object):
    """Pipeline stage converting raw platform values into Python values.

    The 'dates', 'empty' and 'numbers' options choose how date cells, empty
    cells and numeric cells are represented; unknown handlers are used as-is
    as callables.
    """

    def __init__(self, options):
        requested_dates = options.get('dates', datetime.datetime)
        requested_numbers = options.get('numbers', None)
        self.dates_handler = _date_handlers.get(requested_dates, requested_dates)
        self.numbers_handler = _number_handlers.get(requested_numbers, requested_numbers)
        self.empty_as = options.get('empty', None)

    def __call__(self, c):
        c.value = xlplatform.clean_value_data(
            c.value, self.dates_handler, self.empty_as, self.numbers_handler)
class CleanDataForWriteStage(object):
    """Pipeline stage converting every cell value into an Excel-writable form."""

    def __call__(self, c):
        c.value = [[xlplatform.prepare_xl_data_element(cell) for cell in row]
                   for row in c.value]
class AdjustDimensionsStage(object):
    """Pipeline stage collapsing the 2-d value to the requested 'ndim'.

    ndim=None squeezes singleton dimensions (down to a scalar or flat list
    where possible); ndim=1 requires a 1-by-n or n-by-1 value; ndim=2 leaves
    the value untouched.
    """

    def __init__(self, options):
        self.ndim = options.get('ndim', None)

    def __call__(self, c):
        # the assumption is that value is 2-dimensional at this stage
        nrows = len(c.value)
        if self.ndim is None:
            if nrows == 1:
                c.value = c.value[0][0] if len(c.value[0]) == 1 else c.value[0]
            elif len(c.value[0]) == 1:
                c.value = [row[0] for row in c.value]
        elif self.ndim == 1:
            if nrows == 1:
                c.value = c.value[0]
            elif len(c.value[0]) == 1:
                c.value = [row[0] for row in c.value]
            else:
                raise Exception("Range must be 1-by-n or n-by-1 when ndim=1.")
        elif self.ndim != 2:
            # ndim == 2 is a no-op; anything else is invalid.
            raise ValueError('Invalid c.value ndim=%s' % self.ndim)
class Ensure2DStage(object):
    """Pipeline stage normalizing c.value to a list of lists.

    A scalar becomes [[v]] (and is flagged in c.meta['scalar']); a flat
    list/tuple becomes a single row; an already-2-d or empty value is left
    untouched.
    """

    def __call__(self, c):
        if not isinstance(c.value, (list, tuple)):
            c.meta['scalar'] = True
            c.value = [[c.value]]
        elif c.value and not isinstance(c.value[0], (list, tuple)):
            c.value = [c.value]
class TransposeStage(object):
    """Pipeline stage swapping rows and columns of the 2-d value."""

    def __call__(self, c):
        ncols = len(c.value[0]) if c.value else 0
        c.value = [[row[col] for row in c.value] for col in range(ncols)]
class BaseAccessor(Accessor):
    """Shared reader pipeline: optionally expands the range first."""

    @classmethod
    def reader(cls, options):
        expand = options.get('expand', None)
        return Pipeline().append_stage(ExpandRangeStage(options), only_if=expand)
class RangeAccessor(Accessor):
    """Accessor returning the Range object itself instead of its values."""

    @staticmethod
    def copy_range_to_value(c):
        c.value = c.range

    @classmethod
    def reader(cls, options):
        return (BaseAccessor.reader(options)
                .append_stage(RangeAccessor.copy_range_to_value))


RangeAccessor.register(Range)
class RawValueAccessor(Accessor):
    """Accessor for the 'raw' converter: values pass through uncleaned."""

    @classmethod
    def reader(cls, options):
        return (
            Accessor.reader(options)
            .append_stage(ReadValueFromRangeStage())
        )

    @classmethod
    def writer(cls, options):
        # BUG FIX: WriteValueToRangeStage takes `options` as its first
        # argument; the original call passed only raw=True, which raised
        # TypeError as soon as a raw writer pipeline was built.
        return (
            Accessor.writer(options)
            .prepend_stage(WriteValueToRangeStage(options, raw=True))
        )


RawValueAccessor.register('raw')
class ValueAccessor(Accessor):
    """Default accessor: cleaned, dimension-adjusted cell values."""

    @staticmethod
    def reader(options):
        transpose = options.get('transpose', False)
        return (
            BaseAccessor.reader(options)
            .append_stage(ReadValueFromRangeStage())
            .append_stage(Ensure2DStage())
            .append_stage(CleanDataFromReadStage(options))
            .append_stage(TransposeStage(), only_if=transpose)
            .append_stage(AdjustDimensionsStage(options))
        )

    @staticmethod
    def writer(options):
        transpose = options.get('transpose', False)
        # Stages are prepended, so they run bottom-to-top as listed here.
        return (
            Pipeline()
            .prepend_stage(WriteValueToRangeStage(options))
            .prepend_stage(ClearExpandedRangeStage(options),
                           only_if=options.get('expand', None))
            .prepend_stage(CleanDataForWriteStage())
            .prepend_stage(TransposeStage(), only_if=transpose)
            .prepend_stage(Ensure2DStage())
        )

    @classmethod
    def router(cls, value, rng, options):
        # Route to a type-specific accessor when one is registered for the
        # value's type; otherwise handle it here.
        return accessors.get(type(value), cls)


ValueAccessor.register(None)
class DictConverter(Converter):
    """Converts a two-column range to and from a dict."""

    writes_types = dict

    @classmethod
    def base_reader(cls, options):
        # Force a 2-d read so each row yields a (key, value) pair.
        forced = Options(options).override(ndim=2)
        return super(DictConverter, cls).base_reader(forced)

    @classmethod
    def read_value(cls, value, options):
        assert not value or len(value[0]) == 2
        return dict(value)

    @classmethod
    def write_value(cls, value, options):
        return list(value.items())


DictConverter.register(dict)
| |
"""
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_unicode, iri_to_uri
from django.utils import datetime_safe
from django.utils.timezone import is_aware
def rfc2822_date(date):
    """Formats a datetime as an RFC 2822 date string (used for RSS dates)."""
    # We can't use strftime() because it produces locale-dependant results, so
    # we have to map english month and day names manually
    months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
    days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    # We do this ourselves to be timezone aware, email.Utils is not tz aware.
    dow = days[date.weekday()]
    month = months[date.month - 1]
    time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
    if is_aware(date):
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # BUG FIX: divmod on a negative minute offset floors the hour toward
        # -infinity (e.g. -210 minutes formatted as -0430 instead of -0330).
        # Work with the absolute value and re-apply the sign explicitly.
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + '%s%02d%02d' % (sign, hour, minute)
    else:
        # Unknown/naive time zone per RFC 2822 section 3.3.
        return time_str + '-0000'
def rfc3339_date(date):
    """Formats a datetime as an RFC 3339 date string (used for Atom dates)."""
    # Support datetime objects older than 1900
    date = datetime_safe.new_datetime(date)
    if is_aware(date):
        time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
        offset = date.tzinfo.utcoffset(date)
        timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
        # BUG FIX: divmod on a negative minute offset floors the hour toward
        # -infinity (e.g. -210 minutes formatted as -04:30 instead of -03:30).
        sign = '-' if timezone < 0 else '+'
        hour, minute = divmod(abs(timezone), 60)
        return time_str + "%s%02d:%02d" % (sign, hour, minute)
    else:
        return date.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_tag_uri(url, date):
    """
    Creates a TagURI.

    See http://diveintomark.org/archives/2004/05/28/howto-atom-id
    """
    parts = urlparse.urlparse(url)
    date_part = ''
    if date is not None:
        date_part = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
    return 'tag:%s%s:%s/%s' % (parts.hostname, date_part, parts.path, parts.fragment)
class SyndicationFeed(object):
    "Base class for all syndication feeds. Subclasses should provide write()"

    def __init__(self, title, link, description, language=None, author_email=None,
            author_name=None, author_link=None, subtitle=None, categories=None,
            feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
        # strings_only=True leaves None (and other non-strings) untouched
        # rather than coercing them to the string "None".
        to_unicode = lambda s: force_unicode(s, strings_only=True)
        if categories:
            categories = [force_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_unicode(ttl)
        # Feed-level metadata. Link-like values go through iri_to_uri so
        # non-ASCII IRIs serialize as valid URIs.
        self.feed = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'language': to_unicode(language),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'subtitle': to_unicode(subtitle),
            'categories': categories or (),
            'feed_url': iri_to_uri(feed_url),
            'feed_copyright': to_unicode(feed_copyright),
            'id': feed_guid or link,
            'ttl': ttl,
        }
        # Any extra keyword arguments become additional feed metadata for
        # custom subclasses.
        self.feed.update(kwargs)
        self.items = []

    def add_item(self, title, link, description, author_email=None,
            author_name=None, author_link=None, pubdate=None, comments=None,
            unique_id=None, enclosure=None, categories=(), item_copyright=None,
            ttl=None, **kwargs):
        """
        Adds an item to the feed. All args are expected to be Python Unicode
        objects except pubdate, which is a datetime.datetime object, and
        enclosure, which is an instance of the Enclosure class.
        """
        to_unicode = lambda s: force_unicode(s, strings_only=True)
        if categories:
            categories = [to_unicode(c) for c in categories]
        if ttl is not None:
            # Force ints to unicode
            ttl = force_unicode(ttl)
        item = {
            'title': to_unicode(title),
            'link': iri_to_uri(link),
            'description': to_unicode(description),
            'author_email': to_unicode(author_email),
            'author_name': to_unicode(author_name),
            'author_link': iri_to_uri(author_link),
            'pubdate': pubdate,
            'comments': to_unicode(comments),
            'unique_id': to_unicode(unique_id),
            'enclosure': enclosure,
            'categories': categories or (),
            'item_copyright': to_unicode(item_copyright),
            'ttl': ttl,
        }
        # Extra keyword arguments become additional item metadata.
        item.update(kwargs)
        self.items.append(item)

    def num_items(self):
        # Number of items currently queued for this feed.
        return len(self.items)

    def root_attributes(self):
        """
        Return extra attributes to place on the root (i.e. feed/channel) element.
        Called from write().
        """
        return {}

    def add_root_elements(self, handler):
        """
        Add elements in the root (i.e. feed/channel) element. Called
        from write().
        """
        pass

    def item_attributes(self, item):
        """
        Return extra attributes to place on each item (i.e. item/entry) element.
        """
        return {}

    def add_item_elements(self, handler, item):
        """
        Add elements on each item (i.e. item/entry) element.
        """
        pass

    def write(self, outfile, encoding):
        """
        Outputs the feed in the given encoding to outfile, which is a file-like
        object. Subclasses should override this.
        """
        raise NotImplementedError

    def writeString(self, encoding):
        """
        Returns the feed in the given encoding as a string.
        """
        # write() produces encoded output, so buffer into BytesIO rather
        # than StringIO.
        from io import BytesIO
        s = BytesIO()
        self.write(s, encoding)
        return s.getvalue()

    def latest_post_date(self):
        """
        Returns the latest item's pubdate. If none of them have a pubdate,
        this returns the current date/time.
        """
        updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
        if len(updates) > 0:
            updates.sort()
            return updates[-1]
        else:
            return datetime.datetime.now()
class Enclosure(object):
    "Represents an RSS enclosure"

    def __init__(self, url, length, mime_type):
        "All args are expected to be Python Unicode objects"
        self.url = iri_to_uri(url)
        self.length = length
        self.mime_type = mime_type
class RssFeed(SyndicationFeed):
    # Shared machinery for RSS feeds; subclasses set _version and the
    # per-item elements.
    mime_type = 'application/rss+xml; charset=utf-8'

    def write(self, outfile, encoding):
        # Serialize the feed as <rss><channel>...items...</channel></rss>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement("rss", self.rss_attributes())
        handler.startElement("channel", self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        self.endChannelElement(handler)
        handler.endElement("rss")

    def rss_attributes(self):
        # The atom namespace is required for the <atom:link rel="self">
        # element emitted in add_root_elements().
        return {"version": self._version,
            "xmlns:atom": "http://www.w3.org/2005/Atom"}

    def write_items(self, handler):
        # One <item> element per queued item.
        for item in self.items:
            handler.startElement('item', self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("item")

    def add_root_elements(self, handler):
        # Channel-level metadata; optional elements are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", self.feed['link'])
        handler.addQuickElement("description", self.feed['description'])
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("atom:link", None,
                {"rel": "self", "href": self.feed['feed_url']})
        if self.feed['language'] is not None:
            handler.addQuickElement("language", self.feed['language'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", cat)
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("copyright", self.feed['feed_copyright'])
        # The .decode() suggests rfc2822_date yields a byte string here
        # (Python 2 str) -- the XML generator wants unicode.
        handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()).decode('utf-8'))
        if self.feed['ttl'] is not None:
            handler.addQuickElement("ttl", self.feed['ttl'])

    def endChannelElement(self, handler):
        handler.endElement("channel")
class RssUserland091Feed(RssFeed):
    """RSS 0.91 feed: items carry only title, link and optional description."""
    _version = "0.91"

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        description = item['description']
        if description is not None:
            handler.addQuickElement("description", description)
class Rss201rev2Feed(RssFeed):
    # Spec: http://blogs.law.harvard.edu/tech/rss
    _version = "2.0"

    def add_item_elements(self, handler, item):
        # Per-<item> child elements; optional fields are skipped when unset.
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", item['link'])
        if item['description'] is not None:
            handler.addQuickElement("description", item['description'])

        # Author information.
        if item["author_name"] and item["author_email"]:
            handler.addQuickElement("author", "%s (%s)" % \
                (item['author_email'], item['author_name']))
        elif item["author_email"]:
            handler.addQuickElement("author", item["author_email"])
        elif item["author_name"]:
            # RSS has no name-only author element; fall back to Dublin Core.
            handler.addQuickElement("dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"})

        if item['pubdate'] is not None:
            handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']).decode('utf-8'))
        if item['comments'] is not None:
            handler.addQuickElement("comments", item['comments'])
        if item['unique_id'] is not None:
            handler.addQuickElement("guid", item['unique_id'])
        if item['ttl'] is not None:
            handler.addQuickElement("ttl", item['ttl'])

        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("enclosure", '',
                {"url": item['enclosure'].url, "length": item['enclosure'].length,
                 "type": item['enclosure'].mime_type})

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
    # Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
    mime_type = 'application/atom+xml; charset=utf-8'
    ns = "http://www.w3.org/2005/Atom"

    def write(self, outfile, encoding):
        # Serialize the feed as <feed>...entries...</feed>.
        handler = SimplerXMLGenerator(outfile, encoding)
        handler.startDocument()
        handler.startElement('feed', self.root_attributes())
        self.add_root_elements(handler)
        self.write_items(handler)
        handler.endElement("feed")

    def root_attributes(self):
        # Declare the Atom namespace on the root, plus xml:lang if known.
        if self.feed['language'] is not None:
            return {"xmlns": self.ns, "xml:lang": self.feed['language']}
        else:
            return {"xmlns": self.ns}

    def add_root_elements(self, handler):
        # Feed-level metadata; optional elements are emitted only when set.
        handler.addQuickElement("title", self.feed['title'])
        handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
        if self.feed['feed_url'] is not None:
            handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
        handler.addQuickElement("id", self.feed['id'])
        handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()).decode('utf-8'))
        if self.feed['author_name'] is not None:
            # Nested <author><name/><email/><uri/></author> block.
            handler.startElement("author", {})
            handler.addQuickElement("name", self.feed['author_name'])
            if self.feed['author_email'] is not None:
                handler.addQuickElement("email", self.feed['author_email'])
            if self.feed['author_link'] is not None:
                handler.addQuickElement("uri", self.feed['author_link'])
            handler.endElement("author")
        if self.feed['subtitle'] is not None:
            handler.addQuickElement("subtitle", self.feed['subtitle'])
        for cat in self.feed['categories']:
            handler.addQuickElement("category", "", {"term": cat})
        if self.feed['feed_copyright'] is not None:
            handler.addQuickElement("rights", self.feed['feed_copyright'])

    def write_items(self, handler):
        # One <entry> element per queued item.
        for item in self.items:
            handler.startElement("entry", self.item_attributes(item))
            self.add_item_elements(handler, item)
            handler.endElement("entry")

    def add_item_elements(self, handler, item):
        handler.addQuickElement("title", item['title'])
        handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
        if item['pubdate'] is not None:
            handler.addQuickElement("updated", rfc3339_date(item['pubdate']).decode('utf-8'))

        # Author information.
        if item['author_name'] is not None:
            handler.startElement("author", {})
            handler.addQuickElement("name", item['author_name'])
            if item['author_email'] is not None:
                handler.addQuickElement("email", item['author_email'])
            if item['author_link'] is not None:
                handler.addQuickElement("uri", item['author_link'])
            handler.endElement("author")

        # Unique ID.  Fall back to a tag URI derived from the link/pubdate
        # when the caller did not supply one.
        if item['unique_id'] is not None:
            unique_id = item['unique_id']
        else:
            unique_id = get_tag_uri(item['link'], item['pubdate'])
        handler.addQuickElement("id", unique_id)

        # Summary.
        if item['description'] is not None:
            handler.addQuickElement("summary", item['description'], {"type": "html"})

        # Enclosure.
        if item['enclosure'] is not None:
            handler.addQuickElement("link", '',
                {"rel": "enclosure",
                 "href": item['enclosure'].url,
                 "length": item['enclosure'].length,
                 "type": item['enclosure'].mime_type})

        # Categories.
        for cat in item['categories']:
            handler.addQuickElement("category", "", {"term": cat})

        # Rights.
        if item['item_copyright'] is not None:
            handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
# Swap the target here to change the default format project-wide.
DefaultFeed = Rss201rev2Feed
| |
"""
This file exports a method that applies a simple effect (see
card_decoder/cards.py) to a board (given that the board has a current player)
given the targets (if necessary).
Many effects have different requirements and will raise exceptions if the
requirements aren't met.
Targets are card names, not specific cards.
"""
import random
from events import raise_strategy_card_events, raise_strategy_card_events_for_player
# Returns a list of pending moves in the form of tuples (move_type, card_name, targets)
# We do this to avoid a cyclic dependency between this file and moves.py
def apply_simple_effect(board, effect, targets):
    """Applies a simple effect to the board for the current player.

    Dispatches on effect.effect_index and returns a list of pending moves
    as (move_type, card_name, targets) tuples; returning them (rather than
    executing moves here) avoids a cyclic dependency with moves.py.  An
    effect with nothing pending yields [].
    """
    handlers = {
        1: _discard_cards,
        2: _draw_cards,
        3: _gain_runes,
        4: _gain_honor,
        5: _gain_power,
        6: _banish_card_from_hand,
        7: _banish_card_from_center,
        8: _banish_card_from_discard,
        9: _defeat_monster,
        10: _pay_less_runes_toward_mechana_construct,
        11: _acquire_hero,
        12: _gain_honor_for_lifebound_hero,
        13: _draw_card_for_mechana_construct,
        14: _banish_for_additional_turn,
        15: _pay_less_runes_toward_construct,
        16: _gain_honor_for_defeating_monster,
        17: _draw_for_two_or_more_constructs,
        18: _gain_honor_per_faction_of_constructs,
        19: _each_opponent_destroys_construct,
        20: _put_acquired_mechana_construct_into_play,
        21: _treat_all_constructs_as_mechana_constructs,
        22: _take_random_card_from_each_opponent,
        23: _gain_power_if_lifebound_hero_played,
        24: _opponents_destroy_all_but_one_construct,
        25: _gain_power_for_each_mechana_construct,
        26: _copy_hero,
        27: _acquire_or_defeat_anything,
    }
    handler = handlers[effect.effect_index]
    pending = handler(board, effect.param, targets[effect.effect_index], targets)
    return [] if pending is None else pending
def _discard_cards(board, param, my_targets, all_targets):
assert param == len(my_targets), "Expected %d targets; got %s" % (
param, str(my_targets))
for card_name in my_targets:
# Raises an exception if the card isn't in the player's hand
board.current_player().discard_card(card_name)
def _draw_cards(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
for i in xrange(param):
board.current_player().draw_card()
def _gain_runes(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
assert board.current_player().runes_remaining + param >= 0, "Not enough runes to play this card"
board.current_player().runes_remaining += param
# Some cards cost runes to play; the effect is in the form of gaining
# negative runes
assert board.current_player().runes_remaining >= 0
def _gain_honor(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
assert param >= 0, "Effect should not give negative honor"
board.give_honor(board.current_player(), param)
def _gain_power(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
assert board.current_player().power_remaining + param >= 0, "Not enough power to play this card"
board.current_player().power_remaining += param
def _banish_card_from_hand(board, param, my_targets, all_targets):
    """Banishes the targeted cards from the current player's hand to the void."""
    assert param == len(my_targets), "Expected %d targets; got %s" % (
        param, str(my_targets))
    for name in my_targets:
        banished = board.current_player().remove_card_from_hand(name)
        board.void.append(banished)
        raise_strategy_card_events(board, 'banished_from_deck', name)
def _banish_card_from_center(board, param, my_targets, all_targets):
    """Banishes the targeted cards from the center row to the void."""
    assert param == len(my_targets), "Expected %d targets; got %s" % (
        param, str(my_targets))
    assert "Avatar of the Fallen" not in my_targets, "Cannot banish Avatar of the Fallen"
    for name in my_targets:
        banished = board.remove_card_from_center(name)
        board.void.append(banished)
        raise_strategy_card_events(board, 'banished_from_center', name)
def _banish_card_from_discard(board, param, my_targets, all_targets):
    """Banishes the targeted cards from the current player's discard pile."""
    assert param == len(my_targets), "Expected %d targets; got %s" % (
        param, str(my_targets))
    for name in my_targets:
        banished = board.current_player().remove_card_from_discard(name)
        board.void.append(banished)
        raise_strategy_card_events(board, 'banished_from_deck', name)
def _defeat_monster(board, param, my_targets, all_targets):
assert len(my_targets) == 1, "Expected 1 target; got %s" % str(my_targets)
card_name = my_targets[0]
card = board.card_dictionary.find_card(card_name)
assert card.card_type == "Monster", "Tried to defeat %s, which is not a monster" % (
card.name)
assert card.cost <= param, ("Can only use this effect to defeat monsters up to %d power" +
" (%s costs %d power)" % (param, card.name, card.cost))
board.current_player().power_remaining += card.cost
return [("defeat", card_name, all_targets)]
def _pay_less_runes_toward_mechana_construct(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
board.current_player().runes_toward_mechana_constructs += param
def _acquire_hero(board, param, my_targets, all_targets):
assert len(my_targets) == 1, "Expected 1 target; got %s" % str(my_targets)
card_name = my_targets[0]
card = board.card_dictionary.find_card(card_name)
assert "Hero" in card.card_type, "Tried to acquire %s, which is not a hero" % (
card.name)
assert card.cost <= param, ("Can only use this effect to acquire heros up to %d runes" +
" (%s costs %d runes)" % (param, card.name, card.cost))
board.remove_card_from_center(card.name)
board.current_player().deck.cards.append(card)
raise_strategy_card_events(board, 'acquired_card', card_name)
def _gain_honor_for_lifebound_hero(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
board.current_player().honor_for_lifebound_hero += param
def _draw_card_for_mechana_construct(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
assert board.current_player().has_played_mechana_construct()
board.current_player().draw_card()
def _banish_for_additional_turn(board, param, my_targets, all_targets):
    # Tablet of Time's Dawn: banish it from play to take an extra turn
    # after this one.
    assert len(my_targets) == 1, "Expected 1 target; got %s" % str(my_targets)
    card_name = my_targets[0]
    assert card_name == "Tablet of Time's Dawn"
    board.current_player().should_take_additional_turn = True
    card = board.current_player().remove_card_from_constructs(card_name)
    board.void.append(card)
    # Raise the banish event first, then the construct-removal event, so
    # listeners observe the same order as other banish effects.
    raise_strategy_card_events(board, 'banished_from_deck', card_name)
    # we know the only card with this effect is the Tablet of Time's Dawn
    # and it's a construct
    assert card.is_construct()
    raise_strategy_card_events(board, 'construct_removed', card_name)
def _pay_less_runes_toward_construct(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected 0 targets; got %s" % str(my_targets)
board.current_player().runes_toward_constructs += param
def _gain_honor_for_defeating_monster(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected 0 targets; got %s" % str(my_targets)
board.give_honor(board.current_player(), param)
def _draw_for_two_or_more_constructs(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected 0 targets; got %s" % str(my_targets)
if len(board.current_player().constructs) >= 2:
board.current_player().draw_card()
def _gain_honor_per_faction_of_constructs(board, param, my_targets, all_targets):
    # Awards `param` honor per distinct construct in play.
    # NOTE(review): despite the function name, this keys on card *name*, not
    # faction, so it counts unique construct titles rather than factions --
    # confirm whether card.faction was intended here.
    assert len(my_targets) == 0, "Expected 0 targets; got %s" % str(my_targets)
    construct_types = set(card.name for card in board.current_player().constructs)
    board.give_honor(board.current_player(), param * len(construct_types))
def _get_opponent_indices(board):
return [i for i in xrange(len(board.players))
if i != board.current_player_index]
def _destroy_opponent_construct(board, opponent_index):
    """Force the given opponent to discard one construct of their strategy's choosing."""
    opponent = board.players[opponent_index]
    strategy = board.strategies[opponent_index]
    card_name = strategy.choose_construct_for_discard(board)
    removed = opponent.remove_card_from_constructs(card_name)
    opponent.discard.append(removed)
    raise_strategy_card_events_for_player(board, opponent_index, 'construct_removed', card_name)
def _each_opponent_destroys_construct(board, param, my_targets, all_targets):
    """Make every opponent that has at least one construct destroy one."""
    assert len(my_targets) == 0, "Expected 0 targets; got %s" % str(my_targets)
    for opponent_index in _get_opponent_indices(board):
        if board.players[opponent_index].constructs:
            _destroy_opponent_construct(board, opponent_index)
def _put_acquired_mechana_construct_into_play(board, param, my_targets, all_targets):
assert len(my_targets) == 1, "Expected 1 target; got %s" % str(my_targets)
card_name = my_targets[0]
card = board.current_player().remove_card_from_acquired_cards(card_name)
board.current_player().constructs.append(card)
def _treat_all_constructs_as_mechana_constructs(board, param, my_targets, all_targets):
    """Marker effect with no immediate action of its own.

    Its presence is consumed elsewhere: effects that care about mechana
    constructs check whether the associated card is among the player's
    constructs (see considers_card_mechana_construct usage nearby).
    """
    # This purposefully does nothing. Its effect is used when other effects might
    # need a mechana construct; the player simply checks if the associated card
    # is in the player's constructs
    pass
def _take_random_card_from_each_opponent(board, param, my_targets, all_targets):
    """Steal one randomly chosen card from each opponent's hand into ours."""
    assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
    for opponent_index in _get_opponent_indices(board):
        opponent = board.players[opponent_index]
        stolen_name = random.choice(opponent.hand).name
        stolen = opponent.remove_card_from_hand(stolen_name)
        raise_strategy_card_events_for_player(board, opponent_index, 'banished_from_deck', stolen_name)
        board.current_player().hand.append(stolen)
        raise_strategy_card_events(board, 'acquired_card', stolen_name)
def _gain_power_if_lifebound_hero_played(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
# Note that we exclude the most recently played card (this card)
if any(card.card_type == "Lifebound Hero"
for card in board.current_player().played_cards[:-1]):
board.current_player().power_remaining += param
def _opponents_destroy_all_but_one_construct(board, param, my_targets, all_targets):
    """Reduce every opponent down to a single construct in play.

    TECHNICALLY this isn't actually quite correct, but the simplification is
    believed close enough to correct that it doesn't matter.
    """
    assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
    for opponent_index in _get_opponent_indices(board):
        while len(board.players[opponent_index].constructs) > 1:
            _destroy_opponent_construct(board, opponent_index)
def _gain_power_for_each_mechana_construct(board, param, my_targets, all_targets):
assert len(my_targets) == 0, "Expected no targets; got %s" % str(my_targets)
num_mechana_constructs = sum(1 for card in board.current_player().constructs
if board.current_player().considers_card_mechana_construct(card))
board.current_player().power_remaining += num_mechana_constructs * param
def _copy_hero(board, param, my_targets, all_targets):
assert len(my_targets) == 1, "Expected 1 target; got %s" % str(my_targets)
card_name = my_targets[0]
assert any(card.name == card_name for card in board.current_player().played_cards), (
"Tried to copy the effect of a card that wasn't played")
card = board.current_player().remove_card_from_played_cards(card_name)
board.current_player().hand.append(card)
return [("play", card_name, all_targets)]
def _acquire_or_defeat_anything(board, param, my_targets, all_targets):
    """Defeat any monster for free, or gain enough runes to acquire any card."""
    assert len(my_targets) == 1, "Expected 1 target; got %s" % str(my_targets)
    card_name = my_targets[0]
    card = board.card_dictionary.find_card(card_name)
    if card.card_type == "Monster":
        # _defeat_monster's param is the highest cost it may defeat; pass a
        # huge bound so that every monster qualifies.
        _defeat_monster(board, 10000, my_targets, all_targets)
    else:
        board.current_player().runes_remaining += card.cost
        return [("acquire", card_name, None)]
| |
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.checkout.scm.scm_mock import MockSCM
from webkitpy.common.host_mock import MockHost
from webkitpy.common.webkit_finder import WebKitFinder
class ExcludingMockSCM(MockSCM):
    """A MockSCM that treats a fixed set of paths as not managed by SCM."""

    def __init__(self, exclusion_list, filesystem=None, executive=None):
        MockSCM.__init__(self, filesystem, executive)
        self._exclusion_list = exclusion_list

    def exists(self, path):
        # Excluded paths are reported as absent from SCM.
        return path not in self._exclusion_list and MockSCM.exists(self, path)

    def delete(self, path):
        return self.delete_list([path])

    def delete_list(self, paths):
        # Mirror a real SCM: deleting an unmanaged file is an error.
        for candidate in paths:
            if candidate in self._exclusion_list:
                raise Exception("File is not SCM managed: " + candidate)
        return MockSCM.delete_list(self, paths)

    def move(self, origin, destination):
        if origin in self._exclusion_list:
            raise Exception("File is not SCM managed: " + origin)
        return MockSCM.move(self, origin, destination)
class BaselineOptimizerTest(unittest.TestCase):
    """Tests for BaselineOptimizer, which removes redundant per-platform baselines."""

    def test_move_baselines(self):
        """A shared result is hoisted to the root and moved through real SCM calls."""
        host = MockHost(scm=ExcludingMockSCM(['/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt']))
        host.filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/VirtualTestSuites', '[]')
        host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/win/another/test-expected.txt', 'result A')
        host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt', 'result A')
        host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt', 'result B')
        baseline_optimizer = BaselineOptimizer(host, host.port_factory.get(), host.port_factory.all_port_names(), skip_scm_commands=False)
        baseline_optimizer._move_baselines('another/test-expected.txt', {
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/win': 'aaa',
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac': 'aaa',
            '/mock-checkout/third_party/WebKit/LayoutTests': 'bbb',
        }, {
            '/mock-checkout/third_party/WebKit/LayoutTests': 'aaa',
        })
        self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt'), 'result A')

    def test_move_baselines_skip_scm_commands(self):
        """With skip_scm_commands=True the optimizer records adds/deletes instead of doing them."""
        host = MockHost(scm=ExcludingMockSCM(['/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt']))
        host.filesystem.write_text_file('/mock-checkout/third_party/WebKit/LayoutTests/VirtualTestSuites', '[]')
        host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/win/another/test-expected.txt', 'result A')
        host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/another/test-expected.txt', 'result A')
        host.filesystem.write_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt', 'result B')
        baseline_optimizer = BaselineOptimizer(host, host.port_factory.get(), host.port_factory.all_port_names(), skip_scm_commands=True)
        baseline_optimizer._move_baselines('another/test-expected.txt', {
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/win': 'aaa',
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac': 'aaa',
            '/mock-checkout/third_party/WebKit/LayoutTests': 'bbb',
        }, {
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/linux': 'bbb',
            '/mock-checkout/third_party/WebKit/LayoutTests': 'aaa',
        })
        self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt'), 'result A')
        self.assertEqual(baseline_optimizer._files_to_delete, [
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/win/another/test-expected.txt',
        ])
        self.assertEqual(baseline_optimizer._files_to_add, [
            '/mock-checkout/third_party/WebKit/LayoutTests/another/test-expected.txt',
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/linux/another/test-expected.txt',
        ])

    def _assertOptimization(self, results_by_directory, expected_new_results_by_directory, baseline_dirname='', expected_files_to_delete=None, host=None):
        """Write the given baselines, run optimize(), and verify the resulting layout.

        results_by_directory maps LayoutTests-relative dirs to baseline contents;
        expected_new_results_by_directory describes the post-optimization state
        (None meaning the file should be gone or scheduled for deletion).
        Passing expected_files_to_delete implies skip_scm_commands=True.
        """
        if not host:
            host = MockHost()
        fs = host.filesystem
        webkit_base = WebKitFinder(fs).webkit_base()
        baseline_name = 'mock-baseline-expected.txt'
        fs.write_text_file(fs.join(webkit_base, 'LayoutTests', 'VirtualTestSuites'),
            '[{"prefix": "gpu", "base": "fast/canvas", "args": ["--foo"]}]')
        for dirname, contents in results_by_directory.items():
            path = fs.join(webkit_base, 'LayoutTests', dirname, baseline_name)
            fs.write_binary_file(path, contents)
        baseline_optimizer = BaselineOptimizer(host, host.port_factory.get(), host.port_factory.all_port_names(), skip_scm_commands=expected_files_to_delete is not None)
        self.assertTrue(baseline_optimizer.optimize(fs.join(baseline_dirname, baseline_name)))
        for dirname, contents in expected_new_results_by_directory.items():
            path = fs.join(webkit_base, 'LayoutTests', dirname, baseline_name)
            if contents is None:
                self.assertTrue(not fs.exists(path) or path in baseline_optimizer._files_to_delete)
            else:
                self.assertEqual(fs.read_binary_file(path), contents)
        # Check that the files that were in the original set have been deleted where necessary.
        for dirname in results_by_directory:
            path = fs.join(webkit_base, 'LayoutTests', dirname, baseline_name)
            if not dirname in expected_new_results_by_directory:
                self.assertTrue(not fs.exists(path) or path in baseline_optimizer._files_to_delete)
        if expected_files_to_delete:
            self.assertEqual(sorted(baseline_optimizer._files_to_delete), sorted(expected_files_to_delete))

    def test_linux_redundant_with_win(self):
        self._assertOptimization({
            'platform/win': '1',
            'platform/linux': '1',
        }, {
            'platform/win': '1',
        })

    def test_covers_mac_win_linux(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/win': '1',
            'platform/linux': '1',
            '': None,
        }, {
            '': '1',
        })

    def test_overwrites_root(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/win': '1',
            'platform/linux': '1',
            '': '2',
        }, {
            '': '1',
        })

    def test_no_new_common_directory(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/linux': '1',
            '': '2',
        }, {
            'platform/mac': '1',
            'platform/linux': '1',
            '': '2',
        })

    def test_local_optimization(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/linux': '1',
            'platform/linux-x86': '1',
        }, {
            'platform/mac': '1',
            'platform/linux': '1',
        })

    def test_local_optimization_skipping_a_port_in_the_middle(self):
        self._assertOptimization({
            'platform/mac-snowleopard': '1',
            'platform/win': '1',
            'platform/linux-x86': '1',
        }, {
            'platform/mac-snowleopard': '1',
            'platform/win': '1',
        })

    def test_baseline_redundant_with_root(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/win': '2',
            '': '2',
        }, {
            'platform/mac': '1',
            '': '2',
        })

    def test_root_baseline_unused(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/win': '2',
            '': '3',
        }, {
            'platform/mac': '1',
            'platform/win': '2',
        })

    def test_root_baseline_unused_and_non_existant(self):
        self._assertOptimization({
            'platform/mac': '1',
            'platform/win': '2',
        }, {
            'platform/mac': '1',
            'platform/win': '2',
        })

    def test_virtual_root_redundant_with_actual_root(self):
        self._assertOptimization({
            'virtual/gpu/fast/canvas': '2',
            'fast/canvas': '2',
        }, {
            'virtual/gpu/fast/canvas': None,
            'fast/canvas': '2',
        }, baseline_dirname='virtual/gpu/fast/canvas')

    def test_virtual_root_redundant_with_ancestors(self):
        self._assertOptimization({
            'virtual/gpu/fast/canvas': '2',
            'platform/mac/fast/canvas': '2',
            'platform/win/fast/canvas': '2',
        }, {
            'virtual/gpu/fast/canvas': None,
            'fast/canvas': '2',
        }, baseline_dirname='virtual/gpu/fast/canvas')

    def test_virtual_root_redundant_with_ancestors_skip_scm_commands(self):
        self._assertOptimization({
            'virtual/gpu/fast/canvas': '2',
            'platform/mac/fast/canvas': '2',
            'platform/win/fast/canvas': '2',
        }, {
            'virtual/gpu/fast/canvas': None,
            'fast/canvas': '2',
        },
        baseline_dirname='virtual/gpu/fast/canvas',
        expected_files_to_delete=[
            '/mock-checkout/third_party/WebKit/LayoutTests/virtual/gpu/fast/canvas/mock-baseline-expected.txt',
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/fast/canvas/mock-baseline-expected.txt',
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/win/fast/canvas/mock-baseline-expected.txt',
        ])

    def test_virtual_root_redundant_with_ancestors_skip_scm_commands_with_file_not_in_scm(self):
        self._assertOptimization({
            'virtual/gpu/fast/canvas': '2',
            'platform/mac/fast/canvas': '2',
            'platform/win/fast/canvas': '2',
        }, {
            'virtual/gpu/fast/canvas': None,
            'fast/canvas': '2',
        },
        baseline_dirname='virtual/gpu/fast/canvas',
        expected_files_to_delete=[
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/mac/fast/canvas/mock-baseline-expected.txt',
            '/mock-checkout/third_party/WebKit/LayoutTests/platform/win/fast/canvas/mock-baseline-expected.txt',
        ],
        host=MockHost(scm=ExcludingMockSCM(['/mock-checkout/third_party/WebKit/LayoutTests/virtual/gpu/fast/canvas/mock-baseline-expected.txt'])))

    def test_virtual_root_not_redundant_with_ancestors(self):
        self._assertOptimization({
            'virtual/gpu/fast/canvas': '2',
            'platform/mac/fast/canvas': '1',
        }, {
            'virtual/gpu/fast/canvas': '2',
            'platform/mac/fast/canvas': '1',
        }, baseline_dirname='virtual/gpu/fast/canvas')
| |
# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
import sys
import os.path
import re
import operator
import digits
from digits import utils
from digits.utils import subclass, override
from digits.task import Task
# NOTE: Increment this everytime the pickled version changes
PICKLE_VERSION = 3
@subclass
class CreateDbTask(Task):
    """Creates a database"""

    def __init__(self, input_file, db_name, image_dims, **kwargs):
        """
        Arguments:
        input_file -- read images and labels from this file
        db_name -- save database to this location
        image_dims -- (height, width, channels)
        Keyword Arguments:
        image_folder -- prepend image paths with this folder
        shuffle -- shuffle images before saving
        resize_mode -- used in utils.image.resize_image()
        encoding -- 'none', 'png' or 'jpg'
        mean_file -- save mean file to this location
        backend -- type of database to use
        labels_file -- used to print category distribution
        """
        # Take keyword arguments out of kwargs
        self.image_folder = kwargs.pop('image_folder', None)
        self.shuffle = kwargs.pop('shuffle', True)
        self.resize_mode = kwargs.pop('resize_mode' , None)
        self.encoding = kwargs.pop('encoding', None)
        self.mean_file = kwargs.pop('mean_file', None)
        self.backend = kwargs.pop('backend', None)
        self.labels_file = kwargs.pop('labels_file', None)
        super(CreateDbTask, self).__init__(**kwargs)
        # Stamp the instance with the pickle schema version for upgrades in __setstate__.
        self.pickver_task_createdb = PICKLE_VERSION
        self.input_file = input_file
        self.db_name = db_name
        self.image_dims = image_dims
        # 3-channel images are stored BGR (OpenCV/Caffe convention); grayscale has no order.
        if image_dims[2] == 3:
            self.image_channel_order = 'BGR'
        else:
            self.image_channel_order = None
        # Filled in from tool output while the task runs.
        self.entries_count = None
        self.distribution = None

    def __getstate__(self):
        """Drop the (re-loadable) labels from the pickled state.

        NOTE(review): this deletes key 'labels', but get_labels() caches under
        '_labels' -- confirm which attribute name is actually in __dict__.
        """
        d = super(CreateDbTask, self).__getstate__()
        if 'labels' in d:
            del d['labels']
        return d

    def __setstate__(self, state):
        """Restore pickled state, upgrading older pickle schema versions in place."""
        super(CreateDbTask, self).__setstate__(state)
        if self.pickver_task_createdb <= 1:
            # v1 -> v2: derive image_channel_order, which v1 pickles lack.
            print 'Upgrading CreateDbTask to version 2'
            if self.image_dims[2] == 1:
                self.image_channel_order = None
            elif self.encode:
                self.image_channel_order = 'BGR'
            else:
                self.image_channel_order = 'RGB'
        if self.pickver_task_createdb <= 2:
            # v2 -> v3: the boolean 'encode' flag became the 'encoding' string.
            print 'Upgrading CreateDbTask to version 3'
            if hasattr(self, 'encode'):
                if self.encode:
                    self.encoding = 'jpg'
                else:
                    self.encoding = 'none'
                delattr(self, 'encode')
            else:
                self.encoding = 'none'
        self.pickver_task_createdb = PICKLE_VERSION

    @override
    def name(self):
        """Return a human-readable task name based on which split this DB holds."""
        if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
            return 'Create DB (train)'
        elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
            return 'Create DB (val)'
        elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
            return 'Create DB (test)'
        else:
            return 'Create DB (%s)' % self.db_name

    @override
    def html_id(self):
        """Return a stable DOM id for this task, matching the split naming in name()."""
        if self.db_name == utils.constants.TRAIN_DB or 'train' in self.db_name.lower():
            return 'task-create_db-train'
        elif self.db_name == utils.constants.VAL_DB or 'val' in self.db_name.lower():
            return 'task-create_db-val'
        elif self.db_name == utils.constants.TEST_DB or 'test' in self.db_name.lower():
            return 'task-create_db-test'
        else:
            return super(CreateDbTask, self).html_id()

    @override
    def offer_resources(self, resources):
        """Claim one slot from the create_db task pool, or None if none is free."""
        key = 'create_db_task_pool'
        if key not in resources:
            return None
        for resource in resources[key]:
            if resource.remaining() >= 1:
                return {key: [(resource.identifier, 1)]}
        return None

    @override
    def task_arguments(self, resources):
        """Build the command line for tools/create_db.py from this task's settings."""
        args = [sys.executable, os.path.join(os.path.dirname(os.path.dirname(digits.__file__)), 'tools', 'create_db.py'),
                self.path(self.input_file),
                self.path(self.db_name),
                self.image_dims[1],
                self.image_dims[0],
                '--channels=%s' % self.image_dims[2],
                '--resize_mode=%s' % self.resize_mode,
                ]
        if self.mean_file is not None:
            args.append('--mean_file=%s' % self.path(self.mean_file))
            # Add a visual mean_file
            args.append('--mean_file=%s' % self.path(utils.constants.MEAN_FILE_IMAGE))
        if self.image_folder:
            args.append('--image_folder=%s' % self.image_folder)
        if self.shuffle:
            args.append('--shuffle')
        if self.encoding and self.encoding != 'none':
            args.append('--encoding=%s' % self.encoding)
        return args

    @override
    def process_output(self, line):
        """Parse one line of create_db.py output, updating progress/state.

        Pushes progress and category-distribution updates to the browser over
        socketio. Always returns True once the line has been recognized/logged.
        """
        from digits.webapp import socketio
        timestamp, level, message = self.preprocess_output_digits(line)
        if not message:
            return False
        # progress
        match = re.match(r'Processed (\d+)\/(\d+)', message)
        if match:
            self.progress = float(match.group(1))/int(match.group(2))
            socketio.emit('task update',
                    {
                        'task': self.html_id(),
                        'update': 'progress',
                        'percentage': int(round(100*self.progress)),
                        'eta': utils.time_filters.print_time_diff(self.est_done()),
                        },
                    namespace='/jobs',
                    room=self.job_id,
                    )
            return True
        # distribution
        match = re.match(r'Category (\d+) has (\d+)', message)
        if match and self.labels_file is not None:
            if not hasattr(self, 'distribution') or self.distribution is None:
                self.distribution = {}
            self.distribution[match.group(1)] = int(match.group(2))
            data = self.distribution_data()
            if data:
                socketio.emit('task update',
                        {
                            'task': self.html_id(),
                            'update': 'distribution',
                            'data': data,
                            },
                        namespace='/jobs',
                        room=self.job_id,
                        )
            return True
        # result
        match = re.match(r'Total images added: (\d+)', message)
        if match:
            self.entries_count = int(match.group(1))
            self.logger.debug(message)
            return True
        if level == 'warning':
            self.logger.warning('%s: %s' % (self.name(), message))
            return True
        if level in ['error', 'critical']:
            self.logger.error('%s: %s' % (self.name(), message))
            self.exception = message
            return True
        return True

    def get_labels(self):
        """
        Read labels from labels_file and return them in a list
        """
        # The labels might be set already
        if hasattr(self, '_labels') and self._labels and len(self._labels) > 0:
            return self._labels
        assert hasattr(self, 'labels_file'), 'labels_file not set'
        assert self.labels_file, 'labels_file not set'
        assert os.path.exists(self.path(self.labels_file)), 'labels_file does not exist'
        labels = []
        with open(self.path(self.labels_file)) as infile:
            for line in infile:
                label = line.strip()
                if label:
                    labels.append(label)
        assert len(labels) > 0, 'no labels in labels_file'
        # Cache for subsequent calls.
        self._labels = labels
        return self._labels

    def distribution_data(self):
        """
        Returns distribution data for a C3.js graph
        """
        if self.distribution is None:
            return None
        try:
            labels = self.get_labels()
        except AssertionError:
            return None
        # Only report once every category has been seen at least once.
        if len(self.distribution.keys()) != len(labels):
            return None
        values = ['Count']
        titles = []
        # Sort categories by count, descending, for the bar chart.
        for key, value in sorted(
                self.distribution.items(),
                key=operator.itemgetter(1),
                reverse=True):
            values.append(value)
            titles.append(labels[int(key)])
        return {
            'data': {
                'columns': [values],
                'type': 'bar'
                },
            'axis': {
                'x': {
                    'type': 'category',
                    'categories': titles,
                    }
                },
            }
| |
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
import tracing_project
from hooks import install
from paste import httpserver
from paste import fileapp
import webapp2
from webapp2 import Route, RedirectHandler
def _GetFilesIn(basedir):
data_files = []
for dirpath, dirnames, filenames in os.walk(basedir, followlinks=True):
new_dirnames = [d for d in dirnames if not d.startswith('.')]
del dirnames[:]
dirnames += new_dirnames
for f in filenames:
if f.startswith('.'):
continue
if f == 'README.md':
continue
full_f = os.path.join(dirpath, f)
rel_f = os.path.relpath(full_f, basedir)
data_files.append(rel_f)
data_files.sort()
return data_files
def _RelPathToUnixPath(p):
return p.replace(os.sep, '/')
class TestListHandler(webapp2.RequestHandler):
    """Serves the JSON list of test-module URL paths for the test runner page."""

    def get(self, *args, **kwargs):  # pylint: disable=unused-argument
        relpaths = self.app.project.FindAllTestModuleRelPaths()
        payload = {
            'test_relpaths': ['/' + _RelPathToUnixPath(p) for p in relpaths]
        }
        self.response.content_type = 'application/json'
        return self.response.write(json.dumps(payload))
class TestResultHandler(webapp2.RequestHandler):
    """Echoes individual test results to stdout (pass) or stderr (otherwise)."""

    def post(self, *args, **kwargs):  # pylint: disable=unused-argument
        msg = self.request.body
        if 'PASSED' in msg:
            out = sys.stdout
        else:
            out = sys.stderr
        out.write(msg + '\n')
        return self.response.write('')
class TestsCompletedHandler(webapp2.RequestHandler):
    """Receives the final test summary and asks the server to exit accordingly."""

    def post(self, *args, **kwargs):  # pylint: disable=unused-argument
        msg = self.request.body
        sys.stdout.write(msg + '\n')
        exit_code = 0 if 'ALL_PASSED' in msg else 1
        server = self.app.server
        # please_exit only exists once _AddPleaseExitMixinToServer has run.
        if hasattr(server, 'please_exit'):
            server.please_exit(exit_code)
        return self.response.write('')
class DirectoryListingHandler(webapp2.RequestHandler):
    """Serves a JSON listing of the files under a mapped source directory."""

    def get(self, *args, **kwargs):  # pylint: disable=unused-argument
        source_path = kwargs.pop('_source_path', None)
        mapped_path = kwargs.pop('_mapped_path', None)
        assert mapped_path.endswith('/')
        listing = [mapped_path + rel for rel in _GetFilesIn(source_path)]
        self.response.content_type = 'application/json'
        return self.response.write(json.dumps(listing))
class FileAppWithGZipHandling(fileapp.FileApp):
    """FileApp variant that serves .gz files as raw bytes.

    By default, FileApp serves gzip files as their underlying type with
    Content-Encoding of gzip, which makes them arrive decompressed on the
    client -- surprising our xhr.html system. Returning (None, None) for .gz
    files avoids that.
    """

    def guess_type(self):
        guessed = super(FileAppWithGZipHandling, self).guess_type()
        if self.filename.endswith('.gz'):
            return None, None
        return guessed
class SourcePathsHandler(webapp2.RequestHandler):
    """Resolves a request path against an ordered list of source directories."""

    def get(self, *args, **kwargs):  # pylint: disable=unused-argument
        source_paths = kwargs.pop('_source_paths', [])
        # This is how we do it. Its... strange, but its what we've done since
        # the dawn of time. Aka 4 years ago, lol.
        rel = os.path.relpath(self.request.path, '/')
        for mapped_path in source_paths:
            candidate = os.path.join(mapped_path, rel)
            if not os.path.exists(candidate):
                continue
            file_app = FileAppWithGZipHandling(candidate)
            file_app.cache_control(no_cache=True)
            return file_app
        self.abort(404)
class SimpleDirectoryHandler(webapp2.RequestHandler):
    """Serves files under a single top directory, rejecting path escapes."""

    def get(self, *args, **kwargs):  # pylint: disable=unused-argument
        top_path = os.path.abspath(kwargs.pop('_top_path', None))
        if not top_path.endswith(os.path.sep):
            top_path += os.path.sep
        requested = os.path.abspath(
            os.path.join(top_path, kwargs.pop('rest_of_path')))
        # Refuse anything that resolves outside top_path (e.g. via "..").
        if not requested.startswith(top_path):
            self.response.set_status(403)
            return
        file_app = FileAppWithGZipHandling(requested)
        file_app.cache_control(no_cache=True)
        return file_app
def CreateApp(project=None,
              test_data_path=None,
              skp_data_path=None):
    """Build the WSGI application serving the tracing test/dev pages.

    project -- a tracing_project.TracingProject (created if None)
    test_data_path / skp_data_path -- override the project's data directories.

    Route order matters: the SourcePathsHandler catch-all must stay last.
    """
    if project is None:
        project = tracing_project.TracingProject()
    routes = [
        Route('', RedirectHandler, defaults={'_uri': '/tracing/tests.html'}),
        Route('/', RedirectHandler, defaults={'_uri': '/tracing/tests.html'}),
        Route('/base/tests.html', RedirectHandler,
              defaults={'_uri': '/tracing/tests.html'}),
        Route('/tracing/tests', TestListHandler),
        Route('/tracing/notify_test_result', TestResultHandler),
        Route('/tracing/notify_tests_completed', TestsCompletedHandler)
    ]
    # Test data system.
    if not test_data_path:
        test_data_path = project.test_data_path
    routes.append(Route('/test_data/__file_list__', DirectoryListingHandler,
                        defaults={
                            '_source_path': test_data_path,
                            '_mapped_path': '/test_data/'
                        }))
    routes.append(Route('/test_data/<rest_of_path:.+>', SimpleDirectoryHandler,
                        defaults={'_top_path': test_data_path}))
    if not skp_data_path:
        skp_data_path = project.skp_data_path
    routes.append(Route('/skp_data/__file_list__', DirectoryListingHandler,
                        defaults={
                            '_source_path': skp_data_path,
                            '_mapped_path': '/skp_data/'
                        }))
    routes.append(Route('/skp_data/<rest_of_path:.+>', SimpleDirectoryHandler,
                        defaults={'_top_path': skp_data_path}))
    # This must go last, because its catch-all.
    #
    # Its funky that we have to add in the root path. The long term fix is to
    # stop with the crazy multi-source-pathing thing.
    all_paths = list(project.source_paths) + [project.tracing_root_path]
    routes.append(
        Route('/<:.+>', SourcePathsHandler,
              defaults={'_source_paths': all_paths}))
    app = webapp2.WSGIApplication(routes=routes, debug=True)
    app.project = project
    return app
def _AddPleaseExitMixinToServer(server):
# Shutting down httpserver gracefully and yielding a return code requires
# a bit of mixin code.
exitCodeAttempt = []
def please_exit(exitCode):
if len(exitCodeAttempt) > 0:
return
exitCodeAttempt.append(exitCode)
server.running = False
real_serve_forever = server.serve_forever
def serve_forever():
try:
real_serve_forever()
except KeyboardInterrupt:
# allow CTRL+C to shutdown
return 255
if len(exitCodeAttempt) == 1:
return exitCodeAttempt[0]
# The serve_forever returned for some reason separate from
# exit_please.
return 0
server.please_exit = please_exit
server.serve_forever = serve_forever
def Main(argv):
    """Entry point: parse arguments, optionally install hooks, and serve.

    Returns the process exit code produced by the please_exit mixin
    (0/1 from the test summary, or 255 on Ctrl+C).
    """
    project = tracing_project.TracingProject()
    parser = argparse.ArgumentParser(description='Run tracing development server')
    parser.add_argument(
        '--no-install-hooks', dest='install_hooks', action='store_false')
    parser.add_argument(
        '-d', '--data-dir',
        default=project.test_data_path)
    parser.add_argument(
        '-s', '--skp-data-dir',
        default=os.path.abspath(os.path.join(project.skp_data_path)))
    parser.add_argument('-p', '--port', default=8003, type=int)
    args = parser.parse_args(args=argv[1:])
    if args.install_hooks:
        install.InstallHooks()
    app = CreateApp(project,
                    test_data_path=args.data_dir,
                    skp_data_path=args.skp_data_dir)
    # start_loop=False so we can patch the server before entering the loop.
    server = httpserver.serve(app, host='127.0.0.1', port=args.port,
                              start_loop=False)
    _AddPleaseExitMixinToServer(server)
    app.server = server
    sys.stderr.write('Now running on http://127.0.0.1:%i\n' % args.port)
    return server.serve_forever()
| |
# -*- coding: utf-8 -*-
"""
Network Motifs
===============
Simple network motifs in Networkx.DiGraph format that can be directly loaded.
"""
# Copyright (C) 2021 by
# Alex Gates <ajgates@gmail.com>
# Rion Brattig Correia <rionbr@gmail.com>
# All rights reserved.
# MIT license.
import networkx as nx
# Edge lists for every supported motif, in the exact order the original code
# called add_edge (networkx.DiGraph preserves insertion order, so the
# resulting node/edge ordering is unchanged).
_MOTIF_EDGES = {
    "FeedForward": [(0, 1), (0, 2), (1, 2)],
    "Fan": [(0, 1), (0, 2)],
    "FeedForwardSelf1": [(0, 1), (0, 2), (1, 2), (0, 0)],
    "FeedForwardSelf2": [(0, 1), (0, 2), (1, 2), (1, 1)],
    "FeedForwardSelf3": [(0, 1), (0, 2), (1, 2), (2, 2)],
    "FeedForwardSelf123": [(0, 1), (0, 2), (1, 2), (0, 0), (1, 1), (2, 2)],
    "BiFan": [(0, 2), (0, 3), (1, 2), (1, 3)],
    "CoRegulated": [(0, 1), (0, 2), (1, 2), (2, 1)],
    "CoRegulating": [(0, 1), (0, 2), (1, 0), (1, 2)],
    "BiParallel": [(0, 1), (0, 2), (1, 3), (2, 3)],
    "TriParallel": [(0, 1), (0, 2), (1, 3), (2, 3), (0, 3)],
    "Dominating4": [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1)],
    "Dominating4Undir": [(0, 1), (0, 2), (0, 3), (1, 0), (2, 0), (3, 0),
                         (1, 2), (2, 3), (3, 1)],
    "3Loop": [(0, 1), (1, 2), (2, 0)],
    "4Loop": [(0, 1), (1, 2), (2, 3), (3, 0)],
    "3LoopSelf123": [(0, 1), (1, 2), (2, 0), (0, 0), (1, 1), (2, 2)],
    "FourLoop": [(0, 1), (1, 2), (2, 3), (3, 1)],
    "FourCoLoop": [(0, 1), (0, 2), (1, 2), (2, 3), (3, 1)],
    "DirectedTwoLoop": [(0, 1), (1, 0), (2, 3), (3, 2), (0, 2), (1, 3)],
    "BiParallelLoop": [(0, 1), (0, 2), (1, 3), (2, 3), (3, 0)],
    "5Chain": [(0, 1), (1, 2), (2, 3), (3, 4)],
    "3Chain": [(0, 1), (1, 2)],
    "KeffStudy3": [(0, 1), (1, 0), (0, 2), (2, 0)],
    "KeffStudy4": [(0, 1), (1, 0), (0, 2), (2, 0), (0, 3), (3, 0)],
    "CoRegulatedSelf": [(0, 1), (0, 2), (1, 2), (2, 1), (0, 0)],
    "KeffLine4": [(0, 1), (0, 3), (1, 2), (1, 3), (2, 3)],
    "KeffLineLoop4": [(0, 1), (0, 3), (1, 2), (1, 3), (2, 3), (3, 0)],
    "3Full": [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2),
              (2, 0), (2, 1), (2, 2)],
    "6Pyramid": [(0, 1), (0, 2), (1, 3), (1, 4), (1, 5),
                 (2, 3), (2, 4), (2, 5)],
    "4Split": [(0, 1), (0, 2), (1, 3), (2, 3), (0, 3)],
    "5BiParallel": [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)],
    "6BiParallelDilation": [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4),
                            (2, 5), (3, 5), (4, 5)],
    "6BiParallelDilationLoop": [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3),
                                (1, 4), (2, 5), (3, 5), (4, 5), (5, 1)],
    "5combine": [(0, 1), (1, 2), (0, 2), (1, 3), (3, 0), (3, 4), (4, 2)],
    "4tree": [(0, 1), (1, 2), (1, 3)],
}


def network_motif(name=None):
    """Graph motifs from :cite:`Milo:2012`.

    Args:
        name (string): The name of the motif.
            Possible values are : ``FeedForward``, ``Fan``, ``FeedForwardSelf1``,
            ``FeedForwardSelf2``, ``FeedForwardSelf3``, ``FeedForwardSelf123``,
            ``BiFan``, ``CoRegulated``, ``CoRegulating``, ``BiParallel``,
            ``TriParallel``, ``Dominating4``, ``Dominating4Undir``, ``3Loop``,
            ``4Loop``, ``3LoopSelf123``, ``FourLoop``, ``FourCoLoop``,
            ``DirectedTwoLoop``, ``BiParallelLoop``, ``5Chain``, ``3Chain``,
            ``KeffStudy3``, ``KeffStudy4``, ``CoRegulatedSelf``, ``KeffLine4``,
            ``KeffLineLoop4``, ``3Full``, ``6Pyramid``, ``4Split``, ``5BiParallel``,
            ``6BiParallelDilation``, ``6BiParallelDilationLoop``, ``5combine``, ``4tree``.

    Returns:
        (networkx.DiGraph) : The directed graph motif.

    Raises:
        TypeError: If ``name`` is not one of the motif names above.
    """
    if name not in _MOTIF_EDGES:
        # TypeError (not KeyError/ValueError) kept for backward compatibility.
        raise TypeError('The motif name could not be found.')
    graph = nx.DiGraph()
    graph.add_edges_from(_MOTIF_EDGES[name])
    return graph
| |
"""Contains the Queue class and the StopMode enumeration."""
import json
import logging
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from enum import Enum
from threading import Lock
from time import sleep
from bidon.db.access import transaction, pg_advisory_lock as adv
from bidon.util.date import utc_now
from .model import Job, SerializationKey
from .util import exception_to_message
# Module-level logger for the pypgq package.
LOGGER = logging.getLogger("pypgq")
# Postgres NOTIFY channel the queue LISTENs on for job change events.
CHANNEL_NAME = "pypgq_job_changed"
# Book-keeping record for a job currently submitted to the executor.
RunningJob = namedtuple("RunningJob", ["job", "sz_key", "future", "done"])
# Only the trailing field ("done") gets a default, so it may be omitted.
RunningJob.__new__.__defaults__ = (False, )
class StopMode(Enum):
    """Enumerates the ways a running queue can be asked to stop.

    Values are bit-flag style and strictly increasing with urgency, so a
    "more urgent" stop mode always compares greater than a less urgent one.
    """
    never = 1
    when_all_done = 2
    when_current_done = 4
    now = 8
class Cooperative(object):
    """A class defining the cooperative modes for queues.

    Every Queue running against a given schema must be configured with the
    exact same cooperative mode.
    """
    # Coordination strategies (bit-flag style constants):
    none = 0x01           # the queue will not try to cooperate
    advisory_lock = 0x02  # cooperate via pg_advisory_lock
    row_lock = 0x04       # cooperate via `select for update skip locked` (PG 9.5+)

    def __init__(self, mode=None, args=None):
        """Initializes the cooperative instance.

        :param mode: one of Cooperative.none, Cooperative.advisory_lock or
                     Cooperative.row_lock; any falsy value defaults to
                     Cooperative.none
        :param args: extra data required by the mode. Only advisory_lock uses
                     it: an integer, or a 2-tuple of integers, naming the
                     advisory lock key. All queues operating on the same
                     schema must use the same key. The other modes ignore it.
        """
        self.mode = mode if mode else self.none
        self.args = args
class Queue(object):
    """The job manager.

    Polls a Postgres-backed job table, dispatches due jobs to a thread pool,
    and tracks job life cycles through LISTEN/NOTIFY events on CHANNEL_NAME.
    """
    # pylint: disable=too-many-instance-attributes

    def __init__(self, model_access, worker_count=10, cooperative=None, schedule_frequency=60):
        """Initializes the Queue instance.

        :param model_access: a bidon.data.ModelAccess instance
        :param worker_count: the maximum number of workers to run at any one time
        :param cooperative: if True, the queue will pull workers using the Postgres
                            9.5+ feature `select for update skip locked` which will
                            enable multiple, cooperative queue processes.
        :param schedule_frequency: the time delay, in seconds, between calls to
                                   schedule_jobs. If None this queue will not try
                                   to schedule jobs.
        """
        self._model_access = model_access
        self._worker_count = worker_count
        self._cooperative = cooperative
        self._schedule_frequency = schedule_frequency
        # job name -> handler callable (see add_handler)
        self._job_handlers = {}
        # Guards _running_jobs, which is also mutated from executor callbacks.
        self._job_lock = Lock()
        self._executor = ThreadPoolExecutor(max_workers=worker_count)
        self._stop_mode = StopMode.never
        self._stop_callback = None
        self._waiting_job_count = 0
        # job id -> RunningJob for jobs currently submitted to the executor
        self._running_jobs = {}
        self._completed_jobs = 0
        # Seconds slept per iteration of the main loop.
        self._sleep_time = 0.1
        # Unix timestamp of the last schedule_jobs run (None until first run).
        self._last_job_scheduling = None

    def add_handler(self, name, handler):
        """Registers a function to handle jobs with a given name.

        :param name: the job name to handle
        :param handler: the function to handle the job. This function should accept a
                        single dict argument
        :raises KeyError: if a handler is already registered for ``name``
        """
        if name in self._job_handlers:
            raise KeyError("A handler has already been registered for {}".format(name))
        self._job_handlers[name] = handler

    def start(self):
        """Starts the job handling loop. Blocks until the queue stops."""
        self._loop()

    def stop(self, callback=None, stop_mode=StopMode.when_current_done):
        """Tell the job manager to stop.

        If the stop manner is less restrictive than the current stop manner, this
        function will do nothing.

        :param callback: assign a callback to be called when the manager stops
        :param stop_mode: an instance of the StopMode enum
        """
        # StopMode values are ordered by urgency, so this only escalates.
        if self._stop_mode.value < stop_mode.value:
            self._stop_callback = callback
            self._stop_mode = stop_mode
            LOGGER.info("Stop %s requested", stop_mode.name.replace("_", " "))

    def status(self):
        """Returns the current status of the JobQueue as a dict."""
        return dict(waiting_jobs=self._waiting_job_count,
                    running_jobs=len(self._running_jobs),
                    running_job_ids=tuple(sorted(self._running_jobs.keys())),
                    completed_jobs=self._completed_jobs,
                    stop_mode=self._stop_mode)

    def _loop(self):
        """The job handling loop. Runs until the state corresponding to
        self._stop_mode is matched.
        """
        LOGGER.info("Starting")
        # NOTE: There's a possible race condition here between when we start
        # listening and when we get the waiting job count. If a job is added
        # between starting to listen and checking our count, the count may
        # briefly be off by one.
        self._model_access.execute("listen {};".format(CHANNEL_NAME))
        self._waiting_job_count = self._model_access.count(Job.table_name, "started_at is null")
        self._running_jobs = {}
        while self._stop_mode != StopMode.now:
            sjcount = self._schedule_jobs()
            if sjcount is not None:
                LOGGER.info("Scheduled %s jobs", sjcount)
            self._update_job_list()
            # If the stop mode is never, or when all are done, continue to add jobs
            if self._stop_mode in (StopMode.never, StopMode.when_all_done):
                self._start_jobs()
            if not self._running_jobs:
                if self._stop_mode == StopMode.when_current_done:
                    break
                if self._stop_mode == StopMode.when_all_done and self._waiting_job_count == 0:
                    break
            sleep(self._sleep_time)
        # Cleanup any remaining futures. This will only happen when StopMode.now was requested.
        for (job_id, rjob) in self._running_jobs.items():
            rjob.future.cancel()
            # Reset the job so a later queue run can pick it up again.
            self._model_access.update(Job.table_name,
                                      dict(started_at=None,
                                           completed_at=None,
                                           error_message=None),
                                      dict(id=job_id))
            if rjob.sz_key:
                # Release the serialization key held by the cancelled job.
                self._model_access.update(SerializationKey.table_name,
                                          dict(active_job_id=None),
                                          dict(id=rjob.sz_key.id))
            LOGGER.info("Cancelled job %s", job_id)
        LOGGER.info("Stopping")
        self._model_access.close()
        if self._stop_callback:
            self._stop_callback()

    def _schedule_jobs(self):
        """Runs the schedule_jobs stored procedure when it is time to do so.

        :returns: the number of jobs scheduled, or None when scheduling is
                  disabled or the schedule_frequency window has not elapsed
        """
        # Don't schedule jobs if the frequency is None
        if self._schedule_frequency is None:
            return None
        # Only perform a diff check if jobs have been scheduled
        if self._last_job_scheduling is not None:
            ts = utc_now().timestamp()
            # If the last scheduling timestamp plus the frequency is greater than the
            # current timestamp, it is not yet time to schedule.
            if self._last_job_scheduling + self._schedule_frequency > ts:
                return None
        with transaction(self._model_access):
            cr, _ = self._model_access.callproc("schedule_jobs", [])
            job_count = self._model_access.get_scalar(cr)
        self._last_job_scheduling = utc_now().timestamp()
        return job_count

    def _update_job_list(self):
        """Makes changes to the waiting job list and the running job list based on
        received notifications.
        """
        cn = self._model_access.connection
        # Gather any waiting notifications and update the job status info
        # accordingly
        cn.poll()
        for notify in cn.notifies:
            payload = json.loads(notify.payload)
            status = payload["status"]
            job_id = payload["job_id"]
            if status == "created":
                self._waiting_job_count += 1
            elif status == "started":
                # Only decrement for jobs some other process started; jobs we
                # started ourselves were already decremented in _start_jobs.
                if job_id not in self._running_jobs:
                    self._waiting_job_count -= 1
            elif status == "completed":
                pass
            else:
                LOGGER.warning("Unknown job status %s for job %s", status, job_id)
        cn.notifies.clear()
        # Remove any completed jobs
        with self._job_lock:
            for job_id in [k for k, v in self._running_jobs.items() if v.done]:
                rjob = self._running_jobs[job_id]
                if rjob.done:
                    self._finished_job(rjob.job, rjob.sz_key, rjob.future)
                    self._running_jobs.pop(job_id)
                    if rjob.job.error_message:
                        LOGGER.info("Completed job %s with error", job_id, )
                    else:
                        LOGGER.info("Completed job %s", job_id, )
                    self._completed_jobs += 1

    def _start_jobs(self):
        """Spawns as many new jobs as needed and possible."""
        available_workers = self._worker_count - len(self._running_jobs)
        start_new_count = min(self._waiting_job_count, available_workers)
        while start_new_count > 0:
            (job, sz_key, future) = self._start_a_job()
            if job:
                LOGGER.info("Started job %s", job.id)
                self._running_jobs[job.id] = RunningJob(job, sz_key, future, False)
                self._waiting_job_count -= 1
                start_new_count -= 1
                self._set_done_callback(future, job)
            else:
                # No eligible job was found; give up for this round.
                start_new_count = 0

    def _start_a_job(self):
        """Either starts a waiting job and returns a 3-tuple of (job, sz_key, future),
        or finds no waiting job and returns a 3-tuple of (None, None, None).
        """
        with transaction(self._model_access):
            (job, sz_key) = self._get_next_job()
            if job is None:
                return (None, None, None)
            job.started_at = utc_now()
            if sz_key:
                # Claim the serialization key so no other job sharing it can
                # run concurrently.
                sz_key.active_job_id = job.id
            self._update_job(job, sz_key)

        def fxn():
            """Future closure."""
            if job.name not in self._job_handlers:
                raise KeyError("Bad job name")
            self._job_handlers[job.name](job.payload)

        future = self._executor.submit(fxn)
        return (job, sz_key, future)

    def _get_next_job(self):
        """Returns a 2-tuple of (job, serialization_key) for the highest priority
        waiting job that has an open serialization key. Returns (None, None) if
        no such job exists.
        """
        if not self._cooperative:
            return self._get_next_job_inner()
        else:
            if self._cooperative.mode == Cooperative.none:
                return self._get_next_job_inner()
            elif self._cooperative.mode == Cooperative.advisory_lock:
                # Serialize the pull across queue processes via advisory lock.
                adv.obtain_lock(self._model_access, self._cooperative.args, xact=True)
                return self._get_next_job_inner()
            elif self._cooperative.mode == Cooperative.row_lock:
                return self._get_next_job_inner(row_lock=True)
            # NOTE(review): an unrecognized cooperative mode falls through and
            # returns None (not a 2-tuple) -- confirm whether that is intended.

    def _get_next_job_inner(self, *, row_lock=False):
        """Returns a 2-tuple of (job, serialization_key) for the highest priority
        waiting job that has an open serialization key. Returns (None, None) if
        no such job exists.
        """
        sql_fmt = ("select j.* "
                   "from {job_table_name} as j "
                   "left join {szk_table_name} as k on j.serialization_key_id = k.id "
                   "where j.started_at is null and k.active_job_id is null "
                   "order by j.priority desc, j.created_at asc "
                   "limit 1")
        if row_lock:
            # Postgres 9.5+: concurrent queues skip rows another one holds.
            sql_fmt += " for update skip locked"
        job_data = self._model_access.execute(
            sql_fmt.format(
                job_table_name=Job.table_name,
                szk_table_name=SerializationKey.table_name)).fetchone()
        if job_data is None:
            return (None, None)
        job = Job(job_data)
        if job.serialization_key_id:
            sz_key = self._model_access.find_model_by_id(SerializationKey, job.serialization_key_id)
        else:
            sz_key = None
        return (job, sz_key)

    def _set_done_callback(self, future, job):
        """Sets the done_callback for the future and job. It is necessary to do this
        in a function here, rather than in a lambda in the loop, because the value
        of `job` changes during the loop.

        :param future: the future instance that is running the job
        :param job: the job model that is being run by the future
        """
        future.add_done_callback(lambda _: self._mark_job_done(job.id))

    def _update_job(self, job, sz_key):
        """Updates a job, and if present, its serialization key."""
        self._model_access.update_model(job,
                                        include_keys={"started_at", "completed_at", "error_message"})
        if sz_key:
            self._model_access.update_model(sz_key, include_keys={"active_job_id"})

    def _mark_job_done(self, job_id):
        """Marks the job with id :job_id: done. Runs on executor threads."""
        with self._job_lock:
            rjob = self._running_jobs[job_id]
            # namedtuples are immutable, so replace the record with done=True.
            self._running_jobs[job_id] = RunningJob(rjob.job, rjob.sz_key, rjob.future, True)

    def _finished_job(self, job, sz_key, future):
        """Marks the job as complete.

        :param job: the Job instance
        :param sz_key: the SerializationKey instance
        :param future: the Future instance that handled the job
        """
        # Any exception raised by the handler becomes the job's error message.
        error_message = exception_to_message(future.exception())
        job.update(completed_at=utc_now(), error_message=error_message)
        if sz_key:
            # Release the serialization key so the next job with it can run.
            sz_key.active_job_id = None
        with transaction(self._model_access):
            self._update_job(job, sz_key)
| |
#!/usr/bin/env python
__author__ = "Andre Merzky, Matteo Turilli, Ole Weidner"
__copyright__ = "Copyright 2013, The SAGA Project"
__license__ = "MIT"
import os
import sys
import saga
import time
"""
This is an example which shows how to access Amazon EC2 clouds via the SAGA
resource package. The code expects the environment variables EC2_ACCESS_KEY and
EC2_SECRET_KEY to contain the respective authentication tokens required for EC2
access. It also expects EC2_KEYPAIR to point to the ssh key to be used in the
EC2 keypair authentication.
This program has different modes of operation:
* *Help* ::
# python examples/resource/ec2.py -h
Usage:
%s -l : list VMs
%s -c <id> [...] : create VM
%s -u <id> [...] : use VMs (run jobs)
%s -d <id> [...] : destroy VMs
Environment:
EC2_URL : backend manager service endpoint
EC2_ACCESS_KEY : id for backend access
EC2_SECRET_KEY : key for backend access
EC2_KEYPAIR_ID : name of keypair for VM access
EC2_KEYPAIR : public ssh key for VM access
* *Listing* of templates and existing VMs on EC2::
# python examples/resource/ec2.py -l
compute resources
[ec2://aws.amazon.com/]-[i-cba515ab]
[ec2://aws.amazon.com/]-[i-f93f2299]
compute templates
Micro Instance
Small Instance
Medium Instance
Large Instance
Extra Large Instance
High-Memory Extra Large Instance
High-Memory Double Extra Large Instance
High-Memory Quadruple Extra Large Instance
Extra Large Instance
Double Extra Large Instance
High-CPU Medium Instance
High-CPU Extra Large Instance
Cluster Compute Quadruple Extra Large Instance
Cluster Compute Eight Extra Large Instance
Cluster GPU Quadruple Extra Large Instance
High Memory Cluster Eight Extra Large
High Storage Eight Extra Large Instance
* *Creating* a VM instance on EC2::
# python examples/resource/ec2.py -c
Created VM
id :
state : PENDING (pending)
access : None
* *Using* a VM instance on EC2::
# python examples/resource/ec2.py -u '[ec2://aws.amazon.com/]-[i-e0d2ad8a]'
connecting to [ec2://aws.amazon.com/]-[i-e0d2ad8a]
id : [ec2://aws.amazon.com/]-[i-e0d2ad8a]
state : PENDING (pending)
wait for ACTIVE state
state : ACTIVE (running)
running job
job state : Running
job state : Done
* *Destroying* a VMs instance on EC2::
# python examples/resource/ec2.py -d '[ec2://aws.amazon.com/]-[i-e0d2ad8a]'
reconnecting to id [ec2://aws.amazon.com/]-[i-e0d2ad8a]
id : [ec2://aws.amazon.com/]-[i-e0d2ad8a]
state : ACTIVE (running)
access : ssh://107.21.154.248/
shutting down [ec2://aws.amazon.com/]-[i-e0d2ad8a] (ACTIVE)
state : EXPIRED (destroyed by user)
"""
# ------------------------------------------------------------------------------
#
# helper
#
def usage (msg = None) :
    """Print usage help to stdout and exit.

    :param msg: optional error message; when given it is printed first and the
                process exits with status -1, otherwise it exits with status 0.
    """
    if msg :
        print "\n Error: %s\n" % msg

    print """
Usage:

%s -l : list VMs
%s -c <id> [...] : create VM
%s -u <id> [...] : use VMs (run jobs)
%s -d <id> [...] : destroy VMs

Environment:
EC2_URL : backend manager service endpoint
EC2_ACCESS_KEY : id for backend access
EC2_SECRET_KEY : key for backend access
EC2_KEYPAIR_ID : name of keypair for VM access
EC2_KEYPAIR : public ssh key for VM access
"""

    # Exit code distinguishes error (-1) from plain help request (0).
    if msg : sys.exit (-1)
    else : sys.exit ( 0)
# ------------------------------------------------------------------------------
#
def state2str (state) :
    """Map a saga.resource state constant to a short human-readable label.

    Returns None for states that are not in the mapping (matching the
    original fall-through behaviour).
    """
    labels = {
        saga.resource.UNKNOWN  : "UNKNOWN",
        saga.resource.NEW      : "NEW ",
        saga.resource.PENDING  : "PENDING",
        saga.resource.ACTIVE   : "ACTIVE",
        saga.resource.CANCELED : "CANCELED",
        saga.resource.EXPIRED  : "EXPIRED",
        saga.resource.DONE     : "DONE ",
        saga.resource.FAILED   : "FAILED",
    }
    return labels.get (state)
# ------------------------------------------------------------------------------
#
# set up the connection to EC2
#
# Fail early with a usage message if any required EC2 setting is missing.
if not 'EC2_URL' in os.environ : usage ("no %s in environment" % 'EC2_URL' )
if not 'EC2_ACCESS_KEY' in os.environ : usage ("no %s in environment" % 'EC2_ACCESS_KEY')
if not 'EC2_SECRET_KEY' in os.environ : usage ("no %s in environment" % 'EC2_SECRET_KEY')
if not 'EC2_KEYPAIR_ID' in os.environ : usage ("no %s in environment" % 'EC2_KEYPAIR_ID')
if not 'EC2_KEYPAIR' in os.environ : usage ("no %s in environment" % 'EC2_KEYPAIR' )

server = saga.Url(os.environ['EC2_URL'])

# in order to connect to EC2, we need an EC2 ID and KEY
c1 = saga.Context ('ec2')
c1.user_id = os.environ['EC2_ACCESS_KEY']
c1.user_key = os.environ['EC2_SECRET_KEY']
c1.server = server

# in order to access a created VM, we additionally need to point to the ssh
# key which is used for EC2 VM contextualization, i.e. as EC2 'keypair'.
# If the keypair is not yet registered on EC2, it will be registered by SAGA
# -- but then a user_key *must* be specified (only the public key is ever
# transferred to EC2).
c2 = saga.Context ('ec2_keypair')
c2.token = os.environ['EC2_KEYPAIR_ID']
c2.user_cert = os.environ['EC2_KEYPAIR']
c2.user_id = 'ubuntu' # the user id on the target VM
c2.server = server

# we create a session for all SAGA interactions, and attach the respective
# security contexts. Those are now avail for all SAGA objects created in
# that session
s = saga.Session (False) # FALSE: don't use any other (default) contexts
s.contexts.append (c1)
s.contexts.append (c2)

# in this session, connect to the EC2 resource manager
rm = saga.resource.Manager (server, session=s)

# --------------------------------------------------------------------------
#
# setup is done, evaluate command line parameters
#
args = sys.argv[1:]

# --------------------------------------------------------------------------
# list mode: show existing VMs, the VM templates, and (slowly) the OS images
if '-l' in args :
    args.remove ('-l')
    if len (args) > 0 :
        usage ("no additional args allowed on '-l'")

    # list known VMs (compute resources)
    print "\ncompute resources"
    for cr_id in rm.list () :
        print " %s" % cr_id

    # list the available VM templates
    print "\ncompute templates"
    for tmp in rm.list_templates () :
        print " %s" % tmp

    # we can also list the available OS images, as per below -- but since
    # the list of OS images available on EC2 is *huge*, this operation is
    # rather slow (libcloud does one additional hop per image, for
    # inspection)
    # {'name': 'None (cube-1-0-5-2012-09-07)', 'ispublic': 'true', 'state': 'available', 'rootdevicetype': 'instance-store', 'imagetype': 'machine'}
    print "\nOS images"
    descr = None
    ispublic = None
    for osi in rm.list_images () :
        descr = rm.get_image (osi)
        if descr['ispublic'] == 'true' :
            ispublic = 'public'
        else:
            ispublic = 'private'
        print " %s - %s, %s, %s" % (osi, descr['name'], ispublic,
                                    descr['state'])
    print
    sys.exit (0)

# --------------------------------------------------------------------------
# create mode: boot one VM per given image id
elif '-c' in args :
    args.remove ('-c')
    if len (args) == 0 :
        usage ("additional args required on '-c'")

    for image in args :
        print"\ncreating an instance from image %s" % image

        # create a resource description with an image and an OS template, out of
        # the ones listed above. We pick a small VM and a plain Ubuntu image...
        cd = saga.resource.ComputeDescription ()
        cd.image = image
        cd.template = 'Small Instance'

        # create a VM instance with that description, and inspect it for some
        # details
        cr = rm.acquire (cd)
        print "\nCreated VM"
        print " id : %s" % cr.id
        print " state : %s (%s)" % (state2str(cr.state), cr.state_detail)
        print " access : %s" % cr.access
    sys.exit (0)

# --------------------------------------------------------------------------
# use mode: reconnect to the given VMs and run a test job on each
elif '-u' in args :
    args.remove ('-u')
    if len (args) == 0 :
        usage ("additional args required on '-u'")

    # we want to reconnect to running VMs, specified by their IDs
    for vm_id in args :
        print "\nconnecting to %s" % vm_id

        # get a handle on that VM, and print some information
        cr = rm.acquire (vm_id)
        print " id : %s" % cr.id
        print " state : %s (%s)" % (state2str(cr.state), cr.state_detail)

        # make sure the machine is not in final state already
        if cr.state in [saga.resource.EXPIRED,
                        saga.resource.DONE,
                        saga.resource.FAILED] :
            print " VM %s is alrady in final state" % vm_id
            continue

        # we only can run jobs on ACTIVE machines -- so lets wait until the VM
        # is in that state.
        # Note: the careful coder will spot the subtle race condition between the
        # check above and the check on this line... ;-)
        if cr.state != saga.resource.ACTIVE :
            print " wait for ACTIVE state"
            cr.wait (saga.resource.ACTIVE)
            # Once a VM comes active, it still needs to boot and setup the ssh
            # daemon to be usable -- we thus wait for a while
            time.sleep (60)
        print " state : %s (%s)" % (state2str(cr.state), cr.state_detail)
        print " access : %s" % cr.access

        # The session created above contains the ssh context to access the VM
        # instance -- that context was created from the ec2_keypair context
        # which was earlier used for VM contextualization. So we use that
        # session to create a job service instance for that VM:
        js = saga.job.Service (cr.access, session=s)
        print "running job"

        # all ready: do the deed!
        j = js.run_job ('sleep 10')
        print " job state : %s" % j.state
        j.wait ()
        print " job state : %s" % j.state
        print
    sys.exit (0)

# --------------------------------------------------------------------------
# destroy mode: shut the given VMs down
elif '-d' in args :
    args.remove ('-d')
    if len (args) == 0 :
        usage ("additional args required on '-d'")

    # we want to reconnect to running VMs, specified by their IDs
    for vm_id in args :
        print "\nreconnecting to id %s" % vm_id

        # get a handle on that VM, and print some information
        cr = rm.acquire (vm_id)
        print " id : %s" % cr.id
        print " state : %s (%s)" % (state2str(cr.state), cr.state_detail)
        print " access : %s" % cr.access

        # nothing to destroy if the VM is already gone
        if cr.state in [saga.resource.EXPIRED,
                        saga.resource.DONE,
                        saga.resource.FAILED] :
            print " VM %s is alrady in final state" % vm_id
            continue

        print "\nshutting down %s " % cr.id
        cr.destroy ()
        print " state : %s (%s)" % (state2str(cr.state), cr.state_detail)
        print
    sys.exit (0)

# --------------------------------------------------------------------------
# anything else is an error
else :
    usage ('invalid arguments')
| |
import argparse
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
import data
import thin_stack
def linearize_tree(vocab, root, xp=numpy):
    """Flatten a binary sentiment tree into flat arrays for the thin stack.

    ``root`` is a nested tuple: ``(label, word)`` for leaves and
    ``(label, left, right)`` for internal nodes.  ``vocab`` (word -> id) is
    extended in place as unseen words are encountered.

    Returns a dict of int32 ``xp`` arrays: the leaf words and labels in
    left-to-right order, plus (lefts, rights, dests, labels) describing each
    internal node in bottom-up evaluation order.  Leaves occupy stack slots
    ``0 .. n_leaves-1``; internal nodes take slots from ``n_leaves`` upward
    in the order they are completed.
    """
    lefts = []        # stack slot of each parent's left child
    rights = []       # stack slot of each parent's right child
    dests = []        # stack slot the parent itself is written to
    labels = []       # internal-node labels
    words = []        # vocabulary ids of the leaves
    leaf_labels = []  # leaf labels

    def collect_leaves(exp):
        """First pass: intern the words and record the leaf labels."""
        if len(exp) == 2:
            label, word = exp
            # setdefault assigns the next free id exactly when word is new.
            words.append(vocab.setdefault(word, len(vocab)))
            leaf_labels.append(int(label))
        elif len(exp) == 3:
            _, left, right = exp
            collect_leaves(left)
            collect_leaves(right)

    collect_leaves(root)

    # Second pass: assign stack slots.  Leaves get 0..n-1; internal nodes
    # continue from n in completion order.
    cursor = {'leaf': 0, 'node': len(words)}

    def assign_slots(exp):
        """Return the stack slot assigned to this subtree's root."""
        if len(exp) == 2:
            slot = cursor['leaf']
            cursor['leaf'] = slot + 1
            return slot
        elif len(exp) == 3:
            label, left, right = exp
            left_slot = assign_slots(left)
            right_slot = assign_slots(right)
            slot = cursor['node']
            lefts.append(left_slot)
            rights.append(right_slot)
            dests.append(slot)
            labels.append(int(label))
            cursor['node'] = slot + 1
            return slot

    assign_slots(root)
    # A binary tree always has exactly one internal node fewer than leaves.
    assert len(lefts) == len(words) - 1

    return {
        'lefts': xp.array(lefts, xp.int32),
        'rights': xp.array(rights, xp.int32),
        'dests': xp.array(dests, xp.int32),
        'words': xp.array(words, xp.int32),
        'labels': xp.array(labels, xp.int32),
        'leaf_labels': xp.array(leaf_labels, xp.int32),
    }
def convert(batch, device):
    """Concatenate the per-example arrays of ``batch`` into one flat tuple.

    The layout is (lefts..., rights..., dests..., labels..., words...,
    leaf_labels...), each group holding one entry per example, moved to the
    requested device (None: leave in place, negative: CPU, otherwise that
    GPU id).
    """
    if device is None:
        def to_device(x):
            return x
    elif device < 0:
        to_device = cuda.to_cpu
    else:
        def to_device(x):
            return cuda.to_gpu(x, device, cuda.Stream.null)

    # Group order must match what ThinStackRecursiveNet.forward unpacks.
    out = []
    for key in ('lefts', 'rights', 'dests', 'labels', 'words', 'leaf_labels'):
        out.extend(to_device(example[key]) for example in batch)
    return tuple(out)
class ThinStackRecursiveNet(chainer.Chain):
    """Recursive neural network over binary trees using a shared "thin stack"
    so that all examples of a minibatch are processed in lockstep.

    A label is predicted (and scored) for every leaf and internal node.
    """

    def __init__(self, n_vocab, n_units, n_label):
        # embed: word id -> vector; l: composes two children into a parent;
        # w: maps a node vector to label scores.
        super(ThinStackRecursiveNet, self).__init__(
            embed=L.EmbedID(n_vocab, n_units),
            l=L.Linear(n_units * 2, n_units),
            w=L.Linear(n_units, n_label))
        self.n_units = n_units

    def leaf(self, x):
        """Embed a batch of word ids into leaf vectors."""
        return self.embed(x)

    def node(self, left, right):
        """Compose two child vectors into a parent vector."""
        return F.tanh(self.l(F.concat((left, right))))

    def label(self, v):
        """Compute label scores for a batch of node vectors."""
        return self.w(v)

    def forward(self, *inputs):
        """Compute the average per-node softmax cross-entropy loss.

        ``inputs`` is the flat tuple produced by ``convert``: six equal-sized
        groups of per-example arrays (lefts, rights, dests, labels, words,
        leaf_labels).
        """
        batch = len(inputs) // 6
        lefts = inputs[0: batch]
        rights = inputs[batch: batch * 2]
        dests = inputs[batch * 2: batch * 3]
        labels = inputs[batch * 3: batch * 4]
        sequences = inputs[batch * 4: batch * 5]
        leaf_labels = inputs[batch * 5: batch * 6]

        # Order examples by descending number of internal nodes so that
        # transpose_sequence yields dense per-step batches.
        inds = numpy.argsort([-len(l) for l in lefts])
        # Sort all arrays in descending order and transpose them
        lefts = F.transpose_sequence([lefts[i] for i in inds])
        rights = F.transpose_sequence([rights[i] for i in inds])
        dests = F.transpose_sequence([dests[i] for i in inds])
        labels = F.transpose_sequence([labels[i] for i in inds])
        sequences = F.transpose_sequence([sequences[i] for i in inds])
        leaf_labels = F.transpose_sequence(
            [leaf_labels[i] for i in inds])

        batch = len(inds)
        maxlen = len(sequences)

        loss = 0
        count = 0
        correct = 0

        # One shared stack for the minibatch: maxlen*2 slots per example
        # (leaves first, then internal nodes as they are produced).
        stack = self.xp.zeros(
            (batch, maxlen * 2, self.n_units), self.xp.float32)
        # Phase 1: push each leaf embedding onto the stack and score it.
        for i, (word, label) in enumerate(zip(sequences, leaf_labels)):
            batch = word.shape[0]
            es = self.leaf(word)
            ds = self.xp.full((batch,), i, self.xp.int32)
            y = self.label(es)
            loss += F.softmax_cross_entropy(y, label, normalize=False) * batch
            count += batch
            predict = self.xp.argmax(y.array, axis=1)
            correct += (predict == label.array).sum()
            stack = thin_stack.thin_stack_set(stack, ds, es)
        # Phase 2: pop children, compose parents bottom-up and score them.
        for left, right, dest, label in zip(lefts, rights, dests, labels):
            l, stack = thin_stack.thin_stack_get(stack, left)
            r, stack = thin_stack.thin_stack_get(stack, right)
            o = self.node(l, r)
            y = self.label(o)
            batch = l.shape[0]
            loss += F.softmax_cross_entropy(y, label, normalize=False) * batch
            count += batch
            predict = self.xp.argmax(y.array, axis=1)
            correct += (predict == label.array).sum()
            stack = thin_stack.thin_stack_set(stack, dest, o)
        loss /= count
        reporter.report({'loss': loss}, self)
        reporter.report({'total': count}, self)
        reporter.report({'correct': correct}, self)
        return loss
def main():
    """Train and evaluate the thin-stack recursive net on the trees corpus."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', default=-1, type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--epoch', '-e', default=400, type=int,
                        help='number of epochs to learn')
    parser.add_argument('--unit', '-u', default=30, type=int,
                        help='number of units')
    parser.add_argument('--batchsize', '-b', type=int, default=25,
                        help='learning minibatch size')
    parser.add_argument('--label', '-l', type=int, default=5,
                        help='number of labels')
    parser.add_argument('--epocheval', '-p', type=int, default=5,
                        help='number of epochs per evaluation')
    parser.add_argument('--test', dest='test', action='store_true')
    parser.set_defaults(test=False)
    args = parser.parse_args()

    # Vocabulary is built incrementally while linearizing the trees.
    vocab = {}
    max_size = None
    train_trees = data.read_corpus('trees/train.txt', max_size)
    test_trees = data.read_corpus('trees/test.txt', max_size)

    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        xp = cuda.cupy
    else:
        xp = numpy

    train_data = [linearize_tree(vocab, t, xp) for t in train_trees]
    train_iter = chainer.iterators.SerialIterator(train_data, args.batchsize)
    test_data = [linearize_tree(vocab, t, xp) for t in test_trees]
    test_iter = chainer.iterators.SerialIterator(
        test_data, args.batchsize, repeat=False, shuffle=False)

    model = ThinStackRecursiveNet(len(vocab), args.unit, args.label)
    if args.gpu >= 0:
        model.to_gpu()

    optimizer = chainer.optimizers.AdaGrad(0.1)
    optimizer.setup(model)

    updater = training.StandardUpdater(
        train_iter, optimizer, device=None, converter=convert)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'))
    trainer.extend(
        extensions.Evaluator(test_iter, model, converter=convert, device=None),
        trigger=(args.epocheval, 'epoch'))
    trainer.extend(extensions.LogReport())
    # Derive accuracies from the reported correct/total counts.
    trainer.extend(extensions.MicroAverage(
        'main/correct', 'main/total', 'main/accuracy'))
    trainer.extend(extensions.MicroAverage(
        'validation/main/correct', 'validation/main/total',
        'validation/main/accuracy'))
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))
    trainer.run()
# Standard script entry point guard.
if __name__ == '__main__':
    main()
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 10/04/2016
@author: Tarek Taha
Naive code to perform static exploration
"""
import rospy
import math
from math import radians, degrees, cos, sin, tan, pi
import csv
import sys, os
import time
import cv2
import numpy as np
import actionlib
from geometry_msgs.msg import PoseWithCovarianceStamped, Quaternion, PoseWithCovariance, Twist, Point
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import Odometry
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Imu
from visualization_msgs.msg import Marker
from tf.transformations import quaternion_from_euler, euler_from_quaternion
# Module-level state and Hough-transform tuning parameters.
gotOdom = False  # set True once the first odometry message arrives
laser_min_range = 1  # meters; Used to ignore the UR5 arm
wall_distance = 2  # meters; NOTE(review): not referenced in the visible code -- confirm use
hough_angle_res = math.pi/64  # rad. Used for hough
hough_range_res = 0.05  # meters. Used for hough
hough_threshold = 10  # Minimum points to consider a line. Used for hough
def generateRelativePositionGoal(pose):
    """Build a MoveBaseGoal offset from the robot's current pose.

    :param pose: [dx, dy, dz, dyaw] offset applied to current_pose
    :returns: a MoveBaseGoal expressed in the '/odom' frame
    """
    global current_pose, past_pose;
    # Remember where we were before issuing the new goal.
    past_pose = current_pose;
    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = '/odom'
    goal.target_pose.pose.position.x = current_pose[0] + pose[0]
    goal.target_pose.pose.position.y = current_pose[1] + pose[1]
    goal.target_pose.pose.position.z = current_pose[2] + pose[2]
    # NOTE(review): current_pose[3] is yaw in radians (from poseCallback) but
    # the sum is passed through radians() as if it were degrees -- confirm
    # the intended units of pose[3] and current_pose[3].
    angle = radians(current_pose[3] + pose[3])
    quat = quaternion_from_euler(0.0, 0.0, angle)
    goal.target_pose.pose.orientation = Quaternion(*quat.tolist())
    return goal
def poseCallback(data):
    """Odometry subscriber callback: cache the latest pose as [x, y, z, yaw].

    Sets the module-global gotOdom flag and logs the current position at
    most once per second.
    """
    global current_pose, gotOdom
    # Lazily attach a timestamp attribute to throttle the log output.
    if not hasattr(poseCallback, "start_time"):
        poseCallback.start_time = rospy.Time() # it doesn't exist yet, so initialize it
    x = data.pose.pose.position.x
    y = data.pose.pose.position.y
    z = data.pose.pose.position.z
    roll, pitch, yaw = euler_from_quaternion([data.pose.pose.orientation.x,
                                              data.pose.pose.orientation.y,
                                              data.pose.pose.orientation.z,
                                              data.pose.pose.orientation.w])
    current_pose = [x,y,z,yaw]
    gotOdom = True
    # Show message once a second
    if (rospy.Time.now() - poseCallback.start_time > rospy.Duration(1)):
        poseCallback.start_time = rospy.Time.now()
        rospy.loginfo("Current Location x: " + str(x) + "y: " + str(y) + "z: " + str(z) + " yaw: " + str(degrees(yaw)))
# Generate range using floats
def frange(start, end, jump):
    """Return a list of floats from ``start`` stepping by ``jump``.

    The list always contains at least ``start`` and, matching the behaviour
    the Hough transform relies on, includes the first value that exceeds
    ``end`` -- i.e. the range deliberately overshoots by one step so the
    whole interval [start, end] is covered.

    :param start: first value of the range
    :param end: upper bound; iteration stops after passing it
    :param jump: step size; must be positive
    :raises ValueError: if ``jump`` is not positive (the original loop would
                        never terminate in that case)
    """
    if jump <= 0:
        raise ValueError("jump must be positive, got %r" % (jump,))
    out = [start]
    while True:
        start += jump
        out.append(start)
        if (start > end):
            break
    return out
class HoughLine:
    """A single line detected in Hough space.

    Holds the perpendicular distance ``r``, the angle ``angle`` and the
    number of accumulator ``votes`` the line received.
    """
    def __init__(self, r, angle, votes):
        self.r, self.angle, self.votes = r, angle, votes

    def __repr__(self):
        # Render exactly like the plain (r, angle, votes) tuple would.
        return repr((self.r, self.angle, self.votes))
# Hough transform designed for radial laser data
def houghTransform(r, alpha):
    """Detect straight lines in a radial laser scan via a Hough transform.

    :param r: list of beam ranges
    :param alpha: list of beam angles (radians) matching ``r``
    Prints each detected line in y = mx + c form and publishes RViz
    LINE_LIST markers for them on the module-global marker_pub.
    """
    global laser_min_range, hough_angle_res, hough_range_res, hough_threshold
    # Discretize the (theta, rho) parameter space.
    theta = frange(0, 2*math.pi, hough_angle_res)
    rho = frange(0, max(r), hough_range_res)
    # Create 2D grid of theta and rho
    votes = [[0 for i in range(len(rho))] for j in range(len(theta))]
    # analyze each point
    for p in range(len(r)):
        # Points closer than laser_min_range (the robot arm) are ignored.
        if (r[p] < laser_min_range):
            continue;
        for i_t in range(len(theta)):
            r_temp = r[p]*cos(alpha[p] - theta[i_t])
            if (r_temp < 0):
                continue
            r_temp = hough_range_res*round(r_temp/hough_range_res) # round to nearest value of rho
            i_r = int(r_temp/hough_range_res) # find index of corresponding rho
            votes[i_t][i_r] += 1
    # Find max votes
    v_max = 0
    for i_r in range(len(rho)):
        for i_t in range(len(theta)):
            if (votes[i_t][i_r] > v_max):
                v_max = votes[i_t][i_r]
    #print("DONE")
    #sys.exit()
    '''
    # Extract lines by thresholding
    line_out = [] #Output lines
    for i_r in range(len(rho)):
        for i_t in range(len(theta)):
            if (votes[i_t][i_r] >= hough_threshold): # and votes[i_t][i_r] >= v_max*0.5
                h = HoughLine(rho[i_r], theta[i_t], votes[i_t][i_r])
                line_out.append(h)
    '''
    # Check for local maxima
    line_out = [] #Output lines
    for i_r in range(len(rho)):
        for i_t in range(len(theta)):
            if (votes[i_t][i_r] >= hough_threshold):
                isMaxima = True
                # Compare against a 5x5-ish neighbourhood in (rho, theta).
                for j_r in range(i_r-2, i_r+2, 1):
                    if (not isMaxima):
                        break
                    # wrap around
                    # NOTE(review): for j_r == -1 this yields len(rho)+1, not
                    # len(rho)-1, and j_r can also reach len(rho) near the top
                    # edge -- both look like out-of-range indexes; the wrap
                    # should probably be len(rho)+j_r. Confirm and fix.
                    k_r = j_r
                    if (j_r < 0):
                        k_r = len(rho)-j_r
                    for j_t in range(i_t-2, i_t+2, 1):
                        if (i_t == j_t and i_r == j_r):
                            continue
                        # wrap around
                        # NOTE(review): same suspect wrap as k_r above.
                        k_t = j_t
                        if (j_t < 0):
                            k_t = len(theta)-j_t
                        if (votes[i_t][i_r] <= votes[k_t][k_r]):
                            isMaxima = False
                            break
                if (isMaxima):
                    h = HoughLine(rho[i_r], theta[i_t], votes[i_t][i_r])
                    line_out.append(h)
    # Sort them in descending order
    line_out = sorted(line_out, key=lambda l: l.votes, reverse=True)
    # Create markers for each
    marker = Marker()
    marker.header.frame_id = "/base_link"
    marker.type = marker.LINE_LIST
    marker.pose.orientation.w = 1
    marker.scale.x = 0.01
    marker.color.a = 1.0
    marker.color.b = 1.0
    # Print y = mx+c versions of each line and make a marker for each
    for i in range(len(line_out)):
        m = tan(line_out[i].angle)
        c = line_out[i].r / cos(line_out[i].angle)
        # Draw a segment of the line spanning y in [-q, q].
        q = 2.5
        y1, x1 = [-q,-m*q + c]
        y2, x2 = [ q, m*q + c]
        print ("votes = " + str(line_out[i].votes) + "\tr = " + str(line_out[i].r) + "\tangle = " + str(round(line_out[i].angle/hough_angle_res))+ "\ty = " + str(m) + "x + " + str(c))
        p1 = Point()
        p1.x = x1
        p1.y = y1
        p1.z = 0
        marker.points.append(p1)
        p2 = Point()
        p2.x = x2
        p2.y = y2
        p2.z = 0
        marker.points.append(p2)
    # Publish markers
    marker_pub.publish(marker)
    print("")
# NextMove
def nextMove(ranges, angles):
    """Find the closest laser return to steer the next move decision.

    Args:
        ranges: list of finite range readings (metres); may be empty when
            every raw reading was infinite (see scan_callback).
        angles: list of beam angles matching `ranges` element-for-element.

    Returns:
        A (min_range, min_index) tuple, or None when `ranges` is empty.
    """
    # Bug fix: the original unconditionally read ranges[0], which raised
    # IndexError when scan_callback filtered out every (infinite) reading.
    if not ranges:
        return None
    # min() with enumerate keeps the first occurrence on ties, matching the
    # original strict '<' comparison.
    r_min_index, r_min = min(enumerate(ranges), key=lambda pair: pair[1])
    return r_min, r_min_index
# Laser scanner callback
def scan_callback(msg):
    """Laser-scan callback: keep finite readings and hand them to nextMove."""
    ranges = []
    angles = []
    beam_count = len(msg.ranges)
    for i, reading in enumerate(msg.ranges):
        # Infinite readings mean "no return"; drop them entirely.
        if math.isinf(reading):
            continue
        ranges.append(reading)
        # Beam angle, sweeping linearly from +angle_max down to -angle_max.
        # NOTE(review): assumes the scan is symmetric about zero — confirm
        # against the driver's angle_min.
        angles.append(msg.angle_max - 2 * i * msg.angle_max / beam_count)
    # Hough-transform line extraction currently disabled.
    #houghTransform(ranges, angles)
    nextMove(ranges, angles)
# Entry point: connect to move_base, wait for odometry, then send a single
# relative navigation goal and report the outcome.  (Python 2 script: note
# the statement-form `print` below.)
if __name__=='__main__':
    rospy.init_node("find_panel")
    # Set up subscribers and navigation stack
    # current_pose is [x, y, z, yaw]; updated by poseCallback (defined elsewhere).
    current_pose = [0,0,0,0]
    scan_sub = rospy.Subscriber('/camera/scan', LaserScan, scan_callback)
    pose_sub = rospy.Subscriber("/odometry/filtered", Odometry, poseCallback)
    marker_pub = rospy.Publisher ("/explore/HoughLines", Marker, queue_size = 100)
    navigationActionServer = actionlib.SimpleActionClient('/move_base', MoveBaseAction)
    rospy.loginfo("Connecting to the move Action Server")
    # Blocks until move_base is available.
    navigationActionServer.wait_for_server()
    rospy.loginfo("Ready ...")
    # Wait for initial odometry
    # gotOdom is set by poseCallback (defined elsewhere in this file).
    while (not gotOdom):
        time.sleep(0.01)
    starting_pose = current_pose
    starting_angle = 0
    # NOTE(review): blocks the main thread for 200 seconds — confirm this
    # long pause is intentional (callbacks keep running meanwhile).
    time.sleep(200)
    # Case 1: No object detected, move around
    # Case 2: Object detected, go in front of it
    # Case 2.1: Oops, false alarm. Go back to moving
    # Case 2.2: Done positioning. Terminate
    # Case 3: Went 360, couldn't find it. Go back to exploration
    # 1: Move at slight angle perp to wall
    rospy.loginfo("Generate the desired configuration in front of panel")
    #pose = [(64, -25, 0.0, 0.0)]
    # Relative pose [x, y, z, yaw]; all zeros keeps the robot in place.
    pose = [0, 0, 0, 0]
    goal = generateRelativePositionGoal(pose)
    rospy.loginfo("Moving Robot to the desired configuration in front of panel")
    navigationActionServer.send_goal(goal)
    rospy.loginfo("Waiting for Robot to reach the desired configuration in front of panel")
    navigationActionServer.wait_for_result()
    navResult = navigationActionServer.get_result()
    navState = navigationActionServer.get_state()
    rospy.loginfo("Finished Navigating")
    print "Result: ", str(navResult)
    # Outcome 3 : SUCCESS, 4 : ABORTED , 5: REJECTED
    if (navState == 3):
        status = "SUCCESS"
    elif (navState == 4):
        status = "ABORTED"
    elif (navState == 5):
        status = "REJECTED"
    else:
        status = "UNKNOWN (code " + str(navState) + ")"
    print ("Navigation status: " + status)
    '''
    #while True:
    for pose in waypoints:
        rospy.loginfo("Creating navigation goal...")
        goal = generateGoal(pose)
        rospy.loginfo("Moving Robot desired goal")
        navigationActionServer.send_goal(goal)
        #to stop if obstacle is sensed in the range of laser
        while (navigationActionServer.get_state()==0 or navigationActionServer.get_state()==1):
            rospy.sleep(0.1)
            if (g_range_ahead < 29):
                navigationActionServer.cancel_goal()
                rospy.loginfo("Obstacle in front")
                break
        #to break out from the waypoints loop
        if (g_range_ahead < 29):
            break
    rospy.loginfo("Waiting for Robot to reach goal")
    navigationActionServer.wait_for_result()
    rospy.sleep(10.)
    #define the obstacle moving goal
    rospy.loginfo("Creating obstacle goal...")
    print "current_pose: ", str(current_pose)
    print "obstacle_angle", str(obstacle_angle)
    print "angle_discrete", str(angle_discrete)
    print "angle_id", str(angle_id)
    print "angle_number", str(angle_number)
    obst_goal_local_position = [(g_range_ahead - 2) * cos(obstacle_angle),(g_range_ahead - 2) * sin(obstacle_angle)]
    obst_goal_global_position = [current_pose[0] + cos(current_pose[3]) * obst_goal_local_position[0] - sin(current_pose[3]) * obst_goal_local_position[1], current_pose[1] + sin(current_pose[3]) * obst_goal_local_position[0] + cos(current_pose[3]) * obst_goal_local_position[1]]
    pose = [(obst_goal_global_position[0],obst_goal_global_position[1], 0, obstacle_angle + current_pose[3])]
    goal = generateGoal(pose)
    print "cos(current_pose[3])", str(cos(current_pose[3]))
    print "local postion: ", str(obst_goal_local_position)
    print "global postion: ", str(obst_goal_global_position)
    print "goal: ", str(pose)
    rospy.loginfo("Moving Robot to the obstacle")
    navigationActionServer.send_goal(goal)
    #to stop 3 meters away of the obstacle
    while (navigationActionServer.get_state()==0 or navigationActionServer.get_state()==1):
        rospy.sleep(0.1)
        if (g_range_ahead < 3):
            navigationActionServer.cancel_goal()
            rospy.loginfo("3 meter in front of Obstacle")
            break
    rospy.loginfo("Waiting for Robot to reach obstacle goal")
    navigationActionServer.wait_for_result()
    '''
| |
#!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for converting between v3 and v1 datastore protocol buffers.
This module is internal and should not be used by client applications.
"""
import six
from google.appengine.datastore import datastore_v4_pb2
from google.appengine.datastore import entity_v4_pb2
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2
# Minimum googledatastore client version required for Cloud Datastore (v1) use.
_MIN_CLOUD_DATASTORE_VERSION = (6, 0, 0)
_CLOUD_DATASTORE_ENABLED = False
try:
  import googledatastore
  if googledatastore.VERSION >= _MIN_CLOUD_DATASTORE_VERSION:
    _CLOUD_DATASTORE_ENABLED = True
except ImportError:
  # Client library not installed; v1 conversions stay disabled.
  pass
except AttributeError:
  # Installed googledatastore predates the VERSION attribute — too old.
  pass
MISSING_CLOUD_DATASTORE_MESSAGE = (
    'Could not import googledatastore. This library must be installed with '
    'version >= %s to use the Cloud Datastore API.' %
    '.'.join([str(v) for v in _MIN_CLOUD_DATASTORE_VERSION]))
# v3 Property.meaning codes carried through conversions (see the branching
# in v3_property_to_v4_value / v4_to_v3_property below).
MEANING_ATOM_CATEGORY = 1
MEANING_URL = 2
MEANING_ATOM_TITLE = 3
MEANING_ATOM_CONTENT = 4
MEANING_ATOM_SUMMARY = 5
MEANING_ATOM_AUTHOR = 6
MEANING_NON_RFC_3339_TIMESTAMP = 7
MEANING_GD_EMAIL = 8
MEANING_GEORSS_POINT = 9
MEANING_GD_IM = 10
MEANING_GD_PHONENUMBER = 11
MEANING_GD_POSTALADDRESS = 12
MEANING_PERCENT = 13
MEANING_TEXT = 15
MEANING_BYTESTRING = 16
MEANING_BLOBKEY = 17
MEANING_INDEX_ONLY = 18
MEANING_PREDEFINED_ENTITY_USER = 20
MEANING_PREDEFINED_ENTITY_POINT = 21
MEANING_ZLIB = 22
MEANING_POINT_WITHOUT_V3_MEANING = 23
MEANING_EMPTY_LIST = 24
# meaning_uri marker for zlib-compressed blob values.
URI_MEANING_ZLIB = 'ZLIB'
# Datastore size/length limits enforced by validation code.
MAX_URL_CHARS = 2083
MAX_INDEXED_STRING_CHARS = 500
MAX_INDEXED_BLOB_BYTES = 500
MAX_PARTITION_ID_LENGTH = 100
MAX_DATASET_ID_SECTION_LENGTH = 100
# A dataset id has up to three '~'-separated sections plus two separators.
MAX_DATASET_ID_LENGTH = MAX_DATASET_ID_SECTION_LENGTH * 3 + 2
MAX_KEY_PATH_LENGTH = 100
# Property names used when (de)composing predefined point/user entities.
PROPERTY_NAME_X = 'x'
PROPERTY_NAME_Y = 'y'
PROPERTY_NAME_EMAIL = 'email'
PROPERTY_NAME_AUTH_DOMAIN = 'auth_domain'
PROPERTY_NAME_USER_ID = 'user_id'
PROPERTY_NAME_INTERNAL_ID = 'internal_id'
PROPERTY_NAME_FEDERATED_IDENTITY = 'federated_identity'
PROPERTY_NAME_FEDERATED_PROVIDER = 'federated_provider'
# Pseudo-property referring to the entity's key.
PROPERTY_NAME_KEY = '__key__'
DEFAULT_GAIA_ID = 0
# RFC 3339 representable timestamp range (years 0001-9999) in microseconds.
RFC_3339_MIN_MICROSECONDS_INCLUSIVE = -62135596800 * 1000 * 1000
RFC_3339_MAX_MICROSECONDS_INCLUSIVE = 253402300799 * 1000 * 1000 + 999999
def v4_key_to_string(v4_key):
  """Generates a string representing a key's path.

  The output makes no effort to qualify special characters in strings.

  The key need not be valid, but if any of the key path elements have
  both a name and an ID the name is ignored.

  Args:
    v4_key: An `entity_v4_pb2.Key`.

  Returns:
    A string representing the key's path.
  """
  def element_repr(element):
    # An element carries an id, a name, or neither; id wins when both set.
    if element.HasField('id'):
      id_or_name = str(element.id)
    elif element.HasField('name'):
      id_or_name = element.name
    else:
      id_or_name = ''
    return '%s: %s' % (element.kind, id_or_name)
  return '[%s]' % ', '.join(
      element_repr(element) for element in v4_key.path_element)
def is_complete_v4_key(v4_key):
  """Returns True if a key specifies an ID or name, False otherwise.

  Args:
    v4_key: An `entity_v4_pb2.Key`.

  Returns:
    `True` if the key specifies an ID or name, `False` otherwise.
  """
  assert len(v4_key.path_element) >= 1
  # Bug fix: `path_element` is a repeated field and must be indexed, not
  # called — the original `v4_key.path_element(len(...) - 1)` raised
  # TypeError on every invocation (compare is_complete_v1_key, which
  # correctly indexes `v1_key.path`).
  last_element = v4_key.path_element[-1]
  return last_element.HasField('id') or last_element.HasField('name')
def v1_key_to_string(v1_key):
  """Generates a string representing a key's path.

  The output makes no effort to qualify special characters in strings.

  The key need not be valid, but if any of the key path elements have
  both a name and an ID the name is ignored.

  Args:
    v1_key: An `googledatastore.Key`.

  Returns:
    A string representing the key's path.
  """
  parts = []
  for element in v1_key.path:
    # The oneof tells us which identifier (if any) is populated.
    set_field = element.WhichOneof('id_type')
    if set_field == 'id':
      parts.append('%s: %s' % (element.kind, str(element.id)))
    elif set_field == 'name':
      parts.append('%s: %s' % (element.kind, element.name))
    else:
      parts.append('%s: %s' % (element.kind, ''))
  return '[%s]' % ', '.join(parts)
def is_complete_v1_key(v1_key):
  """Returns `True` if a key specifies an ID or name, `False` otherwise.

  Args:
    v1_key: An `googledatastore.Key`.

  Returns:
    `True` if the key specifies an ID or name, `False` otherwise.
  """
  assert len(v1_key.path) >= 1
  # Complete iff the final path element has its id_type oneof populated.
  final_element = v1_key.path[-1]
  return final_element.WhichOneof('id_type') is not None
def is_complete_v3_key(v3_key):
  """Returns `True` if a key specifies an ID or name, `False` otherwise.

  Args:
    v3_key: A `datastore_pb.Reference`.

  Returns:
    `True` if the key specifies an ID or name, `False` otherwise.
  """
  assert len(v3_key.path.element) >= 1
  tail = v3_key.path.element[-1]
  # v3 treats id == 0 / name == '' as unset, hence the truthiness checks.
  has_id = tail.HasField('id') and tail.id
  has_name = tail.HasField('name') and tail.name
  return has_id or has_name
def get_v1_mutation_key_and_entity(v1_mutation):
  """Returns the v1 key and entity for a v1 mutation proto, if applicable.

  Args:
    v1_mutation: A `googledatastore.Mutation`.

  Returns:
    A tuple `(googledatastore.Key for this mutation,
    googledatastore.Entity or None if the mutation is a deletion)`.
  """
  # Deletions carry only a key; all other operations carry a full entity.
  if not v1_mutation.HasField('delete'):
    v1_entity = getattr(v1_mutation, v1_mutation.WhichOneof('operation'))
    return v1_entity.key, v1_entity
  return v1_mutation.delete, None
def is_valid_utf8(s):
  """Returns True if `s` is text, or bytes that decode as UTF-8."""
  # Text strings are valid by definition; only bytes need checking.
  if isinstance(s, six.text_type):
    return True
  try:
    s.decode('utf-8')
  except UnicodeDecodeError:
    return False
  return True
def check_conversion(condition, message):
  """Asserts a conversion condition and raises an error if it's not met.

  Args:
    condition: A boolean condition to enforce.
    message: Error message.

  Raises:
    InvalidConversionError: if condition is not met.
  """
  # Early return on success keeps the failure path unmistakable.
  if condition:
    return
  raise InvalidConversionError(message)
def is_in_rfc_3339_bounds(microseconds):
  """Returns True when the microsecond timestamp is RFC 3339-representable."""
  # Representable range spans years 0001 through 9999 (see the module
  # constants above).
  if microseconds < RFC_3339_MIN_MICROSECONDS_INCLUSIVE:
    return False
  return microseconds <= RFC_3339_MAX_MICROSECONDS_INCLUSIVE
class InvalidConversionError(Exception):
  """Raised when a datastore protocol-buffer conversion fails."""
class IdResolver(object):
  """A class that can handle project id <--> application id transformations."""

  def __init__(self, app_ids=()):
    """Create a new `IdResolver`.

    Args:
      app_ids: A list of application ids with application id shard set, e.g.
        s~my_app or e~my_app.
    """
    # Reverse index: project id -> full (sharded) application id.
    self._resolver_map = {
        self.resolve_project_id(app_id): app_id for app_id in app_ids}

  def resolve_project_id(self, app_id):
    """Converts an application id to a project id.

    Args:
      app_id: The application id.

    Returns:
      The project id.
    """
    # Strip any shard prefix(es): 's~my_app' -> 'my_app'.
    return app_id.rsplit('~')[-1]

  def resolve_app_id(self, project_id):
    """Converts a project id to an application id.

    Args:
      project_id: The project id.

    Returns:
      The application id.

    Raises:
      InvalidConversionError: if the application is unknown for the project id.
    """
    check_conversion(project_id in self._resolver_map,
                     'Cannot determine application id for provided project id: '
                     '"%s".'
                     % project_id)
    return self._resolver_map[project_id]
class _IdentityIdResolver(IdResolver):
  """An `IdResolver` that resolves app_id == project_id (identity mapping)."""
  def resolve_project_id(self, app_id):
    # Identity: the app id is used unchanged as the project id.
    return app_id
  def resolve_app_id(self, project_id):
    # Identity: the project id is used unchanged as the app id.
    return project_id
class _EntityConverter(object):
"""Converter for entities and keys."""
  def __init__(self, id_resolver):
    """Creates a new `EntityConverter`.

    Args:
      id_resolver: An `IdResolver` used to convert between project ids and
        application ids during v1 <-> v3 key conversion.
    """
    self._id_resolver = id_resolver
  def v4_to_v3_reference(self, v4_key, v3_ref):
    """Converts a v4 Key to a v3 Reference.

    Args:
      v4_key: An `entity_v4_pb2.Key`.
      v3_ref: An `entity_pb2.Reference` to populate.
    """
    v3_ref.Clear()
    if v4_key.HasField('partition_id'):
      # The v4 dataset id maps directly onto the v3 app id.
      if v4_key.partition_id.HasField('dataset_id'):
        v3_ref.app = v4_key.partition_id.dataset_id
      if v4_key.partition_id.HasField('namespace'):
        v3_ref.name_space = v4_key.partition_id.namespace
    for v4_element in v4_key.path_element:
      v3_element = v3_ref.path.element.add()
      v3_element.type = v4_element.kind
      # An element may carry an id, a name, or neither (incomplete key).
      if v4_element.HasField('id'):
        v3_element.id = v4_element.id
      if v4_element.HasField('name'):
        v3_element.name = v4_element.name
def v4_to_v3_references(self, v4_keys):
"""Converts a list of v4 Keys to a list of v3 References.
Args:
v4_keys: A list of `entity_v4_pb2.Key` objects.
Returns:
A list of `entity_pb2.Reference` objects.
"""
v3_refs = []
for v4_key in v4_keys:
v3_ref = entity_pb2.Reference()
self.v4_to_v3_reference(v4_key, v3_ref)
v3_refs.append(v3_ref)
return v3_refs
  def v3_to_v4_key(self, v3_ref, v4_key):
    """Converts a v3 Reference to a v4 Key.

    Args:
      v3_ref: An `entity_pb2.Reference`.
      v4_key: An `entity_v4_pb2.Key` to populate.
    """
    v4_key.Clear()
    # An empty app id means an unset key: leave the v4 Key cleared.
    if not v3_ref.app:
      return
    v4_key.partition_id.dataset_id = v3_ref.app
    if v3_ref.name_space:
      v4_key.partition_id.namespace = v3_ref.name_space
    for v3_element in v3_ref.path.element:
      v4_element = v4_key.path_element.add()
      v4_element.kind = v3_element.type
      if v3_element.HasField('id'):
        v4_element.id = v3_element.id
      if v3_element.HasField('name'):
        v4_element.name = v3_element.name
def v3_to_v4_keys(self, v3_refs):
"""Converts a list of v3 References to a list of v4 Keys.
Args:
v3_refs: A list of `entity_pb2.Reference` objects.
Returns:
A list of `entity_v4_pb2.Key` objects.
"""
v4_keys = []
for v3_ref in v3_refs:
v4_key = entity_v4_pb2.Key()
self.v3_to_v4_key(v3_ref, v4_key)
v4_keys.append(v4_key)
return v4_keys
  def v4_to_v3_entity(self, v4_entity, v3_entity, is_projection=False):
    """Converts a v4 Entity to a v3 `EntityProto`.

    Args:
      v4_entity: An `entity_v4_pb2.Entity`.
      v3_entity: An `entity_pb2.EntityProto` to populate.
      is_projection: `True` if the `v4_entity` is from a projection query.
    """
    v3_entity.Clear()
    for v4_property in v4_entity.property:
      property_name = v4_property.name
      v4_value = v4_property.value
      if v4_value.list_value:
        # A v4 list becomes multiple v3 properties sharing one name.
        for v4_sub_value in v4_value.list_value:
          self.__add_v3_property_from_v4(
              property_name, True, is_projection, v4_sub_value, v3_entity)
      else:
        self.__add_v3_property_from_v4(
            property_name, False, is_projection, v4_value, v3_entity)
    if v4_entity.HasField('key'):
      v4_key = v4_entity.key
      self.v4_to_v3_reference(v4_key, v3_entity.key)
      v3_ref = v3_entity.key
      # Derive the entity group from the converted key.
      self.v3_reference_to_group(v3_ref, v3_entity.entity_group)
    else:
      # Keyless entity (e.g. an embedded entity value): nothing more to do.
      pass
  def v3_to_v4_entity(self, v3_entity, v4_entity):
    """Converts a v3 `EntityProto` to a v4 Entity.

    Args:
      v3_entity: An `entity_pb2.EntityProto`.
      v4_entity: An `entity_v4_pb2.Entity` to populate.
    """
    v4_entity.Clear()
    self.v3_to_v4_key(v3_entity.key, v4_entity.key)
    if not v3_entity.key.HasField('app'):
      # Keyless v3 entity: drop the empty key written by v3_to_v4_key.
      v4_entity.ClearField('key')
    # Shared name -> v4 property map so indexed and raw v3 properties with
    # the same name merge into one v4 property.
    v4_properties = {}
    for v3_property in v3_entity.property:
      self.__add_v4_property_to_entity(v4_entity, v4_properties, v3_property,
                                       True)
    for v3_property in v3_entity.raw_property:
      self.__add_v4_property_to_entity(v4_entity, v4_properties, v3_property,
                                       False)
  def v4_value_to_v3_property_value(self, v4_value, v3_value):
    """Converts a v4 Value to a v3 `PropertyValue`.

    Args:
      v4_value: An `entity_v4_pb2.Value`.
      v3_value: An `entity_pb2.PropertyValue` to populate.
    """
    v3_value.Clear()
    if v4_value.HasField('boolean_value'):
      v3_value.booleanValue = v4_value.boolean_value
    elif v4_value.HasField('integer_value'):
      v3_value.int64Value = v4_value.integer_value
    elif v4_value.HasField('double_value'):
      v3_value.doubleValue = v4_value.double_value
    elif v4_value.HasField('timestamp_microseconds_value'):
      # v3 stores timestamps as plain int64 microseconds.
      v3_value.int64Value = v4_value.timestamp_microseconds_value
    elif v4_value.HasField('key_value'):
      v3_ref = entity_pb2.Reference()
      self.v4_to_v3_reference(v4_value.key_value, v3_ref)
      self.v3_reference_to_v3_property_value(v3_ref, v3_value)
    elif v4_value.HasField('blob_key_value'):
      v3_value.stringValue = v4_value.blob_key_value
    elif v4_value.HasField('string_value'):
      # v3 stringValue holds bytes; encode the v4 text value.
      v3_value.stringValue = v4_value.string_value.encode('utf-8')
    elif v4_value.HasField('blob_value'):
      v3_value.stringValue = v4_value.blob_value
    elif v4_value.HasField('entity_value'):
      v4_entity_value = v4_value.entity_value
      v4_meaning = v4_value.meaning
      # Predefined point/user entities map to dedicated v3 value types;
      # anything else becomes a serialized EntityProto in stringValue.
      if (v4_meaning == MEANING_GEORSS_POINT
          or v4_meaning == MEANING_PREDEFINED_ENTITY_POINT):
        self.__v4_to_v3_point_value(v4_entity_value, v3_value.pointvalue)
      elif v4_meaning == MEANING_PREDEFINED_ENTITY_USER:
        self.v4_entity_to_v3_user_value(v4_entity_value, v3_value.uservalue)
      else:
        v3_entity_value = entity_pb2.EntityProto()
        self.v4_to_v3_entity(v4_entity_value, v3_entity_value)
        v3_value.stringValue = v3_entity_value.SerializePartialToString()
    elif v4_value.HasField('geo_point_value'):
      point_value = v3_value.pointvalue
      point_value.x = v4_value.geo_point_value.latitude
      point_value.y = v4_value.geo_point_value.longitude
    else:
      # No value field set: leave the v3 value cleared (null).
      pass
  def v3_property_to_v4_value(self, v3_property, indexed, v4_value):
    """Converts a v3 Property to a v4 Value.

    Args:
      v3_property: An `entity_pb2.Property`.
      indexed: Whether the v3 property is indexed.
      v4_value: An `entity_v4_pb2.Value` to populate.
    """
    v4_value.Clear()
    v3_property_value = v3_property.value
    v3_meaning = v3_property.meaning
    v3_uri_meaning = None
    if v3_property.meaning_uri:
      v3_uri_meaning = v3_property.meaning_uri
    # Sanitize: drop meanings that are invalid for the stored value union.
    if not self.__is_v3_property_value_union_valid(v3_property_value):
      v3_meaning = None
      v3_uri_meaning = None
    elif v3_meaning == entity_pb2.Property.NO_MEANING:
      v3_meaning = None
    elif not self.__is_v3_property_value_meaning_valid(v3_property_value,
                                                      v3_meaning):
      v3_meaning = None
    is_zlib_value = False
    if v3_uri_meaning:
      if v3_uri_meaning == URI_MEANING_ZLIB:
        if v3_property_value.HasField('stringValue'):
          # Zlib-compressed payloads are surfaced in v4 as BLOB + ZLIB meaning.
          is_zlib_value = True
          if v3_meaning != entity_pb2.Property.BLOB:
            v3_meaning = entity_pb2.Property.BLOB
        else:
          pass
      else:
        pass
    if v3_property_value.HasField('booleanValue'):
      v4_value.boolean_value = v3_property_value.booleanValue
    elif v3_property_value.HasField('int64Value'):
      if v3_meaning == entity_pb2.Property.GD_WHEN:
        # GD_WHEN int64s are timestamps in v4; the meaning becomes implicit.
        v4_value.timestamp_microseconds_value = v3_property_value.int64Value
        v3_meaning = None
      else:
        v4_value.integer_value = v3_property_value.int64Value
    elif v3_property_value.HasField('doubleValue'):
      v4_value.double_value = v3_property_value.doubleValue
    elif v3_property_value.HasField('referencevalue'):
      v3_ref = entity_pb2.Reference()
      self.__v3_reference_value_to_v3_reference(
          v3_property_value.referencevalue, v3_ref)
      self.v3_to_v4_key(v3_ref, v4_value.key_value)
    elif v3_property_value.HasField('stringValue'):
      if v3_meaning == entity_pb2.Property.ENTITY_PROTO:
        # stringValue holds a serialized EntityProto: deserialize and convert.
        serialized_entity_v3 = v3_property_value.stringValue
        v3_entity = entity_pb2.EntityProto()
        v3_entity.ParsePartialFromString(serialized_entity_v3)
        self.v3_to_v4_entity(v3_entity, v4_value.entity_value)
        v3_meaning = None
      elif (v3_meaning == entity_pb2.Property.BLOB or
            v3_meaning == entity_pb2.Property.BYTESTRING):
        v4_value.blob_value = v3_property_value.stringValue
        # BYTESTRING meaning is kept only for unindexed values.
        if indexed or v3_meaning == entity_pb2.Property.BLOB:
          v3_meaning = None
      else:
        string_value = v3_property_value.stringValue
        if is_valid_utf8(string_value):
          if v3_meaning == entity_pb2.Property.BLOBKEY:
            v4_value.blob_key_value = string_value
            v3_meaning = None
          else:
            v4_value.string_value = string_value
        else:
          # Not valid UTF-8: must be transported as a blob in v4.
          v4_value.blob_value = string_value
          if v3_meaning != entity_pb2.Property.INDEX_VALUE:
            v3_meaning = None
    elif v3_property_value.HasField('pointvalue'):
      if v3_meaning == MEANING_GEORSS_POINT:
        point_value = v3_property_value.pointvalue
        v4_value.geo_point_value.latitude = point_value.x
        v4_value.geo_point_value.longitude = point_value.y
      else:
        # Point without GEORSS meaning becomes a predefined point entity.
        self.__v3_to_v4_point_entity(v3_property_value.pointvalue,
                                     v4_value.entity_value)
        v4_value.meaning = MEANING_PREDEFINED_ENTITY_POINT
      v3_meaning = None
    elif v3_property_value.HasField('uservalue'):
      self.v3_user_value_to_v4_entity(v3_property_value.uservalue,
                                      v4_value.entity_value)
      v4_value.meaning = MEANING_PREDEFINED_ENTITY_USER
      v3_meaning = None
    else:
      pass
    if is_zlib_value:
      v4_value.meaning = MEANING_ZLIB
    elif v3_meaning:
      # Carry over any surviving v3 meaning verbatim.
      v4_value.meaning = v3_meaning
    # Only touch `indexed` when it differs from the proto default, so the
    # field's presence bit is not set unnecessarily.
    if indexed != v4_value.indexed:
      v4_value.indexed = indexed
  def v4_to_v3_property(self, property_name, is_multi, is_projection,
                        v4_value, v3_property):
    """Converts info from a v4 Property to a v3 Property.

    `v4_value` must not have a `list_value`.

    Args:
      property_name: The name of the property.
      is_multi: Whether the property contains multiple values.
      is_projection: Whether the property is projected.
      v4_value: An `entity_v4_pb2.Value`.
      v3_property: An `entity_pb2.Property` to populate.
    """
    assert not v4_value.list_value, 'v4 list_value not convertable to v3'
    v3_property.Clear()
    v3_property.name = property_name
    # Empty lists are encoded in v3 as a single empty value with a special
    # meaning, not as `multiple`.
    if v4_value.HasField('meaning') and v4_value.meaning == MEANING_EMPTY_LIST:
      v3_property.meaning = MEANING_EMPTY_LIST
      v3_property.multiple = False
      v3_property.value.Clear()
      return
    v3_property.multiple = is_multi
    self.v4_value_to_v3_property_value(v4_value, v3_property.value)
    v4_meaning = None
    if v4_value.HasField('meaning'):
      v4_meaning = v4_value.meaning
    if v4_value.HasField('timestamp_microseconds_value'):
      # Timestamps are implicit in v4 but need an explicit v3 meaning.
      v3_property.meaning = entity_pb2.Property.GD_WHEN
    elif v4_value.HasField('blob_key_value'):
      v3_property.meaning = entity_pb2.Property.BLOBKEY
    elif v4_value.HasField('blob_value'):
      if v4_meaning == MEANING_ZLIB:
        v3_property.meaning_uri = URI_MEANING_ZLIB
      if v4_meaning == entity_pb2.Property.BYTESTRING:
        if v4_value.indexed:
          pass
      else:
        # Default blob meaning depends on indexability.
        if v4_value.indexed:
          v3_property.meaning = entity_pb2.Property.BYTESTRING
        else:
          v3_property.meaning = entity_pb2.Property.BLOB
        v4_meaning = None
    elif v4_value.HasField('entity_value'):
      if v4_meaning != MEANING_GEORSS_POINT:
        # Predefined point/user entities keep their special meanings; other
        # embedded entities are marked as serialized EntityProtos.
        if (v4_meaning != MEANING_PREDEFINED_ENTITY_POINT
            and v4_meaning != MEANING_PREDEFINED_ENTITY_USER):
          v3_property.meaning = entity_pb2.Property.ENTITY_PROTO
        v4_meaning = None
    elif v4_value.HasField('geo_point_value'):
      v3_property.meaning = MEANING_GEORSS_POINT
    else:
      pass
    if v4_meaning is not None:
      v3_property.meaning = v4_meaning
    # Projection results always carry INDEX_VALUE meaning in v3.
    if is_projection:
      v3_property.meaning = entity_pb2.Property.INDEX_VALUE
def __add_v3_property_from_v4(self, property_name, is_multi, is_projection,
v4_value, v3_entity):
"""Adds a v3 Property to an Entity based on information from a v4 Property.
Args:
property_name: The name of the property.
is_multi: Whether the property contains multiple values.
is_projection: Whether the property is a projection.
v4_value: An `entity_v4_pb2.Value`.
v3_entity: An `entity_pb2.EntityProto`.
"""
if v4_value.indexed:
prop = v3_entity.property.add()
self.v4_to_v3_property(property_name, is_multi, is_projection, v4_value,
prop)
else:
prop = v3_entity.raw_property.add()
self.v4_to_v3_property(property_name, is_multi, is_projection, v4_value,
prop)
def __build_name_to_v4_property_map(self, v4_entity):
property_map = {}
for prop in v4_entity.property:
property_map[prop.name] = prop
return property_map
  def __add_v4_property_to_entity(self, v4_entity, property_map, v3_property,
                                  indexed):
    """Adds a v4 Property to an entity or modifies an existing one.

    `property_map` is used to track of properties that have already been added.
    The same dict should be used for all of an entity's properties.

    Args:
      v4_entity: An `entity_v4_pb.Entity`.
      property_map: A dict of name -> v4_property.
      v3_property: An `entity_pb2.Property` to convert to v4 and add to the
          dict.
      indexed: Whether the property is indexed.
    """
    property_name = v3_property.name
    if property_name in property_map:
      # Name already seen: reuse the existing v4 property so repeated v3
      # properties merge into a single v4 property.
      v4_property = property_map[property_name]
    else:
      v4_property = v4_entity.property.add()
      v4_property.name = property_name
      property_map[property_name] = v4_property
    if v3_property.multiple:
      # Multi-valued v3 properties accumulate into the v4 list_value.
      self.v3_property_to_v4_value(v3_property, indexed,
                                   v4_property.value.list_value.add())
    else:
      self.v3_property_to_v4_value(v3_property, indexed,
                                   v4_property.value)
def __get_v4_integer_value(self, v4_property):
"""Returns an integer value from a v4 Property.
Args:
v4_property: An `entity_v4_pb2.Property`.
Returns:
An integer.
Raises:
InvalidConversionError: if the property doesn't contain an integer value
"""
check_conversion(v4_property.value.HasField('integer_value'),
'Property does not contain an integer value.')
return v4_property.value.integer_value
def __get_v4_double_value(self, v4_property):
"""Returns a double value from a v4 Property.
Args:
v4_property: An `entity_v4_pb2.Property`.
Returns:
A double.
Raises:
InvalidConversionError: if the property doesn't contain a double value
"""
check_conversion(v4_property.value.HasField('double_value'),
'Property does not contain a double value.')
return v4_property.value.double_value
def __get_v4_string_value(self, v4_property):
"""Returns a string value from a v4 Property.
Args:
v4_property: An `entity_v4_pb2.Property`.
Returns:
A string.
Throws:
InvalidConversionError: if the property doesn't contain a string value
"""
check_conversion(v4_property.value.HasField('string_value'),
'Property does not contain a string value.')
return v4_property.value.string_value
def __v4_integer_property(self, name, value, indexed):
"""Creates a single-integer-valued v4 Property.
Args:
name: The property name.
value: The integer value of the property.
indexed: Whether the value should be indexed.
Returns:
An `entity_v4_pb2.Property`.
"""
v4_property = entity_v4_pb2.Property()
v4_property.name = name
v4_value = v4_property.value
v4_value.indexed = indexed
v4_value.integer_value = value
return v4_property
def __v4_double_property(self, name, value, indexed):
"""Creates a single-double-valued v4 Property.
Args:
name: The property name.
value: The double value of the property.
indexed: Whether the value should be indexed.
Returns:
An `entity_v4_pb2.Property`.
"""
v4_property = entity_v4_pb2.Property()
v4_property.name = name
v4_value = v4_property.value
v4_value.indexed = indexed
v4_value.double_value = value
return v4_property
def __v4_string_property(self, name, value, indexed):
"""Creates a single-string-valued v4 Property.
Args:
name: The property name.
value: The string value of the property.
indexed: Whether the value should be indexed.
Returns:
An `entity_v4_pb2.Property`.
"""
v4_property = entity_v4_pb2.Property()
v4_property.name = name
v4_value = v4_property.value
v4_value.indexed = indexed
v4_value.string_value = value
return v4_property
def __v4_to_v3_point_value(self, v4_point_entity, v3_point_value):
"""Converts a v4 point Entity to a v3 `PointValue`.
Args:
v4_point_entity: An `entity_v4_pb2.Entity` representing a point.
v3_point_value: An `entity_pb2.PropertyValue.PointValue` to populate.
"""
v3_point_value.Clear()
name_to_v4_property = self.__build_name_to_v4_property_map(v4_point_entity)
v3_point_value.x = self.__get_v4_double_value(name_to_v4_property['x'])
v3_point_value.y = self.__get_v4_double_value(name_to_v4_property['y'])
  def __v3_to_v4_point_entity(self, v3_point_value, v4_entity):
    """Converts a v3 `PointValue` to a v4 point Entity.

    (Original docstring incorrectly said "UserValue to a v4 user Entity".)

    Args:
      v3_point_value: An `entity_pb2.PropertyValue.PointValue`.
      v4_entity: An `entity_v4_pb2.Entity` to populate.
    """
    v4_entity.Clear()
    v4_entity.property.append(
        self.__v4_double_property(PROPERTY_NAME_X, v3_point_value.x, False))
    v4_entity.property.append(
        self.__v4_double_property(PROPERTY_NAME_Y, v3_point_value.y, False))
  def v4_entity_to_v3_user_value(self, v4_user_entity, v3_user_value):
    """Converts a v4 user Entity to a v3 `UserValue`.

    Args:
      v4_user_entity: An `entity_v4_pb2.Entity` representing a user.
      v3_user_value: An `entity_pb2.PropertyValue.UserValue` to populate.
    """
    v3_user_value.Clear()
    name_to_v4_property = self.__build_name_to_v4_property_map(v4_user_entity)
    # email and auth_domain are mandatory; a KeyError here means the entity
    # was not a well-formed predefined user entity.
    v3_user_value.email = self.__get_v4_string_value(
        name_to_v4_property[PROPERTY_NAME_EMAIL])
    v3_user_value.auth_domain = self.__get_v4_string_value(
        name_to_v4_property[PROPERTY_NAME_AUTH_DOMAIN])
    if PROPERTY_NAME_USER_ID in name_to_v4_property:
      v3_user_value.obfuscated_gaiaid = self.__get_v4_string_value(
          name_to_v4_property[PROPERTY_NAME_USER_ID])
    if PROPERTY_NAME_INTERNAL_ID in name_to_v4_property:
      v3_user_value.gaiaid = self.__get_v4_integer_value(
          name_to_v4_property[PROPERTY_NAME_INTERNAL_ID])
    else:
      # v3 requires gaiaid; 0 matches the module's DEFAULT_GAIA_ID.
      v3_user_value.gaiaid = 0
    if PROPERTY_NAME_FEDERATED_IDENTITY in name_to_v4_property:
      v3_user_value.federated_identity = self.__get_v4_string_value(
          name_to_v4_property[PROPERTY_NAME_FEDERATED_IDENTITY])
    if PROPERTY_NAME_FEDERATED_PROVIDER in name_to_v4_property:
      v3_user_value.federated_provider = self.__get_v4_string_value(
          name_to_v4_property[PROPERTY_NAME_FEDERATED_PROVIDER])
  def v3_user_value_to_v4_entity(self, v3_user_value, v4_entity):
    """Converts a v3 `UserValue` to a v4 user Entity.

    Args:
      v3_user_value: An `entity_pb2.PropertyValue.UserValue`.
      v4_entity: An `entity_v4_pb2.Entity` to populate.
    """
    v4_entity.Clear()
    # email and auth_domain are always emitted; the remaining fields only
    # when present/non-default in the v3 value.  None are indexed.
    v4_entity.property.append(
        self.__v4_string_property(PROPERTY_NAME_EMAIL, v3_user_value.email,
                                  False))
    v4_entity.property.append(
        self.__v4_string_property(PROPERTY_NAME_AUTH_DOMAIN,
                                  v3_user_value.auth_domain, False))
    if v3_user_value.gaiaid != 0:
      v4_entity.property.append(
          self.__v4_integer_property(PROPERTY_NAME_INTERNAL_ID,
                                     v3_user_value.gaiaid, False))
    if v3_user_value.HasField('obfuscated_gaiaid'):
      v4_entity.property.append(
          self.__v4_string_property(PROPERTY_NAME_USER_ID,
                                    v3_user_value.obfuscated_gaiaid, False))
    if v3_user_value.HasField('federated_identity'):
      v4_entity.property.append(
          self.__v4_string_property(PROPERTY_NAME_FEDERATED_IDENTITY,
                                    v3_user_value.federated_identity, False))
    if v3_user_value.HasField('federated_provider'):
      v4_entity.property.append(
          self.__v4_string_property(PROPERTY_NAME_FEDERATED_PROVIDER,
                                    v3_user_value.federated_provider, False))
  def v1_to_v3_reference(self, v1_key, v3_ref):
    """Converts a v1 Key to a v3 Reference.

    Args:
      v1_key: An `googledatastore.Key`.
      v3_ref: An `entity_pb2.Reference` to populate.
    """
    v3_ref.Clear()
    if v1_key.HasField('partition_id'):
      project_id = v1_key.partition_id.project_id
      if project_id:
        # v1 uses project ids; v3 wants the (possibly sharded) app id.
        app_id = self._id_resolver.resolve_app_id(project_id)
        v3_ref.app = app_id
      if v1_key.partition_id.namespace_id:
        v3_ref.name_space = v1_key.partition_id.namespace_id
    for v1_element in v1_key.path:
      v3_element = v3_ref.path.element.add()
      # v3 stores kind/name as bytes; v1 carries text strings.
      v3_element.type = v1_element.kind.encode('utf-8')
      id_type = v1_element.WhichOneof('id_type')
      if id_type == 'id':
        v3_element.id = v1_element.id
      elif id_type == 'name':
        v3_element.name = v1_element.name.encode('utf-8')
def v1_to_v3_references(self, v1_keys):
"""Converts a list of v1 Keys to a list of v3 References.
Args:
v1_keys: A list of `googledatastore.Key` objects.
Returns:
A list of `entity_pb2.Reference` objects.
"""
v3_refs = []
for v1_key in v1_keys:
v3_ref = entity_pb2.Reference()
self.v1_to_v3_reference(v1_key, v3_ref)
v3_refs.append(v3_ref)
return v3_refs
  def v3_to_v1_key(self, v3_ref, v1_key):
    """Converts a v3 Reference to a v1 Key.

    Args:
      v3_ref: An `entity_pb2.Reference`.
      v1_key: An `googledatastore.Key` to populate.
    """
    v1_key.Clear()
    # An empty app id means an unset key: leave the v1 Key cleared.
    if not v3_ref.app:
      return
    # v3 app id maps back to the v1 project id via the resolver.
    project_id = self._id_resolver.resolve_project_id(v3_ref.app)
    v1_key.partition_id.project_id = project_id
    if v3_ref.name_space:
      v1_key.partition_id.namespace_id = v3_ref.name_space
    for v3_element in v3_ref.path.element:
      v1_element = v1_key.path.add()
      v1_element.kind = v3_element.type
      if v3_element.HasField('id'):
        v1_element.id = v3_element.id
      if v3_element.HasField('name'):
        v1_element.name = v3_element.name
def v3_to_v1_keys(self, v3_refs):
  """Converts a list of v3 References to a list of v1 Keys.

  Args:
    v3_refs: A list of `entity_pb2.Reference` objects.

  Returns:
    A list of `googledatastore.Key` objects.
  """
  def _convert(v3_ref):
    v1_key = googledatastore.Key()
    self.v3_to_v1_key(v3_ref, v1_key)
    return v1_key
  return [_convert(v3_ref) for v3_ref in v3_refs]
def project_to_app_id(self, project_id):
  """Converts a string project id to a string app id.

  Args:
    project_id: The v1 project id string.

  Returns:
    The v3 app id string, as determined by this converter's id resolver.
  """
  return self._id_resolver.resolve_app_id(project_id)
def app_to_project_id(self, app_id):
  """Converts a string app id to a string project id.

  Args:
    app_id: The v3 app id string.

  Returns:
    The v1 project id string, as determined by this converter's id resolver.
  """
  return self._id_resolver.resolve_project_id(app_id)
def __new_v3_property(self, v3_entity, is_indexed):
  """Adds and returns a new v3 Property on the appropriate repeated field.

  Indexed values live in `property`; unindexed values in `raw_property`.
  """
  target = v3_entity.property if is_indexed else v3_entity.raw_property
  return target.add()
def v1_to_v3_entity(self, v1_entity, v3_entity, is_projection=False):
  """Converts a v1 Entity to a v3 `EntityProto`.

  Args:
    v1_entity: An `googledatastore.Entity`.
    v3_entity: An `entity_pb2.EntityProto` to populate.
    is_projection: `True` if the `v1_entity` is from a projection query.
  """
  v3_entity.Clear()
  for property_name, v1_value in six.iteritems(v1_entity.properties):
    if v1_value.HasField('array_value'):
      if len(v1_value.array_value.values) == 0:
        # v3 has no native empty-list value; it is encoded as a single
        # non-multiple property with the EMPTY_LIST meaning and an empty
        # value.
        empty_list = self.__new_v3_property(v3_entity,
                                            not v1_value.exclude_from_indexes)
        empty_list.name = property_name
        empty_list.multiple = False
        empty_list.meaning = MEANING_EMPTY_LIST
        empty_list.value.Clear()
      else:
        # Each array element becomes its own v3 property with multiple=True;
        # indexing is decided per element, not from the containing value.
        for v1_sub_value in v1_value.array_value.values:
          list_element = self.__new_v3_property(
              v3_entity, not v1_sub_value.exclude_from_indexes)
          self.v1_to_v3_property(
              property_name, True, is_projection, v1_sub_value, list_element)
    else:
      value_property = self.__new_v3_property(
          v3_entity, not v1_value.exclude_from_indexes)
      self.v1_to_v3_property(
          property_name, False, is_projection, v1_value, value_property)
  if v1_entity.HasField('key'):
    v1_key = v1_entity.key
    self.v1_to_v3_reference(v1_key, v3_entity.key)
    v3_ref = v3_entity.key
    # The entity group is derived from the first path element of the key.
    self.v3_reference_to_group(v3_ref, v3_entity.entity_group)
  else:
    # Entities embedded inside a value legitimately have no key.
    pass
def v3_to_v1_entity(self, v3_entity, v1_entity):
  """Converts a v3 `EntityProto` to a v1 Entity.

  Args:
    v3_entity: An `entity_pb2.EntityProto`.
    v1_entity: An `googledatastore.Entity` to populate.
  """
  v1_entity.Clear()
  self.v3_to_v1_key(v3_entity.key, v1_entity.key)
  if not v3_entity.key.HasField('app'):
    # An incomplete v3 key (no app id) means the v1 entity has no key.
    v1_entity.ClearField('key')
  # Indexed properties live in `property`, unindexed ones in `raw_property`.
  for indexed, v3_properties in ((True, v3_entity.property),
                                 (False, v3_entity.raw_property)):
    for v3_property in v3_properties:
      self.__add_v1_property_to_entity(v1_entity, v3_property, indexed)
def v1_value_to_v3_property_value(self, v1_value, v3_value):
  """Converts a v1 Value to a v3 `PropertyValue`.

  Args:
    v1_value: An `googledatastore.Value`.
    v3_value: An `entity_pb2.PropertyValue` to populate.
  """
  v3_value.Clear()
  # Dispatch on whichever oneof field is populated in the v1 value.
  field = v1_value.WhichOneof('value_type')
  if field == 'boolean_value':
    v3_value.booleanValue = v1_value.boolean_value
  elif field == 'integer_value':
    v3_value.int64Value = v1_value.integer_value
  elif field == 'double_value':
    v3_value.doubleValue = v1_value.double_value
  elif field == 'timestamp_value':
    # v3 stores timestamps as microseconds-since-epoch in an int64.
    v3_value.int64Value = googledatastore.helper.micros_from_timestamp(
        v1_value.timestamp_value)
  elif field == 'key_value':
    v3_ref = entity_pb2.Reference()
    self.v1_to_v3_reference(v1_value.key_value, v3_ref)
    self.v3_reference_to_v3_property_value(v3_ref, v3_value)
  elif field == 'string_value':
    v3_value.stringValue = v1_value.string_value.encode('utf-8')
  elif field == 'blob_value':
    # Blobs share the v3 stringValue field; meaning disambiguates later.
    v3_value.stringValue = v1_value.blob_value
  elif field == 'entity_value':
    v1_entity_value = v1_value.entity_value
    v1_meaning = v1_value.meaning
    if v1_meaning == MEANING_PREDEFINED_ENTITY_USER:
      # User objects are stored natively in v3, not as embedded entities.
      self.v1_entity_to_v3_user_value(v1_entity_value, v3_value.uservalue)
    else:
      # Other embedded entities are serialized into the v3 string value.
      v3_entity_value = entity_pb2.EntityProto()
      self.v1_to_v3_entity(v1_entity_value, v3_entity_value)
      v3_value.stringValue = v3_entity_value.SerializePartialToString()
  elif field == 'geo_point_value':
    point_value = v3_value.pointvalue
    point_value.x = v1_value.geo_point_value.latitude
    point_value.y = v1_value.geo_point_value.longitude
  elif field == 'null_value':
    v3_value.Clear()
  else:
    # Unknown or unset value type: leave the v3 value empty.
    v3_value.Clear()
def v3_property_to_v1_value(self, v3_property, indexed, v1_value):
  """Converts a v3 Property to a v1 Value.

  Args:
    v3_property: An `entity_pb2.Property`.
    indexed: Whether the v3 property is indexed.
    v1_value: An `googledatastore.Value` to populate.
  """
  v1_value.Clear()
  v3_property_value = v3_property.value
  v3_meaning = v3_property.meaning
  v3_uri_meaning = None
  if v3_property.meaning_uri:
    v3_uri_meaning = v3_property.meaning_uri
  # Sanitize the meaning before use: drop it when the value union is
  # invalid or when the meaning does not match the populated value field.
  if not self.__is_v3_property_value_union_valid(v3_property_value):
    v3_meaning = None
    v3_uri_meaning = None
  elif v3_meaning == entity_pb2.Property.NO_MEANING:
    v3_meaning = None
  elif not self.__is_v3_property_value_meaning_valid(v3_property_value,
                                                     v3_meaning):
    v3_meaning = None
  is_zlib_value = False
  if v3_uri_meaning:
    if v3_uri_meaning == URI_MEANING_ZLIB:
      if v3_property_value.HasField('stringValue'):
        is_zlib_value = True
        # Zlib-compressed payloads are always treated as blobs.
        if v3_meaning != entity_pb2.Property.BLOB:
          v3_meaning = entity_pb2.Property.BLOB
      else:
        pass
    else:
      pass
  # Note: this checks the *original* property meaning, not the sanitized
  # local copy, so an EMPTY_LIST marker always converts to an empty array.
  if v3_property.meaning == entity_pb2.Property.EMPTY_LIST:
    v1_value.array_value.values.extend([])
    v3_meaning = None
  elif v3_property_value.HasField('booleanValue'):
    v1_value.boolean_value = v3_property_value.booleanValue
  elif v3_property_value.HasField('int64Value'):
    if (v3_meaning == entity_pb2.Property.GD_WHEN and
        is_in_rfc_3339_bounds(v3_property_value.int64Value)):
      # Timestamps within RFC 3339 bounds become native v1 timestamps.
      googledatastore.helper.micros_to_timestamp(v3_property_value.int64Value,
                                                 v1_value.timestamp_value)
      v3_meaning = None
    else:
      v1_value.integer_value = v3_property_value.int64Value
  elif v3_property_value.HasField('doubleValue'):
    v1_value.double_value = v3_property_value.doubleValue
  elif v3_property_value.HasField('referencevalue'):
    v3_ref = entity_pb2.Reference()
    self.__v3_reference_value_to_v3_reference(
        v3_property_value.referencevalue, v3_ref)
    self.v3_to_v1_key(v3_ref, v1_value.key_value)
  elif v3_property_value.HasField('stringValue'):
    if v3_meaning == entity_pb2.Property.ENTITY_PROTO:
      # An embedded entity serialized into the v3 string value.
      serialized_entity_v3 = v3_property_value.stringValue
      v3_entity = entity_pb2.EntityProto()
      v3_entity.ParseFromString(serialized_entity_v3)
      self.v3_to_v1_entity(v3_entity, v1_value.entity_value)
      v3_meaning = None
    elif (v3_meaning == entity_pb2.Property.BLOB or
          v3_meaning == entity_pb2.Property.BYTESTRING):
      v1_value.blob_value = v3_property_value.stringValue
      # BYTESTRING meaning is only preserved for unindexed values.
      if indexed or v3_meaning == entity_pb2.Property.BLOB:
        v3_meaning = None
    else:
      string_value = v3_property_value.stringValue
      if is_valid_utf8(string_value):
        v1_value.string_value = string_value
      else:
        # Not valid UTF-8: fall back to a blob so no data is lost.
        v1_value.blob_value = string_value
      if v3_meaning != entity_pb2.Property.INDEX_VALUE:
        v3_meaning = None
  elif v3_property_value.HasField('pointvalue'):
    if v3_meaning != MEANING_GEORSS_POINT:
      v1_value.meaning = MEANING_POINT_WITHOUT_V3_MEANING
    point_value = v3_property_value.pointvalue
    v1_value.geo_point_value.latitude = point_value.x
    v1_value.geo_point_value.longitude = point_value.y
    v3_meaning = None
  elif v3_property_value.HasField('uservalue'):
    self.v3_user_value_to_v1_entity(v3_property_value.uservalue,
                                    v1_value.entity_value)
    v1_value.meaning = MEANING_PREDEFINED_ENTITY_USER
    v3_meaning = None
  else:
    v1_value.null_value = googledatastore.NULL_VALUE
  if is_zlib_value:
    v1_value.meaning = MEANING_ZLIB
  elif v3_meaning:
    v1_value.meaning = v3_meaning
  # The flag must be the negation of `indexed`; only assign when it is
  # currently wrong, so the field is populated only when needed.
  if indexed == v1_value.exclude_from_indexes:
    v1_value.exclude_from_indexes = not indexed
def v1_to_v3_property(self, property_name, is_multi, is_projection,
                      v1_value, v3_property):
  """Converts info from a v1 Property to a v3 Property.

  v1_value must not have an array_value.

  Args:
    property_name: The name of the property, unicode.
    is_multi: whether The property contains multiple values.
    is_projection: Whether the property is projected.
    v1_value: An `googledatastore.Value`.
    v3_property: An `entity_pb2.Property` to populate.
  """
  v1_value_type = v1_value.WhichOneof('value_type')
  if v1_value_type == 'array_value':
    # Array values must be flattened by the caller (see v1_to_v3_entity).
    assert False, 'v1 array_value not convertable to v3'
  v3_property.Clear()
  v3_property.name = property_name.encode('utf-8')
  v3_property.multiple = is_multi
  self.v1_value_to_v3_property_value(v1_value, v3_property.value)
  v1_meaning = None
  if v1_value.meaning:
    v1_meaning = v1_value.meaning
  # Derive the v3 meaning from the v1 value type. Where the v1 meaning is
  # consumed by the conversion it is reset to None so it is not copied
  # verbatim below.
  if v1_value_type == 'timestamp_value':
    v3_property.meaning = entity_pb2.Property.GD_WHEN
  elif v1_value_type == 'blob_value':
    if v1_meaning == MEANING_ZLIB:
      v3_property.meaning_uri = URI_MEANING_ZLIB
    if v1_meaning == entity_pb2.Property.BYTESTRING:
      if not v1_value.exclude_from_indexes:
        # Indexed bytestrings keep their explicit BYTESTRING meaning.
        pass
    else:
      if not v1_value.exclude_from_indexes:
        v3_property.meaning = entity_pb2.Property.BYTESTRING
      else:
        v3_property.meaning = entity_pb2.Property.BLOB
      v1_meaning = None
  elif v1_value_type == 'entity_value':
    if v1_meaning != MEANING_PREDEFINED_ENTITY_USER:
      v3_property.meaning = entity_pb2.Property.ENTITY_PROTO
    # The v1 meaning is always consumed for entity values.
    v1_meaning = None
  elif v1_value_type == 'geo_point_value':
    if v1_meaning != MEANING_POINT_WITHOUT_V3_MEANING:
      v3_property.meaning = MEANING_GEORSS_POINT
      v1_meaning = None
  elif v1_value_type == 'integer_value':
    if v1_meaning == MEANING_NON_RFC_3339_TIMESTAMP:
      # Out-of-range timestamps round-trip through plain integers.
      v3_property.meaning = entity_pb2.Property.GD_WHEN
      v1_meaning = None
  else:
    pass
  if v1_meaning is not None:
    v3_property.meaning = v1_meaning
  if is_projection:
    # Projection query results always carry the INDEX_VALUE meaning.
    v3_property.meaning = entity_pb2.Property.INDEX_VALUE
def __add_v1_property_to_entity(self, v1_entity, v3_property, indexed):
  """Adds a v1 Property to an entity or modifies an existing one.

  Args:
    v1_entity: An `googledatastore.Entity`.
    v3_property: An `entity_pb2.Property` to convert to v1 and add to the
        dict.
    indexed: Whether the property is indexed.
  """
  v1_value = v1_entity.properties[v3_property.name]
  if v3_property.multiple:
    # Multi-valued v3 properties accumulate into the v1 array_value.
    target = v1_value.array_value.values.add()
  else:
    target = v1_value
  self.v3_property_to_v1_value(v3_property, indexed, target)
def __get_v1_integer_value(self, v1_value):
  """Returns an integer value from a v1 Value.

  Args:
    v1_value: A `googledatastore.Value`.

  Returns:
    An integer.

  Raises:
    InvalidConversionError: if the value doesn't contain an integer value
  """
  has_integer = v1_value.HasField('integer_value')
  check_conversion(has_integer, 'Value does not contain an integer value.')
  return v1_value.integer_value
def __get_v1_double_value(self, v1_value):
  """Returns a double value from a v1 Value.

  Args:
    v1_value: An `googledatastore.Value`.

  Returns:
    A double.

  Raises:
    InvalidConversionError: if the value doesn't contain a double value
  """
  has_double = v1_value.HasField('double_value')
  check_conversion(has_double, 'Value does not contain a double value.')
  return v1_value.double_value
def __get_v1_string_value(self, v1_value):
  """Returns an string value from a v1 Value.

  Args:
    v1_value: An `googledatastore.Value`.

  Returns:
    A string (UTF-8 encoded bytes).

  Raises:
    InvalidConversionError: if the value doesn't contain a string value
  """
  has_string = v1_value.HasField('string_value')
  check_conversion(has_string, 'Value does not contain a string value.')
  return v1_value.string_value.encode('utf-8')
def __v1_integer_property(self, entity, name, value, indexed):
  """Populates a single-integer-valued v1 Property.

  Args:
    entity: The entity to populate.
    name: The name of the property to populate.
    value: The integer value of the property.
    indexed: Whether the value should be indexed.
  """
  prop = entity.properties[name]
  prop.exclude_from_indexes = not indexed
  prop.integer_value = value
def __v1_double_property(self, entity, name, value, indexed):
  """Populates a single-double-valued v1 Property.

  Args:
    entity: The entity to populate.
    name: The name of the property to populate.
    value: The double value of the property.
    indexed: Whether the value should be indexed.
  """
  prop = entity.properties[name]
  prop.exclude_from_indexes = not indexed
  prop.double_value = value
def __v1_string_property(self, entity, name, value, indexed):
  """Populates a single-string-valued v1 Property.

  Args:
    entity: The entity to populate.
    name: The name of the property to populate.
    value: The string value of the property.
    indexed: Whether the value should be indexed.
  """
  prop = entity.properties[name]
  prop.exclude_from_indexes = not indexed
  prop.string_value = value
def v1_entity_to_v3_user_value(self, v1_user_entity, v3_user_value):
  """Converts a v1 user Entity to a v3 `UserValue`.

  Args:
    v1_user_entity: An `googledatastore.Entity` representing a user.
    v3_user_value: An `entity_pb2.Property.UserValue` to populate.
  """
  v3_user_value.Clear()
  properties = v1_user_entity.properties
  # Email and auth domain are read unconditionally; the string getter
  # raises InvalidConversionError if they are missing or non-string.
  v3_user_value.email = self.__get_v1_string_value(
      properties[PROPERTY_NAME_EMAIL])
  v3_user_value.auth_domain = self.__get_v1_string_value(
      properties[PROPERTY_NAME_AUTH_DOMAIN])
  if PROPERTY_NAME_USER_ID in properties:
    v3_user_value.obfuscated_gaiaid = self.__get_v1_string_value(
        properties[PROPERTY_NAME_USER_ID])
  if PROPERTY_NAME_INTERNAL_ID in properties:
    v3_user_value.gaiaid = self.__get_v1_integer_value(
        properties[PROPERTY_NAME_INTERNAL_ID])
  else:
    # gaiaid is always populated; zero marks "no internal id" (mirrors
    # the gaiaid != 0 check in v3_user_value_to_v1_entity).
    v3_user_value.gaiaid = 0
  if PROPERTY_NAME_FEDERATED_IDENTITY in properties:
    v3_user_value.federated_identity = self.__get_v1_string_value(
        properties[PROPERTY_NAME_FEDERATED_IDENTITY])
  if PROPERTY_NAME_FEDERATED_PROVIDER in properties:
    v3_user_value.federated_provider = self.__get_v1_string_value(
        properties[PROPERTY_NAME_FEDERATED_PROVIDER])
def v3_user_value_to_v1_entity(self, v3_user_value, v1_entity):
  """Converts a v3 `UserValue` to a v1 user Entity.

  Args:
    v3_user_value: An `entity_pb2.Property_UserValue`.
    v1_entity: An `googledatastore.Entity` to populate.
  """
  v1_entity.Clear()
  add_string = self.__v1_string_property
  # Email and auth domain are always present on a user value.
  add_string(v1_entity, PROPERTY_NAME_EMAIL, v3_user_value.email, False)
  add_string(v1_entity, PROPERTY_NAME_AUTH_DOMAIN,
             v3_user_value.auth_domain, False)
  # Zero means "no internal id" (see v1_entity_to_v3_user_value).
  if v3_user_value.gaiaid != 0:
    self.__v1_integer_property(v1_entity, PROPERTY_NAME_INTERNAL_ID,
                               v3_user_value.gaiaid, False)
  # Optional fields are copied only when explicitly set.
  if v3_user_value.HasField('obfuscated_gaiaid'):
    add_string(v1_entity, PROPERTY_NAME_USER_ID,
               v3_user_value.obfuscated_gaiaid, False)
  if v3_user_value.HasField('federated_identity'):
    add_string(v1_entity, PROPERTY_NAME_FEDERATED_IDENTITY,
               v3_user_value.federated_identity, False)
  if v3_user_value.HasField('federated_provider'):
    add_string(v1_entity, PROPERTY_NAME_FEDERATED_PROVIDER,
               v3_user_value.federated_provider, False)
def __is_v3_property_value_union_valid(self, v3_property_value):
  """Returns True if the v3 PropertyValue's union is valid."""
  # A valid union has at most one of its alternative fields populated.
  union_fields = ('booleanValue', 'int64Value', 'doubleValue',
                  'referencevalue', 'stringValue', 'pointvalue', 'uservalue')
  populated = sum(v3_property_value.HasField(f) for f in union_fields)
  return populated <= 1
def __is_v3_property_value_meaning_valid(self, v3_property_value, v3_meaning):
  """Returns True if the v3 PropertyValue's type value matches its meaning."""
  # Meanings that impose no constraint on which union field is set.
  unconstrained = (
      entity_pb2.Property.NO_MEANING,
      entity_pb2.Property.INDEX_VALUE,
      entity_pb2.Property.EMPTY_LIST,
  )
  if v3_meaning in unconstrained:
    return True
  # Every other known meaning requires one specific union field to be set.
  required_field = {
      entity_pb2.Property.BLOB: 'stringValue',
      entity_pb2.Property.TEXT: 'stringValue',
      entity_pb2.Property.BYTESTRING: 'stringValue',
      entity_pb2.Property.ATOM_CATEGORY: 'stringValue',
      entity_pb2.Property.ATOM_LINK: 'stringValue',
      entity_pb2.Property.ATOM_TITLE: 'stringValue',
      entity_pb2.Property.ATOM_CONTENT: 'stringValue',
      entity_pb2.Property.ATOM_SUMMARY: 'stringValue',
      entity_pb2.Property.ATOM_AUTHOR: 'stringValue',
      entity_pb2.Property.GD_EMAIL: 'stringValue',
      entity_pb2.Property.GD_IM: 'stringValue',
      entity_pb2.Property.GD_PHONENUMBER: 'stringValue',
      entity_pb2.Property.GD_POSTALADDRESS: 'stringValue',
      entity_pb2.Property.BLOBKEY: 'stringValue',
      entity_pb2.Property.ENTITY_PROTO: 'stringValue',
      entity_pb2.Property.GD_WHEN: 'int64Value',
      entity_pb2.Property.GD_RATING: 'int64Value',
      entity_pb2.Property.GEORSS_POINT: 'pointvalue',
  }.get(v3_meaning)
  if required_field is None:
    # Unknown meaning: never valid.
    return False
  return v3_property_value.HasField(required_field)
def __v3_reference_has_id_or_name(self, v3_ref):
  """Determines if a v3 Reference specifies an ID or name.

  Args:
    v3_ref: An `entity_pb2.Reference`.

  Returns:
    Boolean. `True` if the last path element specifies an ID or name.
  """
  elements = v3_ref.path.element
  assert len(elements) >= 1
  last = elements[-1]
  return last.HasField('id') or last.HasField('name')
def v3_reference_to_group(self, v3_ref, group):
  """Converts a v3 Reference to a v3 Path representing the entity group.

  The entity group is represented as an `entity_pb2.Path` containing only
  the first element in the provided Reference.

  Args:
    v3_ref: An `entity_pb2.Reference`.
    group: An `entity_pb2.Path` to populate.
  """
  group.Clear()
  elements = v3_ref.path.element
  assert len(elements) >= 1
  root = entity_pb2.Path.Element()
  root.CopyFrom(elements[0])
  group.element.append(root)
def v3_reference_to_v3_property_value(self, v3_ref, v3_property_value):
  """Converts a v3 Reference to a v3 `PropertyValue`.

  Args:
    v3_ref: An `entity_pb2.Reference`.
    v3_property_value: An `entity_pb2.PropertyValue` to populate.
  """
  v3_property_value.Clear()
  ref_value = v3_property_value.referencevalue
  if v3_ref.HasField('app'):
    ref_value.app = v3_ref.app
  if v3_ref.HasField('name_space'):
    ref_value.name_space = v3_ref.name_space
  for src_element in v3_ref.path.element:
    copy_path_element(src_element, ref_value.pathelement.add())
def __v3_reference_value_to_v3_reference(self, v3_ref_value, v3_ref):
  """Converts a v3 `ReferenceValue` to a v3 Reference.

  Args:
    v3_ref_value: An `entity_pb2.PropertyValue.ReferenceValue`.
    v3_ref: An `entity_pb2.Reference` to populate.
  """
  v3_ref.Clear()
  if v3_ref_value.HasField('app'):
    v3_ref.app = v3_ref_value.app
  if v3_ref_value.HasField('name_space'):
    v3_ref.name_space = v3_ref_value.name_space
  for src_element in v3_ref_value.pathelement:
    copy_path_element(src_element, v3_ref.path.element.add())
class _QueryConverter(object):
  """Base converter for v3 and v1 queries."""

  def __init__(self, entity_converter):
    self._entity_converter = entity_converter

  def get_entity_converter(self):
    """Returns the entity converter used by this query converter."""
    return self._entity_converter

  def _v3_filter_to_v1_property_filter(self, v3_filter, v1_property_filter):
    """Converts a v3 Filter to a v1 `PropertyFilter`.

    Args:
      v3_filter: A `datastore_pb.Filter`.
      v1_property_filter: A `googledatastore.PropertyFilter` to populate.

    Raises:
      `InvalidConversionError` if the filter cannot be converted
    """
    check_conversion(len(v3_filter.property) == 1, 'invalid filter')
    check_conversion(v3_filter.op <= 5,
                     'unsupported filter op: %d' % v3_filter.op)
    v1_property_filter.Clear()
    v1_property_filter.op = v3_filter.op
    v1_property_filter.property.name = v3_filter.property[0].name
    # Filter values are always treated as indexed.
    self._entity_converter.v3_property_to_v1_value(v3_filter.property[0], True,
                                                   v1_property_filter.value)

  def _v3_query_to_v1_ancestor_filter(self, v3_query, v1_property_filter):
    """Converts a v3 Query to a v1 ancestor `PropertyFilter`.

    Args:
      v3_query: A `datastore_pb.Query`.
      v1_property_filter: A `googledatastore.PropertyFilter` to populate.
    """
    v1_property_filter.Clear()
    # BUG FIX: previously used proto1-style accessors (set_operator(),
    # v3_query.shallow(), has_ancestor(), prop.set_name(),
    # mutable_key_value), which do not exist on pb2 messages; use pb2
    # field assignment, consistent with the rest of this module.
    v1_property_filter.op = (
        v3_query.shallow and
        googledatastore.PropertyFilter.HAS_PARENT or
        googledatastore.PropertyFilter.HAS_ANCESTOR)
    v1_property_filter.property.name = PROPERTY_NAME_KEY
    if v3_query.HasField('ancestor'):
      self._entity_converter.v3_to_v1_key(
          v3_query.ancestor,
          v1_property_filter.value.key_value)
    else:
      # A shallow query with no ancestor matches root entities only.
      v1_property_filter.value.null_value = googledatastore.NULL_VALUE

  def v3_order_to_v1_order(self, v3_order, v1_order):
    """Converts a v3 Query order to a v1 `PropertyOrder`.

    Args:
      v3_order: A `datastore_pb.Query.Order`.
      v1_order: A `googledatastore.PropertyOrder` to populate.
    """
    v1_order.property.name = v3_order.property
    if v3_order.HasField('direction'):
      v1_order.direction = v3_order.direction

  def _v3_filter_to_v4_property_filter(self, v3_filter, v4_property_filter):
    """Converts a v3 Filter to a v4 `PropertyFilter`.

    Args:
      v3_filter: A `datastore_pb.Filter`.
      v4_property_filter: A `datastore_v4_pb2.PropertyFilter` to populate.

    Raises:
      `InvalidConversionError` if the filter cannot be converted
    """
    check_conversion(len(v3_filter.property) == 1, 'invalid filter')
    check_conversion(v3_filter.op <= 5,
                     'unsupported filter op: %d' % v3_filter.op)
    v4_property_filter.Clear()
    v4_property_filter.operator = v3_filter.op
    v4_property_filter.property.name = v3_filter.property[0].name
    self._entity_converter.v3_property_to_v4_value(v3_filter.property[0], True,
                                                   v4_property_filter.value)

  def _v3_query_to_v4_ancestor_filter(self, v3_query, v4_property_filter):
    """Converts a v3 Query to a v4 ancestor `PropertyFilter`.

    Args:
      v3_query: A `datastore_pb.Query`.
      v4_property_filter: A `datastore_v4_pb2.PropertyFilter` to populate.
    """
    v4_property_filter.Clear()
    v4_property_filter.operator = datastore_v4_pb2.PropertyFilter.HAS_ANCESTOR
    v4_property_filter.property.name = PROPERTY_NAME_KEY
    self._entity_converter.v3_to_v4_key(v3_query.ancestor,
                                        v4_property_filter.value.key_value)

  def v3_order_to_v4_order(self, v3_order, v4_order):
    """Converts a v3 Query order to a v4 `PropertyOrder`.

    Args:
      v3_order: A `datastore_pb.Query.Order`.
      v4_order: A `datastore_v4_pb2.PropertyOrder` to populate.
    """
    v4_order.property.name = v3_order.property
    if v3_order.HasField('direction'):
      v4_order.direction = v3_order.direction
def get_entity_converter(id_resolver=None):
  """Returns a converter for v3 and v1 entities and keys.

  Args:
    id_resolver: An `IdResolver` for project id resolution; when not
        provided, an identity resolver is used.
  """
  return _EntityConverter(id_resolver or _IdentityIdResolver())
def copy_path_element(source, destination):
  """Copies the type/id/name fields that are set on source to destination."""
  for field in ('type', 'id', 'name'):
    if source.HasField(field):
      setattr(destination, field, getattr(source, field))
| |
#
# SaveImage.py -- Save output images global plugin for Ginga
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""Save output images global plugin for Ginga."""
from __future__ import absolute_import, division, print_function
from ginga.util.six import itervalues
from ginga.util.six.moves import map
# STDLIB
import os
import shutil
# THIRD-PARTY
from astropy.io import fits
# GINGA
from ginga.GingaPlugin import GlobalPlugin
from ginga.gw import Widgets
from ginga.misc import Bunch
from ginga.util.iohelper import shorten_name
try:
from ginga.gw.GwHelp import DirectorySelection
except ImportError: # This is needed for RTD to build
pass
__all__ = []
class SaveImage(GlobalPlugin):
"""Save images to output files.
"""
def __init__(self, fv):
    """Initialize plugin state and register viewer callbacks.

    Args:
        fv: The Ginga shell object this global plugin is attached to.
    """
    # superclass defines some variables for us, like logger
    super(SaveImage, self).__init__(fv)
    # Image listing
    self.columns = [('Image', 'IMAGE'), ('Mod. Ext.', 'MODEXT')]
    # User preferences. Some are just default values and can also be
    # changed by GUI.
    prefs = self.fv.get_preferences()
    self.settings = prefs.createCategory('plugin_SaveImage')
    self.settings.addDefaults(output_directory = '.',
                              output_suffix = 'ginga',
                              include_chname = True,
                              clobber = False,
                              modified_only = True,
                              max_mosaic_size = 1e8,
                              max_rows_for_col_resize = 5000)
    self.settings.load(onError='silent')
    # Cached copies of the two settings edited through the GUI.
    self.outdir = os.path.abspath(
        self.settings.get('output_directory', '.'))
    self.suffix = self.settings.get('output_suffix', 'ginga')
    # Keep the listing in sync with image and channel changes.
    self.fv.add_callback('add-image', lambda *args: self.redo())
    self.fv.add_callback('remove-image', lambda *args: self.redo())
    self.fv.add_callback('add-channel',
                         lambda *args: self.update_channels())
    self.fv.add_callback('delete-channel',
                         lambda *args: self.update_channels())
    # Channel state; populated by update_channels() once the GUI is built.
    self.chnames = []
    self.chname = None
    # Guards callbacks that fire before build_gui() has completed.
    self.gui_up = False
def build_gui(self, container):
    """Build GUI such that image list area is maximized.

    Args:
        container: The widget container to populate.
    """
    vbox, sw, orientation = Widgets.get_oriented_box(container)
    # Instructions text area, collapsed inside an expander.
    msg_font = self.fv.get_font('sansFont', 12)
    tw = Widgets.TextArea(wrap=True, editable=False)
    tw.set_font(msg_font)
    self.tw = tw
    fr = Widgets.Expander('Instructions')
    fr.set_widget(tw)
    container.add_widget(fr, stretch=0)
    # Channel selector and "modified only" toggle.
    captions = (('Channel:', 'label', 'Channel Name', 'combobox',
                 'Modified only', 'checkbutton'), )
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    b.channel_name.set_tooltip('Channel for locating images to save')
    b.channel_name.add_callback('activated', self.select_channel_cb)
    mod_only = self.settings.get('modified_only', True)
    b.modified_only.set_state(mod_only)
    b.modified_only.add_callback('activated', lambda *args: self.redo())
    b.modified_only.set_tooltip("Show only locally modified images")
    container.add_widget(w, stretch=0)
    # Output directory and filename suffix entries.
    captions = (('Path:', 'llabel', 'OutDir', 'entry', 'Browse', 'button'),
                ('Suffix:', 'llabel', 'Suffix', 'entry'))
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    b.outdir.set_text(self.outdir)
    b.outdir.set_tooltip('Output directory')
    b.outdir.add_callback('activated', lambda w: self.set_outdir())
    b.browse.set_tooltip('Browse for output directory')
    b.browse.add_callback('activated', lambda w: self.browse_outdir())
    b.suffix.set_text(self.suffix)
    b.suffix.set_tooltip('Suffix to append to filename')
    b.suffix.add_callback('activated', lambda w: self.set_suffix())
    container.add_widget(w, stretch=0)
    # Image listing table; takes all remaining vertical space (stretch=1).
    self.treeview = Widgets.TreeView(auto_expand=True,
                                     sortable=True,
                                     selection='multiple',
                                     use_alt_row_color=True)
    self.treeview.setup_table(self.columns, 1, 'IMAGE')
    self.treeview.add_callback('selected', self.toggle_save_cb)
    container.add_widget(self.treeview, stretch=1)
    # Status message line.
    captions = (('Status', 'llabel'), )
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    b.status.set_text('')
    b.status.set_tooltip('Status message')
    container.add_widget(w, stretch=0)
    # Action buttons.
    btns = Widgets.HBox()
    btns.set_border_width(4)
    btns.set_spacing(3)
    btn = Widgets.Button('Save')
    btn.set_tooltip('Save selected image(s)')
    btn.add_callback('activated', lambda w: self.save_images())
    # Disabled until at least one row is selected (see toggle_save_cb).
    btn.set_enabled(False)
    btns.add_widget(btn, stretch=0)
    self.w.save = btn
    btn = Widgets.Button('Close')
    btn.add_callback('activated', lambda w: self.close())
    btns.add_widget(btn, stretch=0)
    btns.add_widget(Widgets.Label(''), stretch=1)
    container.add_widget(btns, stretch=0)
    self.gui_up = True
    # Initialize directory selection dialog
    self.dirsel = DirectorySelection(self.fv.w.root.get_widget())
    # Generate initial listing
    self.update_channels()
def instructions(self):
    """Populate the instructions text area with user-facing usage help."""
    self.tw.set_text("""Enter output directory and suffix, if different than default. Left click to select image name to save. Multiple images can be selected using click with Shift or CTRL key. Click Save to save the selected image(s).
Output image will have the filename of <inputname>_<suffix>.fits.""")
def redo(self, *args):
    """Generate listing of images that user can save.

    Rebuilds the tree view from the currently selected channel, honoring
    the "modified only" toggle. Extra positional args (from callbacks)
    are ignored.
    """
    if not self.gui_up:
        return
    mod_only = self.w.modified_only.get_state()
    treedict = Bunch.caselessDict()
    self.treeview.clear()
    self.w.status.set_text('')
    channel = self.fv.get_channel(self.chname)
    if channel is None:
        return
    # Only list modified images for saving. Scanning Datasrc is enough.
    if mod_only:
        all_keys = channel.datasrc.keys(sort='alpha')
    # List all images in the channel.
    else:
        all_keys = channel.get_image_names()
    # Extract info for listing and saving
    for key in all_keys:
        iminfo = channel.get_image_info(key)
        path = iminfo.get('path')
        idx = iminfo.get('idx')
        # t is None when the buffer has no local modifications.
        t = iminfo.get('time_modified')
        if path is None:  # Special handling for generated buffer, eg mosaic
            infile = key
            is_fits = True
        else:
            infile = os.path.basename(path)
            infile_ext = os.path.splitext(path)[1]
            infile_ext = infile_ext.lower()
            is_fits = False
            # Matches '.fit', '.fits', '.fit.gz' style extensions.
            if 'fit' in infile_ext:
                is_fits = True
        # Only list FITS files unless it is Ginga generated buffer
        if not is_fits:
            continue
        # Only list modified buffers
        if mod_only and t is None:
            continue
        # More than one ext modified, append to existing entry
        if infile in treedict:
            if t is not None:
                treedict[infile].extlist.add(idx)
                elist = sorted(treedict[infile].extlist)
                treedict[infile].MODEXT = ';'.join(
                    map(self._format_extname, elist))
        # Add new entry
        else:
            if t is None:
                s = ''
                extlist = set()
            else:
                s = self._format_extname(idx)
                extlist = set([idx])
            treedict[infile] = Bunch.Bunch(
                IMAGE=infile, MODEXT=s, extlist=extlist, path=path)
    self.treeview.set_tree(treedict)
    # Resize column widths
    n_rows = len(treedict)
    if n_rows == 0:
        self.w.status.set_text('Nothing available for saving')
    elif n_rows < self.settings.get('max_rows_for_col_resize', 5000):
        # Skip the resize for very large tables; it is slow.
        self.treeview.set_optimal_column_widths()
        self.logger.debug('Resized columns for {0} row(s)'.format(n_rows))
def update_channels(self):
    """Update the GUI to reflect channels and image listing.

    Repopulates the channel combobox from the viewer, keeps the current
    channel selected when possible, and refreshes the image listing.

    Raises:
        ValueError: if no channel is available at all.
    """
    if not self.gui_up:
        return
    self.logger.debug("channel configuration has changed--updating gui")
    try:
        channel = self.fv.get_channel(self.chname)
    except KeyError:
        # Previously-selected channel is gone; fall back to the current one.
        channel = self.fv.get_channel_info()
    if channel is None:
        raise ValueError('No channel available')
    self.chname = channel.name
    w = self.w.channel_name
    w.clear()
    self.chnames = list(self.fv.get_channel_names())
    #self.chnames.sort()
    for chname in self.chnames:
        w.append_text(chname)
    # select the channel that is the current one
    try:
        i = self.chnames.index(channel.name)
    except ValueError:
        # BUG FIX: list.index raises ValueError, not IndexError; the
        # original clause could never catch a missing channel name.
        i = 0
    self.w.channel_name.set_index(i)
    # update the image listing
    self.redo()
def select_channel_cb(self, w, idx):
    """Callback: user picked a channel in the combobox; refresh listing."""
    new_name = self.chnames[idx]
    self.chname = new_name
    self.logger.debug("channel name changed to '%s'" % (new_name))
    self.redo()
def _format_extname(self, ext):
    """Pretty print given extension name and number tuple.

    Returns ``None`` unchanged; otherwise formats the 2-tuple as
    ``'<name>,<number>'``.
    """
    if ext is None:
        return None
    return '{0},{1}'.format(ext[0], ext[1])
def browse_outdir(self):
    """Browse for output directory."""
    # The popup writes the chosen path into the entry widget via set_text;
    # set_outdir() then validates it and syncs self.outdir.
    # NOTE(review): assumes the popup blocks (or that set_text fires
    # before set_outdir runs) -- confirm per-backend behavior.
    self.dirsel.popup(
        'Select directory', self.w.outdir.set_text, initialdir=self.outdir)
    self.set_outdir()
def set_outdir(self):
    """Set output directory from the entry widget, validating it."""
    dirname = self.w.outdir.get_text()
    if not os.path.isdir(dirname):
        # Reject bad input and restore the last known-good directory.
        self.w.outdir.set_text(self.outdir)
        self.logger.error('{0} is not a directory'.format(dirname))
        return
    self.outdir = dirname
    self.logger.debug('Output directory set to {0}'.format(self.outdir))
def set_suffix(self):
    """Set output suffix from the entry widget."""
    suffix = self.w.suffix.get_text()
    self.suffix = suffix
    self.logger.debug('Output suffix set to {0}'.format(suffix))
def _write_history(self, pfx, hdu, linechar=60, indentchar=2):
    """Write change history to given HDU header.

    Limit each HISTORY line to given number of characters.
    Subsequent lines of the same history will be indented.

    Args:
        pfx: Image-name prefix used to select history entries.
        hdu: FITS HDU whose header receives the HISTORY cards.
        linechar: Maximum characters per HISTORY line.
        indentchar: Number of spaces used to indent continuation lines.
    """
    channel = self.fv.get_channel(self.chname)
    if channel is None:
        return
    history_plgname = 'ChangeHistory'
    try:
        history_obj = self.fv.gpmon.getPlugin(history_plgname)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows SystemExit
        # and KeyboardInterrupt; catch Exception instead.
        self.logger.error(
            '{0} plugin is not loaded. No HISTORY will be written to '
            '{1}.'.format(history_plgname, pfx))
        return
    if channel.name not in history_obj.name_dict:
        self.logger.error(
            '{0} channel not found in {1}. No HISTORY will be written to '
            '{2}.'.format(channel.name, history_plgname, pfx))
        return
    file_dict = history_obj.name_dict[channel.name]
    chistory = []
    ind = ' ' * indentchar
    # NOTE: List comprehension too slow!
    for key in file_dict:
        if not key.startswith(pfx):
            continue
        for bnch in itervalues(file_dict[key]):
            chistory.append('{0} {1}'.format(bnch.MODIFIED, bnch.DESCRIP))
    # Add each HISTORY prettily into header, sorted by timestamp
    for s in sorted(chistory):
        for i in range(0, len(s), linechar):
            subs = s[i:i + linechar]
            if i > 0:
                # Continuation lines of a wrapped entry are indented.
                subs = ind + subs.lstrip()
            hdu.header.add_history(subs)
def _write_header(self, image, hdu):
    """Write header from image object to given HDU.

    New keywords are inserted with their comments; existing keywords
    are only updated when the value differs (comment left untouched).
    """
    hduhdr = hdu.header
    # Ginga image header object for the given extension only.
    # Cannot use get_header() because that might also return PRI hdr.
    ghdr = image.metadata['header']
    for key in ghdr:
        # Need this to avoid duplication because COMMENT is a weird field
        if key.upper() == 'COMMENT':
            continue
        bnch = ghdr.get_card(key)
        # Insert new keyword
        if key not in hduhdr:
            hduhdr[key] = (bnch.value, bnch.comment)
        # Update existing keyword
        elif hduhdr[key] != bnch.value:
            hduhdr[key] = bnch.value
def _write_mosaic(self, key, outfile):
    """Write out mosaic data (or any new data generated within Ginga)
    to single-extension FITS.

    The image is refused (with an error in the status bar and log) if
    its pixel count exceeds the ``max_mosaic_size`` setting.
    """
    maxsize = self.settings.get('max_mosaic_size', 1e8)  # Default 10k x 10k
    channel = self.fv.get_channel(self.chname)
    image = channel.datasrc[key]
    # Prevent writing very large mosaic
    if (image.width * image.height) > maxsize:
        s = 'Mosaic too large to be written {0}'.format(image.shape)
        self.w.status.set_text(s)
        self.logger.error(s)
        return
    # Insert mosaic data and header into output HDU
    hdu = fits.PrimaryHDU(image.get_data())
    self._write_header(image, hdu)
    # Write history to PRIMARY
    self._write_history(key, hdu)
    # Write to file
    # NOTE(review): ``clobber`` was renamed ``overwrite`` in newer
    # Astropy and eventually removed -- confirm the Astropy version
    # in use still accepts ``clobber``.
    hdu.writeto(outfile, clobber=True)
def _write_mef(self, key, extlist, outfile):
    """Write out regular multi-extension FITS data.

    ``outfile`` must already exist (it is a copy of the original file);
    only the extensions listed in ``extlist`` are updated in place,
    then the change history is appended to the PRIMARY header.
    """
    channel = self.fv.get_channel(self.chname)
    with fits.open(outfile, mode='update') as pf:
        # Process each modified data extension
        for idx in extlist:
            k = '{0}[{1}]'.format(key, self._format_extname(idx))
            image = channel.datasrc[k]
            # Insert data and header into output HDU
            pf[idx].data = image.get_data()
            self._write_header(image, pf[idx])
        # Write history to PRIMARY
        self._write_history(key, pf['PRIMARY'])
def toggle_save_cb(self, w, res_dict):
    """Only enable saving if something is selected."""
    # Single boolean expression replaces the if/else pair.
    self.w.save.set_enabled(len(res_dict) > 0)
def save_images(self):
    """Save selected images.

    This uses Astropy FITS package to save the outputs no matter
    what user chose to load the images.

    Output name is ``<prefix><suffix>[_<channel>].fits`` inside
    ``self.outdir``; existing files are skipped unless the
    ``clobber`` setting is True.
    """
    res_dict = self.treeview.get_selected()
    clobber = self.settings.get('clobber', False)
    self.treeview.clear_selection()  # Automatically disables Save button
    # If user gives empty string, no suffix.
    if self.suffix:
        sfx = '_' + self.suffix
    else:
        sfx = ''
    # Also include channel name in suffix. This is useful if user likes to
    # open the same image in multiple channels.
    if self.settings.get('include_chname', True):
        sfx += '_' + self.chname
    # Process each selected file. Each can have multiple edited extensions.
    for infile in res_dict:
        f_pfx = os.path.splitext(infile)[0]  # prefix
        f_ext = '.fits'  # Only FITS supported
        oname = f_pfx + sfx + f_ext
        outfile = os.path.join(self.outdir, oname)
        self.w.status.set_text(
            'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),
                                               shorten_name(oname, 10)))
        self.logger.debug(
            'Writing out {0} to {1} ...'.format(infile, oname))
        if os.path.exists(outfile) and not clobber:
            self.logger.error('{0} already exists'.format(outfile))
            continue
        bnch = res_dict[infile]
        # No backing file on disk means data was generated inside Ginga
        # (e.g. a mosaic) -> write single-extension FITS; otherwise copy
        # the original and update only the edited extensions.
        if bnch.path is None or not os.path.isfile(bnch.path):
            self._write_mosaic(f_pfx, outfile)
        else:
            shutil.copyfile(bnch.path, outfile)
            self._write_mef(f_pfx, bnch.extlist, outfile)
        self.logger.info('{0} written'.format(outfile))
    self.w.status.set_text('Saving done, see log')
def close(self):
    """Stop this global plugin."""
    self.fv.stop_global_plugin(str(self))

def start(self):
    """Show instructions and resume normal operation."""
    self.instructions()
    self.resume()

def resume(self):
    """Resume the plugin; clears any active viewer mode."""
    # turn off any mode user may be in
    try:
        self.modes_off()
    except AttributeError:
        pass
    self.fv.show_status('See instructions')

def stop(self):
    """Mark GUI as down and clear the status bar."""
    self.gui_up = False
    self.fv.show_status('')

def __str__(self):
    """
    This method should be provided and should return the lower case
    name of the plugin.
    """
    return 'saveimage'
# Replace module docstring with config doc for auto insert by Sphinx.
# In the future, if we need the real docstring, we can append instead of
# overwrite.
from ginga.util.toolbox import generate_cfg_example
__doc__ = generate_cfg_example('plugin_SaveImage', package='ginga')
| |
from __future__ import print_function, unicode_literals
from ._compat import u, string_types, import_
#coding=utf8
# Parsing Markdown
# This version has some differences between Standard Markdown
# Syntax according from http://daringfireball.net/projects/markdown/syntax
#
# They are:
# || `^super^script` || <sup>super</sup>script ||
# || `,,sub,,script` || <sub>sub</sub>script ||
# || `~~strikeout~~` || <span style="text-decoration: line-through">strikeout</span> ||
#
# directly url and image support, e.g.:
# http://code.google.com/images/code_sm.png
# http://www.google.com
# Table support
# Definition list support <dl><dt><dd>
# github flavored Markdown support:
# Multiple underscores in words
# Fenced code blocks
# Syntax highlighting
#
# 2013/7/11
# * Add wiki_link support [[xxx]]
# * Remove old block support
# * Add head line id support
# ## header2 ## {#id}
# * Add ~~~ code block support
# * Add inner and outer anchor class
# * Add header anchor notation
# * Add footnote support
#
#
from par.pyPEG import *
import re
import types
from par.gwiki import WikiGrammar, WikiHtmlVisitor, SimpleVisitor
_ = re.compile
class MarkdownGrammar(WikiGrammar):
    """PEG grammar for the Markdown dialect described in the module
    header (tables, definition lists, footnotes, wiki links, fenced
    code blocks, sub/superscript and strikeout).
    """

    def __init__(self):
        super(MarkdownGrammar, self).__init__()

    def _get_rules(self):
        """Build and return ``(rule_dict, root_rule)`` for the pyPEG parser.

        Inside rule tuples the pyPEG cardinality markers are:
        0 -> optional (?), -1 -> zero or more (*), -2 -> one or more (+).
        """
        # 0 ?
        # -1 *
        # -2 +
        # basic whitespace / separators
        def ws(): return _(r'\s+')
        def space(): return _(r'[ \t]+')
        def eol(): return _(r'\r\n|\r|\n')
        def seperator(): return _(r'[\.,!?\-$ \t\^]')
        # hr: horizontal rules made of *, - or _
        def hr1(): return _(r'\*[ \t]*\*[ \t]*\*[ \t]*[\* \t]*'), -2, blankline
        def hr2(): return _(r'-[ \t]*-[ \t]*-[ \t]*[- \t]*'), -2, blankline
        def hr3(): return _(r'_[ \t]*_[ \t]*_[ \t]*[_ \t]*'), -2, blankline
        def hr(): return [hr1, hr2, hr3]
        # html block: raw block-level / inline HTML passed through
        def html_block(): return _(r'<(table|pre|div|p|ul|h1|h2|h3|h4|h5|h6|blockquote|code).*?>.*?<(/\1)>', re.I|re.DOTALL), -2, blankline
        def html_inline_block(): return _(r'<(span|del|font|a|b|code|i|em|strong|sub|sup).*?>.*?<(/\1)>|<(img|br).*?/>', re.I|re.DOTALL)
        # paragraph-level atoms
        def blankline(): return 0, space, eol
        def identifer(): return _(r'[a-zA-Z_][a-zA-Z_0-9]*', re.U)
        def htmlentity(): return _(r'&\w+;')
        def literal(): return _(r'u?r?"[^"\\]*(?:\\.[^"\\]*)*"', re.I|re.DOTALL)
        def literal1(): return _(r"u?r?'[^'\\]*(?:\\.[^'\\]*)*'", re.I|re.DOTALL)
        def escape_string(): return _(r'\\'), _(r'.')
        def simple_op(): return _(r'[ \t]+(\*\*|__|\*|_|~~|\^|,,)(?=\r|\n|[ \t]+)')
        def op_string(): return _(r'\*\*\*|\*\*|\*|___|__|_|~~|\^|,,')
        def op(): return [(-1, seperator, op_string), (op_string, -1, seperator)]
        def string(): return _(r'[^\\\*_\^~ \t\r\n`,<\[]+', re.U)
        def code_string_short(): return _(r'`'), _(r'[^`]*'), _(r'`')
        def code_string(): return _(r'``'), _(r'.+(?=``)'), _(r'``')
        def default_string(): return _(r'\S+')
        def underscore_words(): return _(r'[\w\d]+_[\w\d]+[\w\d_]*')
        # def word(): return [escape_string, code_string,
        #     code_string_short, htmlentity, underscore_words, op, link,
        #     html_inline_block, inline_tag, string, default_string]
        def word(): return [escape_string, code_string,
            code_string_short, htmlentity, footnote, link,
            html_inline_block, inline_tag, string, default_string]
        # def words(): return [simple_op, word], -1, [simple_op, space, word]
        def words(): return -1, [word, space]
        def line(): return 0, space, words, eol
        def paragraph(): return line, -1, (0, space, common_line), -1, blanklines
        def blanklines(): return -2, blankline
        # footnote: [^name] reference and its ':'-introduced body
        def footnote(): return _(r'\[\^\w+\]')
        def footnote_text(): return list_first_para, -1, [list_content_indent_lines, list_content_lines]
        def footnote_desc():
            return footnote, _(r':'), footnote_text
        # custom inline tag: {name(:class)} [index]
        def inline_tag_name(): return _(r'[^\}:]*')
        def inline_tag_index(): return _(r'[^\]]*')
        def inline_tag_class(): return _(r'[^\}:]*')
        def inline_tag():
            return _(r'\{'), inline_tag_name, 0, (_(r':'), inline_tag_class), _(r'\}'), 0, space, _(r'\['), inline_tag_index, _(r'\]')
        # pre: indented blocks, ``` / ~~~ fences and <code> blocks
        def indent_line_text(): return _(r'.+')
        def indent_line(): return _(r'[ ]{4}|\t'), indent_line_text, eol
        def indent_block(): return -2, [indent_line, blankline]
        def pre_lang(): return 0, space, 0, (block_kwargs, -1, (_(r','), block_kwargs))
        def pre_text1(): return _(r'.+?(?=```|~~~)', re.M|re.DOTALL)
        def pre_text2(): return _(r'.+?(?=</code>)', re.M|re.DOTALL)
        def pre_extra1(): return _(r'```|~{3,}'), 0, pre_lang, 0, space, eol, pre_text1, _(r'```|~{3,}'), -2, blankline
        def pre_extra2(): return _(r'<code>'), 0, pre_lang, 0, space, eol, pre_text2, _(r'</code>'), -2, blankline
        def pre(): return [indent_block, pre_extra1, pre_extra2]
        # class and id definition: {#id .class ...} after headings
        def attr_def_id(): return _(r'#[^\s\}]+')
        def attr_def_class(): return _(r'\.[^\s\}]+')
        def attr_def_set(): return [attr_def_id, attr_def_class], -1, (space, [attr_def_id, attr_def_class])
        def attr_def(): return _(r'\{'), attr_def_set, _(r'\}')
        # subject / headings: setext (underlined) and atx (#-prefixed)
        def setext_title1(): return title_text, 0, space, 0, attr_def, blankline, _(r'={1,}'), -2, blankline
        def setext_title2(): return title_text, 0, space, 0, attr_def, blankline, _(r'-{1,}'), -2, blankline
        def title_text(): return _(r'.+?(?= #| \{#| \{\.)|.+', re.U)
        def atx_title1(): return _(r'# '), title_text, 0, _(r' #+'), 0, space, 0, attr_def, -2, blankline
        def atx_title2(): return _(r'## '), title_text, 0, _(r' #+'), 0, space, 0, attr_def, -2, blankline
        def title1(): return [atx_title1, setext_title1]
        def title2(): return [atx_title2, setext_title2]
        def title3(): return _(r'### '), title_text, 0, _(r' #+'), 0, space, 0, attr_def, -2, blankline
        def title4(): return _(r'#### '), title_text, 0, _(r' #+'), 0, space, 0, attr_def, -2, blankline
        def title5(): return _(r'##### '), title_text, 0, _(r' #+'), 0, space, 0, attr_def, -2, blankline
        def title6(): return _(r'###### '), title_text, 0, _(r' #+'), 0, space, 0, attr_def, -2, blankline
        # deepest level first so '###' is not matched by the '#' rule
        def title(): return [title6, title5, title4, title3, title2, title1]
        # table: legacy '||'-delimited table and pipe table (table2)
        # def table_column(): return -2, [space, escape_string, code_string_short, code_string, op, link, _(r'[^\\\*_\^~ \t\r\n`,\|]+', re.U)], _(r'\|\|')
        def table_column(): return _(r'.+?(?=\|\|)'), _(r'\|\|')
        def table_line(): return _(r'\|\|'), -2, table_column, eol
        def table(): return -2, table_line, -1, blankline
        def table_td(): return _(r'[^\|\r\n]*\|')
        def table_separator_line(): return _(r'\s*:?-+:?\s*\|')
        def table_separator_char(): return _(r'\|')
        def table_other(): return _(r'[^\r\n]+')
        def table_head():
            return 0, _(r'\|'), -2, table_td, -1, table_other, blankline
        def table_separator():
            return 0, _(r'\|'), -2, table_separator_line, -1, table_other, blankline
        def table_body_line():
            return 0, _(r'\|'), -2, table_td, -1, table_other, blankline
        def table_body(): return -2, table_body_line
        def table2():
            return table_head, table_separator, table_body
        # definition lists: `term --` form and `term` / `: def` form
        def dl_dt_1(): return _(r'[^ \t\r\n]+.*--'), -2, blankline
        def dl_dd_1(): return -1, [list_content_indent_lines, blankline]
        def dl_dt_2(): return _(r'[^ \t\r\n]+.*'), -1, blankline
        def dl_dd_2(): return _(r':'), _(r' {1,3}'), list_rest_of_line, -1, [list_content_indent_lines, blankline]
        def dl_line_1(): return dl_dt_1, dl_dd_1
        def dl_line_2(): return dl_dt_2, dl_dd_2
        def dl(): return [dl_line_1, dl_line_2], -1, [blankline, dl_line_1, dl_line_2]
        # def dl(): return -2, dl_line_1
        # block
        # [[tabs(filename=hello.html)]]:
        #     content
        # def block_name(): return _(r'[a-zA-Z_\-][a-zA-Z_\-0-9]*')
        def block_kwargs_key(): return _(r'[^=,\)\n]+')
        def block_kwargs_value(): return _(r'[^\),\n]+')
        def block_kwargs(): return block_kwargs_key, 0, (_(r'='), block_kwargs_value)
        # def block_args(): return _(r'\('), 0, space, 0, (block_kwargs, -1, (_(r','), block_kwargs)), 0, space, _(r'\)')
        # def block_head(): return _(r'\[\['), 0, space, block_name, 0, space, 0, block_args, 0, space, _(r'\]\]:'), eol
        # def block_body(): return list_content_indent_lines
        # def block_item(): return block_head, block_body
        # def block(): return -2, block_item
        # new block:
        # {% blockname para_name=para_value[, para_name, para_name=para_value] %}
        #     content
        # {% endblockname %}
        def new_block_args(): return 0, space, 0, (block_kwargs, -1, (_(r','), block_kwargs)), 0, space
        def new_block_name(): return _(r'([a-zA-Z_\-][a-zA-Z_\-0-9]*)')
        def new_block_head(): return _(r'\{%'), 0, space, new_block_name, new_block_args, _(r'%\}'), eol
        def new_block_end(): return _(r'\{%'), 0, space, _(r'end\1'), 0, space, _(r'%\}'), eol
        def new_block_item(): return new_block_head, new_block_body, new_block_end
        # def new_block(): return -2, new_block_item
        def new_block(): return _(r'\{%\s*([a-zA-Z_\-][a-zA-Z_\-0-9]*)(.*?)%\}(.*?)\{%\s*end\1\s*%\}', re.DOTALL), eol
        # lists: bullet and numbered, with checkbox/radio support
        def check_radio(): return _(r'\[[\*Xx ]?\]|<[\*Xx ]?>'), space
        def common_text(): return _(r'(?:[^\-\+#\r\n\*>\d]|(?:\*|\+|-)\S+|>\S+|\d+\.\S+)[^\r\n]*')
        def common_line(): return common_text, eol
        def list_rest_of_line(): return _(r'.+'), eol
        def list_first_para(): return 0, check_radio, list_rest_of_line, -1, (0, space, common_line), -1, blanklines
        def list_content_text(): return list_rest_of_line, -1, [list_content_norm_line, blankline]
        def list_content_line(): return _(r'[ \t]+([\*+\-]\S+|\d+\.[\S$]*|\d+[^\.]*|[^\-\+\r\n#>]).*')
        def list_content_lines(): return list_content_norm_line, -1, [list_content_indent_lines, blankline]
        def list_content_indent_line(): return _(r' {4}|\t'), list_rest_of_line
        def list_content_norm_line(): return _(r' {1,3}'), common_line, -1, (0, space, common_line), -1, blanklines
        def list_content_indent_lines(): return list_content_indent_line, -1, [list_content_indent_line, list_content_line], -1, blanklines
        def list_content(): return list_first_para, -1, [list_content_indent_lines, list_content_lines]
        def bullet_list_item(): return 0, _(r' {1,3}'), _(r'\*|\+|-'), space, list_content
        def number_list_item(): return 0, _(r' {1,3}'), _(r'\d+\.'), space, list_content
        def list_item(): return -2, [bullet_list_item, number_list_item]
        def lists(): return -2, list_item, -1, blankline
        # quote: '>'-prefixed blocks
        def quote_text(): return _(r'[^\r\n]*'), eol
        def quote_blank_line(): return _(r'>[ \t]*'), eol
        def quote_line(): return _(r'> '), quote_text
        def quote_lines(): return [quote_blank_line, quote_line]
        def blockquote(): return -2, quote_lines, -1, blankline
        # links: direct URLs, images, [caption](url), references, wiki links
        def protocal(): return [_(r'http://'), _(r'https://'), _(r'ftp://')]
        def direct_link(): return _(r'(<)?(?:http://|https://|ftp://)[\w\d\-\.,@\?\^=%&:/~+#]+(?(1)>)')
        def image_link(): return _(r'(<)?(?:http://|https://|ftp://).*?(?:\.png|\.jpg|\.gif|\.jpeg)(?(1)>)', re.I)
        def mailto(): return _(r'<(mailto:)?[a-zA-Z_0-9-/\.]+@[a-zA-Z_0-9-/\.]+>')
        def wiki_link(): return _(r'(\[\[)(.*?)((1)?\]\])')
        def inline_text(): return _(r'[^\]\^]*')
        def inline_image_alt(): return _(r'!\['), inline_text, _(r'\]')
        def inline_image_title(): return literal
        def inline_href(): return _(r'[^\s\)]+')
        def inline_image_link(): return _(r'\('), inline_href, 0, space, 0, inline_link_title, 0, space, _(r'\)')
        def inline_image(): return inline_image_alt, inline_image_link
        def refer_image_alt(): return _(r'!\['), inline_text, _(r'\]')
        def refer_image_refer(): return _(r'[^\]]*')
        def refer_image(): return refer_image_alt, 0, space, _(r'\['), refer_image_refer, _(r'\]')
        def refer_image_title(): return [literal, literal1, '\(.*?\)']
        def inline_link_caption(): return _(r'\['), _(r'[^\]\^]*'), _(r'\]')
        def inline_link_title(): return literal
        def inline_link_link(): return _(r'\('), _(r'[^\s\)]+'), 0, space, 0, inline_link_title, 0, space, _(r'\)')
        def inline_link(): return inline_link_caption, inline_link_link
        def refer_link_caption(): return _(r'\['), _(r'[^\]\^]*'), _(r'\]')
        def refer_link_refer(): return _(r'[^\]]*')
        def refer_link(): return refer_link_caption, 0, space, _(r'\['), refer_link_refer, _(r'\]')
        def refer_link_link(): return 0, _(r'(<)?(\S+)(?(1)>)')
        def refer_link_title(): return [_(r'\([^\)]*\)'), literal, literal1]
        def refer_link_note(): return 0, _(r' {1,3}'), inline_link_caption, _(r':'), space, refer_link_link, 0, (ws, refer_link_title), -2, blankline
        def link(): return [inline_image, refer_image, inline_link, refer_link, image_link, direct_link, wiki_link, mailto], -1, space
        # article: the root rule; order defines match priority
        def article(): return -1, [blanklines, hr, title, refer_link_note, pre, html_block, table, table2, lists, dl, blockquote, new_block, footnote_desc, paragraph]
        # Collect every locally defined rule function into the rule dict.
        peg_rules = {}
        for k, v in ((x, y) for (x, y) in list(locals().items()) if isinstance(y, types.FunctionType)):
            peg_rules[k] = v
        return peg_rules, article
class MarkdownHtmlVisitor(WikiHtmlVisitor):
    """Render a MarkdownGrammar parse tree to HTML.

    ``op_maps`` maps each inline markup operator to its
    (open-tag, close-tag) pair.
    """
    op_maps = {
        '`':['<code>', '</code>'],
        '*':['<em>', '</em>'],
        '_':['<em>', '</em>'],
        '**':['<strong>', '</strong>'],
        '***':['<strong><em>', '</em></strong>'],
        '___':['<strong><em>', '</em></strong>'],
        '__':['<strong>', '</strong>'],
        '~~':['<span style="text-decoration: line-through">', '</span>'],
        '^':['<sup>', '</sup>'],
        ',,':['<sub>', '</sub>'],
    }
    tag_class = {}

    def __init__(self, template=None, tag_class=None, grammar=None,
            title='Untitled', block_callback=None, init_callback=None,
            wiki_prefix='/wiki/', footnote_id=None, filename=None):
        super(MarkdownHtmlVisitor, self).__init__(template, tag_class,
            grammar, title, block_callback, init_callback, filename=filename)
        self.refer_links = {}
        # Operators sorted longest-first so '***' is tried before '*'.
        self.chars = list(self.op_maps.keys())
        self.chars.sort(key=lambda x:len(x), reverse=True)
        self.wiki_prefix = wiki_prefix
        self.footnote_id = footnote_id or 1
        self.footnodes = []

    def visit(self, nodes, root=False):
        """Visit the tree; on the root pass, pre-collect reference-link
        definitions so forward references resolve."""
        if root:
            for obj in nodes[0].find_all('refer_link_note'):
                self.visit_refer_link_note(obj)
        return super(MarkdownHtmlVisitor, self).visit(nodes, root)

    def parse_text(self, text, peg=None):
        """Re-parse ``text`` with rule ``peg`` (name or rule) and render it
        with a fresh visitor that shares this visitor's state."""
        g = self.grammar
        if isinstance(peg, string_types):
            peg = g[peg]
        resultSoFar = []
        result, rest = g.parse(text, root=peg, resultSoFar=resultSoFar, skipWS=False)
        v = self.__class__('', self.tag_class, g, block_callback=self.block_callback,
            init_callback=self.init_callback, wiki_prefix=self.wiki_prefix, filename=self.filename,
            footnote_id=self.footnote_id)
        v.refer_links = self.refer_links
        r = v.visit(result)
        # Propagate the footnote counter back so numbering stays global.
        self.footnote_id = v.footnote_id
        return r

    def process_line(self, line):
        """Expand inline operators (e.g. ``**bold**``) in ``line`` to HTML.

        Scans whitespace/punctuation-separated chunks, pushing opening
        operators on a stack and emitting the matching open/close tags
        when the corresponding closing operator is found.
        """
        chars = self.chars
        op_maps = self.op_maps
        buf = []
        pos = []  # stack of special chars (indices into buf)
        i = 0
        codes = re.split('([ \t\r\n.,?:]+)', line)
        while i < len(codes):
            left = codes[i]
            # process begin match
            for c in chars:
                if left.startswith(c):
                    p = left[len(c):]
                    if p:
                        # Operator prefix: remember its buf position.
                        buf.append(c)
                        pos.append(len(buf)-1)
                        left = p
                    else:
                        # Chunk is the operator alone; emit verbatim.
                        buf.append(left)
                        left = ''
                    break
            # process end match
            if left:
                for c in chars:
                    if left.endswith(c):
                        p = left[:-len(c)]
                        if p:
                            # Pop until the matching opener is found and
                            # replace it with the real open tag.
                            while len(pos) > 0:
                                t = pos.pop()
                                if buf[t] == c:
                                    buf[t] = op_maps[c][0]
                                    buf.append(p)
                                    buf.append(op_maps[c][1])
                                    left = ''
                                    break
                            break
                        break
            if left:
                buf.append(left)
            i += 1
            if i < len(codes):
                # Re-append the separator captured by re.split.
                buf.append(codes[i])
                i += 1
        return ''.join(buf)
def visit_string(self, node):
    """HTML-escape plain text."""
    return self.to_html(node.text)

def visit_blanklines(self, node):
    return '\n'

def visit_blankline(self, node):
    return '\n'

def _get_title(self, node, level):
    """Render a heading of the given level with anchor, optional
    explicit ``{#id}`` and ``{.class}`` attributes."""
    if node.find('attr_def_id'):
        _id = node.find('attr_def_id').text[1:]
    else:
        _id = self.get_title_id(level)
    anchor = '<a class="anchor" href="#%s">¶</a>' % _id
    title = node.find('title_text').text.strip()
    # Remember (level, id, title) for a table of contents.
    self.titles.append((level, _id, title))
    # process classes
    _cls = []
    for x in node.find_all('attr_def_class'):
        _cls.append(x.text[1:])
    return self.tag('h'+u(level), title + anchor, id=_id, _class=' '.join(_cls))

def visit_title1(self, node):
    return self._get_title(node, 1)

def visit_setext_title1(self, node):
    return node[0]

def visit_atx_title1(self, node):
    return node[1].text

def visit_title2(self, node):
    return self._get_title(node, 2)

def visit_setext_title2(self, node):
    return node[0]

def visit_atx_title2(self, node):
    return node[1].text

def visit_title3(self, node):
    return self._get_title(node, 3)

def visit_title4(self, node):
    return self._get_title(node, 4)

def visit_title5(self, node):
    return self._get_title(node, 5)

def visit_title6(self, node):
    return self._get_title(node, 6)

def visit_indent_block_line(self, node):
    return node[1].text

def visit_indent_line(self, node):
    return node.find('indent_line_text').text + '\n'

def visit_paragraph(self, node):
    """Join wrapped lines into one, re-parse as words, expand inline
    operators and wrap in <p>."""
    txt = node.text.rstrip().replace('\n', ' ')
    text = self.parse_text(txt, 'words')
    return self.tag('p', self.process_line(text))

def visit_pre(self, node):
    """Render a code block; a ``lang=xxx`` (or bare ``xxx``) option
    becomes ``class="language-xxx"`` on the <code> tag, any other
    options are attributes on the <pre> tag."""
    lang = node.find('pre_lang')
    kwargs = {}
    cwargs = {}
    if lang:
        for n in lang.find_all('block_kwargs'):
            k = n.find('block_kwargs_key').text.strip()
            v_node = n.find('block_kwargs_value')
            if v_node:
                v = v_node.text.strip()
                if k == 'lang':
                    k = 'class'
                    v = 'language-' + v
                    cwargs[k] = v
                    continue
            else:
                # Bare key is shorthand for the language name.
                v = 'language-' + k
                k = 'class'
                cwargs[k] = v
                continue
            kwargs[k] = v
    return self.tag('pre', self.tag('code', self.to_html(self.visit(node).rstrip()), newline=False, **cwargs), **kwargs)

def visit_pre_extra1(self, node):
    return node.find('pre_text1').text.rstrip()

def visit_pre_extra2(self, node):
    return node.find('pre_text2').text.rstrip()

def visit_inline_link(self, node):
    """Render [caption](url "title"); empty caption falls back to url."""
    kwargs = {'href':node[1][1]}
    if len(node[1]) > 3:
        kwargs['title'] = node[1][3].text[1:-1]
    caption = node[0].text[1:-1].strip()
    if not caption:
        caption = kwargs['href']
    return self.tag('a', caption, newline=False, **kwargs)

def visit_inline_image(self, node):
    kwargs = {}
    kwargs['src'] = node.find('inline_href').text
    title = node.find('inline_link_title')
    if title:
        kwargs['title'] = title.text[1:-1]
    alt = node.find('inline_text')
    if alt:
        kwargs['alt'] = alt.text
    return self.tag('img', enclose=1, **kwargs)

def visit_refer_link(self, node):
    """Render [caption][key] using the collected reference links."""
    caption = node.find('refer_link_caption')[1]
    key = node.find('refer_link_refer')
    if not key:
        # NOTE(review): falls back to the caption *node* here, then calls
        # .upper() on it below -- looks like it relies on the node object
        # behaving string-like; confirm against the pyPEG node API.
        key = caption
    else:
        key = key.text
    return self.tag('a', caption, **self.refer_links.get(key.upper(), {}))

def visit_refer_image(self, node):
    kwargs = {}
    alt = node.find('refer_image_alt')
    if alt:
        alt = alt.find('inline_text').text
    else:
        alt = ''
    key = node.find('refer_image_refer')
    if not key:
        key = alt
    else:
        key = key.text
    d = self.refer_links.get(key.upper(), {})
    kwargs.update({'src':d.get('href', ''), 'title':d.get('title', '')})
    return self.tag('img', enclose=1, **kwargs)

def visit_refer_link_note(self, node):
    """Collect a '[key]: url "title"' definition; renders to nothing."""
    key = node.find('inline_link_caption').text[1:-1].upper()
    self.refer_links[key] = {'href':node.find('refer_link_link').text}
    r = node.find('refer_link_title')
    if r:
        self.refer_links[key]['title'] = r.text[1:-1]
    return ''

def template(self, node):
    body = self.visit(node, True)
    return self._template % {'title':self.title, 'body':body}

def visit_direct_link(self, node):
    """Render a bare URL (optionally wrapped in <...>) as a link."""
    t = node.text
    if t.startswith('<'):
        t = t[1:-1]
    href = t
    return self.tag('a', href, newline=False, href=href)

def visit_wiki_link(self, node):
    """
    [[(type:)name(#anchor)(|alter name)]]
    type = 'wiki', or 'image'
    if type == 'wiki':
        [[(wiki:)name(#anchor)(|alter name)]]
    if type == 'image':
        [[(image:)filelink(|align|width|height)]]
        float = 'left', 'right'
        width or height = '' will not set
    """
    urljoin = import_('urllib.parse', ['urljoin'], via='urlparse')
    t = node.text[2:-2].strip()
    type = 'wiki'
    begin = 0
    if t[:6].lower() == 'image:':
        type = 'image'
        begin = 6
    elif t[:5].lower() == 'wiki:':
        type = 'wiki'
        begin = 5
    t = t[begin:]
    if type == 'wiki':
        _v, caption = (t.split('|', 1) + [''])[:2]
        name, anchor = (_v.split('#', 1) + [''])[:2]
        if not caption:
            caption = name
        _prefix = self.wiki_prefix
        if not name:
            # Pure-anchor link within the current page.
            _prefix = ''
            name = '#' + anchor
        else:
            name = _v
        return self.tag('a', caption, href="%s" % urljoin(_prefix, name))
    elif type == 'image':
        _v = (t.split('|') + ['', '', ''])[:4]
        filename, align, width, height = _v
        cls = ''
        if width:
            if width.isdigit():
                cls += ' width="%spx"' % width
            else:
                cls += ' width="%s"' % width
        if height:
            if height.isdigit():
                cls += ' height="%spx"' % height
            else:
                cls += ' height="%s"' % height
        s = '<img src="%s" %s/>' % (filename, cls)
        if align:
            s = '<div class="float%s">%s</div>' % (align, s)
        return s

def visit_image_link(self, node):
    t = node.text
    if t.startswith('<'):
        e = -1
        b = 1
    else:
        b = 0
        e = len(t)
    href = t[b:e]
    return self.tag('img', src=href, enclose=1)

def visit_mailto(self, node):
    """Render a <mailto:...> link with hex-entity obfuscation of the
    address (random per character)."""
    href = node.text[1:-1]
    if href.startswith('mailto:'):
        href = href[7:]
    def shuffle(text):
        import random
        t = []
        for x in text:
            if random.choice('01') == '1':
                t.append('&#x%X;' % ord(x))
            else:
                t.append(x)
        return ''.join(t)
    return self.tag('a', shuffle(href), href=shuffle("mailto:"+href), newline=False)

def visit_quote_line(self, node):
    # Strip the leading '> '.
    return node.text[2:]

def visit_quote_blank_line(self, node):
    return '\n'

def visit_blockquote(self, node):
    """Strip quote markers, re-parse the body as a full article and
    wrap it in <blockquote>."""
    text = []
    for line in node.find_all('quote_lines'):
        text.append(self.visit(line))
    result = self.parse_text(''.join(text), 'article')
    return self.tag('blockquote', result)
def visit_lists_begin(self, node):
    # Collect (type, node) pairs; rendering happens in visit_lists_end.
    self.lists = []
    return ''

def visit_list_content_line(self, node):
    return node.text.strip()

def visit_list_content_indent_line(self, node):
    return node.find('list_rest_of_line').text

def visit_bullet_list_item(self, node):
    self.lists.append(('b', node.find('list_content')))
    return ''

def visit_number_list_item(self, node):
    self.lists.append(('n', node.find('list_content')))
    return ''

def visit_check_radio(self, node):
    """Render [ ]/[x] as a checkbox and < >/<x> as a radio button."""
    tag = []
    if node.text[0] == '[':
        tag.append('<input type="checkbox"')
    else:
        tag.append('<input type="radio"')
    if node.text[1] == '*' or node.text[1].upper() == 'X':
        tag.append(' checked')
    tag.append('\></input> ')
    return ''.join(tag)

def visit_lists_end(self, node):
    """Emit the collected list items as nested <ul>/<ol> markup."""
    def process_node(n):
        # Render one item's content; unwrap a lone surrounding <p>.
        txt = []
        for node in n:
            txt.append(self.visit(node))
        text = ''.join(txt)
        t = self.parse_text(text, 'article').rstrip()
        if t.count('<p>') == 1 and t.startswith('<p>') and t.endswith('</p>'):
            ret = t[3:-4].rstrip()
        else:
            ret = t
        return ret
    def create_list(lists):
        # Open a new <ul>/<ol> whenever the item type changes.
        buf = []
        old = None
        parent = None
        for _type, _node in lists:
            if _type == old:
                buf.append(self.tag('li', process_node(_node)))
            else:
                #find another list
                if parent:
                    buf.append('</' + parent + '>\n')
                if _type == 'b':
                    parent = 'ul'
                else:
                    parent = 'ol'
                buf.append(self.tag(parent))
                buf.append(self.tag('li', process_node(_node)))
            old = _type
        if buf:
            buf.append('</' + parent + '>\n')
        return ''.join(buf)
    return create_list(self.lists)

def visit_dl_begin(self, node):
    return self.tag('dl')

def visit_dl_end(self, node):
    return '</dl>'

def visit_dl_dt_1(self, node):
    # Strip the trailing ' --' marker from the term.
    txt = node.text.rstrip()[:-3]
    text = self.parse_text(txt, 'words')
    return self.tag('dt', self.process_line(text), enclose=1)

def visit_dl_dd_1(self, node):
    txt = self.visit(node).rstrip()
    text = self.parse_text(txt, 'article')
    return self.tag('dd', text, enclose=1)

def visit_dl_dt_2(self, node):
    txt = node.text.rstrip()
    text = self.parse_text(txt, 'words')
    return self.tag('dt', self.process_line(text), enclose=1)

def visit_dl_dd_2(self, node):
    # Drop the leading ':' before re-parsing the definition body.
    txt = self.visit(node).rstrip()
    text = self.parse_text(txt[1:].lstrip(), 'article')
    return self.tag('dd', text, enclose=1)

def visit_inline_tag(self, node):
    """Render {name(:class)} [index] as a span carrying a data-rel."""
    rel = node.find('inline_tag_index').text.strip()
    name = node.find('inline_tag_name').text.strip()
    _c = node.find('inline_tag_class')
    rel = rel or name
    if _c:
        cls = ' '+_c.text.strip()
    else:
        cls = ''
    return ('<span class="inline-tag%s" data-rel="' % cls )+rel+'">'+name+'</span>'

def visit_new_block(self, node):
    """Handle a {% name args %}...{% endname %} block by dispatching
    to the registered block callback; unknown blocks render empty."""
    block = {'new':True}
    r = re.compile(r'\{%\s*([a-zA-Z_\-][a-zA-Z_\-0-9]*)\s*(.*?)%\}(.*?)\{%\s*end\1\s*%\}', re.DOTALL)
    m = r.match(node.text)
    if m:
        block['name'] = m.group(1)
        block_args = m.group(2).strip()
        block['body'] = m.group(3).strip()
        resultSoFar = []
        # Parse the argument list with the grammar's own kwargs rule.
        result, rest = self.grammar.parse(block_args, root=self.grammar['new_block_args'], resultSoFar=resultSoFar, skipWS=False)
        kwargs = {}
        # NOTE(review): this loop variable shadows the ``node`` parameter.
        for node in result[0].find_all('block_kwargs'):
            k = node.find('block_kwargs_key').text.strip()
            v = node.find('block_kwargs_value')
            if v:
                v = v.text.strip()
            kwargs[k] = v
        block['kwargs'] = kwargs
    func = self.block_callback.get(block['name'])
    if func:
        return func(self, block)
    else:
        return ''
    # return node.text

def visit_table_column(self, node):
    text = self.parse_text(node.text[:-2].strip(), 'words')
    return self.tag('td', self.process_line(text), newline=False)

def visit_table2_begin(self, node):
    """Open a pipe table; derive per-column alignment from the
    ':---:'-style separator row."""
    self.table_align = {}
    separator = node.find('table_separator')
    for i, x in enumerate(list(separator.find_all('table_separator_line')) +
            list(separator.find_all('table_other'))):
        t = x.text
        if t.endswith('|'):
            t = t[:-1]
        t = t.strip()
        left = t.startswith(':')
        right = t.endswith(':')
        if left and right:
            align = 'center'
        elif left:
            align = 'left'
        elif right:
            align = 'right'
        else:
            align = ''
        self.table_align[i] = align
    return self.tag('table', newline=True)

def visit_table2_end(self, node):
    return '</table>\n'

def visit_table_head(self, node):
    s = ['<thead>\n<tr>']
    for t in ('table_td', 'table_other'):
        for x in node.find_all(t):
            text = x.text
            if text.endswith('|'):
                text = text[:-1]
            s.append('<th>%s</th>' % self.process_line(text.strip()))
    s.append('</tr>\n</thead>\n')
    return ''.join(s)

def visit_table_separator(self, node):
    # Alignment was consumed in visit_table2_begin; emit nothing.
    return ''

def visit_table_body(self, node):
    s = ['<tbody>\n']
    s.append(self.visit(node))
    s.append('</tbody>')
    return ''.join(s)

def visit_table_body_line(self, node):
    s = ['<tr>']
    def get_node():
        for t in ('table_td', 'table_other'):
            for x in node.find_all(t):
                yield x
    for i, x in enumerate(get_node()):
        text = x.text
        if text.endswith('|'):
            text = text[:-1]
        s.append(self.tag('td', self.process_line(text.strip()),
            align=self.table_align.get(i, ''), newline=False, enclose=2))
    s.append('</tr>\n')
    return ''.join(s)

def visit_footnote(self, node):
    """Render a [^name] reference with a running footnote number."""
    name = node.text[2:-1]
    _id = self.footnote_id
    self.footnote_id += 1
    return '<sup id="fnref-%s"><a href="#fn-%s" class="footnote-rel inner">%d</a></sup>' % (name, name, _id)

def visit_footnote_desc(self, node):
    """Collect a footnote body for output in __end__; duplicate names
    raise."""
    name = node.find('footnote').text[2:-1]
    if name in self.footnodes:
        raise Exception("The footnote %s is already existed" % name)
    txt = self.visit(node.find('footnote_text')).rstrip()
    text = self.parse_text(txt, 'article')
    n = {'name':'%s' % name, 'text':text}
    self.footnodes.append(n)
    return ''

def __end__(self):
    """Append the collected footnotes as an ordered list with
    back-references."""
    s = []
    if len(self.footnodes):
        s.append('<div class="footnotes"><ol>')
        for n in self.footnodes:
            name = n['name']
            s.append('<li id="fn-%s">' % (name,))
            s.append(n['text'])
            s.append(self.tag('a', '↩', href='#fnref-%s' % name, _class='footnote-backref'))
            s.append('</li>')
        s.append('</ol></div>')
    return '\n'.join(s)
def parseHtml(text, template=None, tag_class=None, block_callback=None,
        init_callback=None, filename=None, grammer=None, visitor=None):
    """Parse Markdown ``text`` and render it through the HTML template.

    ``grammer`` (sic -- parameter name kept for API compatibility) and
    ``visitor`` default to MarkdownGrammar / MarkdownHtmlVisitor.
    """
    template = template or ''
    tag_class = tag_class or {}
    g = (grammer or MarkdownGrammar)()
    resultSoFar = []
    result, rest = g.parse(text, resultSoFar=resultSoFar, skipWS=False)
    v = (visitor or MarkdownHtmlVisitor)(template, tag_class, g,
        block_callback=block_callback,
        init_callback=init_callback,
        filename=filename)
    return v.template(result)

def parseText(text, filename=None, grammer=None, visitor=None):
    """Parse Markdown ``text`` and return the plain visit result
    (SimpleVisitor by default; no HTML template applied)."""
    g = (grammer or MarkdownGrammar)()
    resultSoFar = []
    result, rest = g.parse(text, resultSoFar=resultSoFar, skipWS=False)
    v = (visitor or SimpleVisitor)(g, filename=filename)
    return v.visit(result, root=True)
| |
#!/usr/bin/env python
import sys
import os
import subprocess
import platform
import threading
import hyperspeed
import json
# GTK bindings are optional: on a headless host (or Python without
# PyGTK) fall back to gtk = False so callers can detect the missing GUI.
try:
    import gtk
    import gobject
    import pango
except ImportError as e:
    # BUGFIX: ``print e`` is Python-2-only syntax and makes the whole
    # file unparseable under Python 3; ``print(e)`` behaves identically
    # on both interpreters for a single argument.
    print(e)
    gtk = False
# import ctypes
# try:
# # ctypes.CDLL("/home/mistika/MISTIKA-ENV/bin/lib/libX11.so.6", mode = ctypes.RTLD_GLOBAL)
# # ctypes.CDLL(hyperspeed.folder+"/res/lib/libxcb-xlib.so.0", mode = ctypes.RTLD_GLOBAL)
# ctypes.CDLL(hyperspeed.folder+"/res/lib/ld-linux-x86-64.so.2", mode = ctypes.RTLD_GLOBAL)
# ctypes.CDLL(hyperspeed.folder+"/res/lib/libc.so.6", mode = ctypes.RTLD_GLOBAL)
# # OSError: /home/mistika/mistika-hyperspeed/res/lib/libc.so.6: symbol _dl_starting_up, version GLIBC_PRIVATE not defined in file ld-linux-x86-64.so.2 with link time reference
# sys.path.insert(1, os.path.join(hyperspeed.folder, 'res/lib/gtk-2.0'))
# import gtk
# import gobject
# import pango
# except ImportError as e:
# print e
# import hyperspeed.sockets
# try:
# args = sys.argv
# args[0] = os.path.abspath(args[0])
# hyperspeed.sockets.launch(args)
# sys.exit(0)
# except IOError as e:
# print e
# print 'Could not launch %s' % __file__
# sys.exit(1)
def get_script_settings_path(script_path=False):
    """Return the path of the .cfg settings file for a script.

    Defaults to the currently running script (sys.argv[0]). Scripts that
    live inside hyperspeed.folder get their settings mirrored under
    hyperspeed.config_folder; anything else (or a failure to create the
    mirrored folder) falls back to `<script without extension>.cfg`
    next to the script.
    """
    if not script_path:
        script_path = os.path.realpath(sys.argv[0])
    # Strip the extension so we can never overwrite the script itself.
    if not script_path.endswith('.cfg'):
        script_path = os.path.splitext(script_path)[0]
    fallback = script_path + '.cfg'
    if not script_path.startswith(hyperspeed.folder):
        return fallback
    settings_path = script_path.replace(hyperspeed.folder, hyperspeed.config_folder) + '.cfg'
    settings_folder = os.path.dirname(settings_path)
    if not os.path.isdir(settings_folder):
        try:
            os.makedirs(settings_folder)
        except OSError:
            return fallback
    return settings_path
class TerminalReplacement(gtk.Window):
def __init__(self, method, inputs=False, default_folder=False):
super(TerminalReplacement, self).__init__()
screen = self.get_screen()
self.set_size_request(screen.get_width()/2-100, screen.get_height()-200)
self.set_border_width(20)
self.set_position(gtk.WIN_POS_CENTER)
self.connect("key-press-event",self.on_key_press_event)
self.method = method
self.default_folder = default_folder
self.gui_ref = {}
vbox = gtk.VBox(False, 10)
for i, input_label in enumerate(inputs):
input_id = 'input %i' % i
self.gui_ref[input_id] = {}
hbox = gtk.HBox(False, 10)
label = gtk.Label(input_label+':')
hbox.pack_start(label, False, False, 0)
label = self.gui_ref[input_id]['label'] = gtk.Entry()
hbox.pack_start(label, True, True, 0)
button = self.gui_ref[input_id]['button'] = gtk.Button('...')
button.connect("clicked", self.add_files_dialog, input_id, input_label)
hbox.pack_start(button, False, False, 0)
vbox.pack_start(hbox, False, False, 0)
button = gtk.Button('Go')
button.connect("clicked", self.run)
vbox.pack_start(button, False, False, 5)
textview = self.textview = gtk.TextView()
fontdesc = pango.FontDescription("monospace")
textview.modify_font(fontdesc)
# textview.set_editable(False)
scroll = gtk.ScrolledWindow()
scroll.add(textview)
# expander = gtk.Expander("Details")
# expander.add(scroll)
vbox.pack_start(scroll, True, True, 5)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
gobject.idle_add(self.present)
def run(self, widget):
inputs = []
for input_id in self.gui_ref:
inputs += self.gui_ref[input_id]['label'].get_text().split(', ')
self.method(inputs, self.prnt)
def prnt(self, string):
gobject.idle_add(self.textview.get_buffer().insert_at_cursor, string+'\n')
def add_files_dialog(self, widget, input_id, input_label):
if self.default_folder:
folder = self.default_folder
else:
folder = os.path.expanduser('~')
dialog = gtk.FileChooserDialog(title=input_label, parent=None, action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK), backend=None)
# if 'darwin' in platform.system().lower():
# dialog.set_resizable(False) # Because resizing crashes the app on Mac
dialog.set_select_multiple(True)
#dialog.add_filter(filter)
dialog.set_current_folder(folder)
# filter = gtk.FileFilter()
# filter.set_name("Xml files")
# filter.add_pattern("*.xml")
response = dialog.run()
if response == gtk.RESPONSE_OK:
files = dialog.get_filenames()
dialog.destroy()
self.gui_ref[input_id]['label'].set_text(', '.join(files))
elif response == gtk.RESPONSE_CANCEL:
print 'Closed, no files selected'
dialog.destroy()
return
def on_key_press_event(self,widget,event):
keyval = event.keyval
keyval_name = gtk.gdk.keyval_name(keyval)
state = event.state
ctrl = (state & gtk.gdk.CONTROL_MASK)
command = (state & gtk.gdk.MOD1_MASK)
if ctrl or command and keyval_name == 'q':
self.on_quit(widget)
else:
return False
return True
class Window(gtk.Window):
quit = False
def __init__(self, title, settings_default, icon_path=None):
super(Window, self).__init__()
self.history = []
self.hotkeys = [
{
'combination' : ['Ctrl', 'q'],
'method' : self.on_quit,
'args' : ['Keyboard shortcut']
},
{
'combination' : ['Ctrl', 'z'],
'method' : self.undo,
'args' : [1]
},
]
settings = self.settings = settings_default
self.settings_load()
screen = self.get_screen()
monitor = screen.get_monitor_geometry(0)
self.set_title(title)
self.set_border_width(20)
self.set_position(gtk.WIN_POS_CENTER)
if 'darwin' in platform.system().lower():
self.set_resizable(False) # Because resizing crashes the app on Mac
self.set_size_request(monitor.width-200, monitor.height-200)
if 'window_size' in settings:
self.set_default_size(settings['window_size']['width'], settings['window_size']['height'])
else:
self.set_default_size(monitor.width-200, monitor.height-200)
self.connect("key-press-event",self.on_key_press_event)
if not icon_path:
icon_path = os.path.join(hyperspeed.folder, 'res/img/hyperspeed_1024px.png')
self.set_icon_list(
gtk.gdk.pixbuf_new_from_file_at_size(icon_path, 16, 16),
gtk.gdk.pixbuf_new_from_file_at_size(icon_path, 32, 32),
gtk.gdk.pixbuf_new_from_file_at_size(icon_path, 64, 64),
gtk.gdk.pixbuf_new_from_file_at_size(icon_path, 128, 128),
gtk.gdk.pixbuf_new_from_file_at_size(icon_path, 256, 256),
)
self.connect('check-resize', self.on_window_resize)
self.connect("destroy", self.on_quit)
# gtkrc = '''
# style "theme-fixes" {
# font_name = "sans normal 12"
# }
# class "*" style "theme-fixes"'''
# gtk.rc_parse_string(gtkrc)
def on_window_resize(self, window):
width, height = self.get_size()
self.set_settings({
'window_size' : {
'width' : width,
'height': height
}
})
# self.launch_thread(self.set_settings, [{
# 'window_size' : {
# 'width' : width,
# 'height': height
# }
# }])
def on_key_press_event(self,widget,event):
hotkeys = self.hotkeys
keyval = event.keyval
keyval_name = gtk.gdk.keyval_name(keyval)
state = event.state
ctrl = (state & gtk.gdk.CONTROL_MASK)
command = (state & gtk.gdk.MOD1_MASK)
combination = []
if (ctrl or command):
combination.append('Ctrl')
combination.append(keyval_name)
for hotkey in hotkeys:
if combination == hotkey['combination']:
if 'args' in hotkey:
args = hotkey['args']
else:
args = []
if 'kwargs' in hotkey:
kwargs = hotkey['kwargs']
else:
kwargs = {}
hotkey['method'](*args,**kwargs)
return True
return False
def on_quit(self, widget):
self.quit = True
if type(widget) is gtk.Button:
widget_name = widget.get_label() + ' button'
else:
widget_name = str(widget)
# print 'Closed by: ' + widget_name
gtk.main_quit()
def undo(self, steps):
history = self.history
while steps > 0 and len(history) > 0:
previous = history.pop()
previous.undo()
steps -= 1
def settings_load(self):
self.settings_path = get_script_settings_path()
try:
self.settings.update(json.loads(open(self.settings_path).read()))
except IOError:
# No settings found
pass
def set_settings(self, settings={}):
self.settings.update(settings)
t = threading.Thread(target=self.settings_store, name='Store settings')
t.setDaemon(True)
t.start()
def on_settings_change(self, widget, setting_key):
if hasattr(widget, 'get_active'): # Checkbox
value = widget.get_active()
elif hasattr(widget, 'get_text'): # Textbox
value = widget.get_text()
self.set_settings({
setting_key : value
})
def settings_store(self):
try:
open(self.settings_path, 'w').write(json.dumps(self.settings, sort_keys=True, indent=4))
return True
except IOError as e:
print 'Could not store settings. %s' % e
return False
def launch_thread(self, target, name=False, args=[], kwargs={}):
arg_strings = []
for arg in list(args):
arg_strings.append(repr(arg))
for k, v in kwargs.iteritems():
arg_strings.append('%s=%s' % (k, v))
if not name:
name = '%s(%s)' % (target, ', '.join(arg_strings))
t = threading.Thread(target=target, name=name, args=args, kwargs=kwargs)
t.setDaemon(True)
t.start()
return t
def dialog_yesno(parent, question, confirm_object=False, confirm_lock=False):
    """Show a modal Yes/No dialog; return True iff 'Yes' was clicked.

    confirm_object: optional 1-element list the result is written into
    (lets a worker thread read the answer).
    confirm_lock: optional lock released after the result is stored.
    """
    dialog = gtk.MessageDialog(
        parent = parent,
        flags=0,
        type=gtk.MESSAGE_QUESTION,
        buttons=gtk.BUTTONS_YES_NO,
        message_format=question
    )
    dialog.set_position(gtk.WIN_POS_CENTER)
    response = dialog.run()
    dialog.destroy()
    # Fix: compare against the named constant (value -8) instead of the
    # magic number, and return the boolean directly.
    status = response == gtk.RESPONSE_YES
    if confirm_object:
        confirm_object[0] = status
    if confirm_lock:
        confirm_lock.release()
    return status
def dialog_info(parent, message):
    """Show a modal informational dialog with a single OK button."""
    dialog = gtk.MessageDialog(
        parent = parent,
        flags=0,
        type=gtk.MESSAGE_INFO,
        buttons=gtk.BUTTONS_OK,
        message_format=message
    )
    dialog.set_position(gtk.WIN_POS_CENTER)
    # Fix: the response value was assigned but never used.
    dialog.run()
    dialog.destroy()
def dialog_error(parent, message):
    """Show a modal error dialog with a single OK button."""
    dialog = gtk.MessageDialog(
        parent = parent,
        flags=0,
        type=gtk.MESSAGE_ERROR,
        buttons=gtk.BUTTONS_OK,
        message_format=message
    )
    dialog.set_position(gtk.WIN_POS_CENTER)
    # Fix: the response value was assigned but never used.
    dialog.run()
    dialog.destroy()
def event_debug(*args):
    # Debug helper: connect to any GTK signal to dump the callback arguments.
    print repr(args)
| |
import bb
import oe.path
import glob
import hashlib
import os.path
import shutil
import string
import subprocess
# BitBake datastore variables consumed by OSTreeUpdate; each becomes an
# attribute on the instance (see OSTreeUpdate.__init__).
VARIABLES = (
    'IMAGE_ROOTFS',
    'OSTREE_BRANCHNAME',
    'OSTREE_COMMIT_SUBJECT',
    'OSTREE_REPO',
    'OSTREE_GPGDIR',
    'OSTREE_GPGID',
    'OSTREE_OS',
    'OSTREE_REMOTE',
    'OSTREE_BARE',
    'OSTREE_ROOTFS',
    'OSTREE_SYSROOT',
)
class OSTreeUpdate(string.Formatter):
    """
    Create an OSTree-enabled version of an image rootfs, using an intermediate
    per-image OSTree bare-user repository. Optionally export the content
    of this repository into HTTP-exportable archive-z2 OSTree repository
    which clients can use to pull the image in as an OSTree upgrade.
    """
    # Variables allowed to contain whitespace. All others are interpolated
    # unquoted into shell command lines (see run_ostree), so they must be
    # single tokens.
    WHITESPACES_ALLOWED = (
        'OSTREE_COMMIT_SUBJECT',
    )
    def __init__(self, d):
        # d: the BitBake datastore. Each VARIABLES entry becomes an attribute
        # so self.format() can interpolate it by name (see get_value).
        for var in VARIABLES:
            value = d.getVar(var)
            if var not in self.WHITESPACES_ALLOWED:
                for c in '\n\t ':
                    if c in value:
                        bb.fatal('%s=%s is not allowed to contain whitespace' % (var, value))
            setattr(self, var, value)
        # Pre-built GPG option string for ostree commit/summary; stays empty
        # when signing is not configured.
        self.gpg_sign = ''
        if self.OSTREE_GPGID:
            if self.OSTREE_GPGDIR:
                self.gpg_sign += self.format(' --gpg-homedir={OSTREE_GPGDIR}')
            self.gpg_sign += self.format(' --gpg-sign={OSTREE_GPGID}')
    def get_value(self, key, args, kwargs):
        """
        This class inherits string.Formatter and thus has self.format().
        We extend the named field lookup so that object attributes and thus
        the variables above can be used directly.
        """
        if isinstance(key, str) and key not in kwargs:
            return getattr(self, key)
        else:
            return super().get_value(key, args, kwargs)
    def run_ostree(self, command, *args, **kwargs):
        # Format and run an ostree CLI command; returns combined
        # stdout+stderr, raises CalledProcessError on non-zero exit.
        # NOTE: shell=True with unquoted interpolation is why __init__
        # rejects whitespace in most variables.
        cmd = 'ostree ' + self.format(command, *args, **kwargs)
        bb.debug(1, 'Running: {0}'.format(cmd))
        output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
        return output
    def copy_sysroot(self):
        """
        Seed the OSTree sysroot with the pristine one.
        """
        bb.note(self.format('Copying pristine rootfs {IMAGE_ROOTFS} to OSTree sysroot {OSTREE_SYSROOT} ...'))
        oe.path.copyhardlinktree(self.IMAGE_ROOTFS, self.OSTREE_SYSROOT)
    def copy_kernel(self):
        """
        Copy and checksum kernel, initramfs, and the UEFI app in place for OSTree.
        TODO: why?
        """
        uefidir = os.path.join(self.IMAGE_ROOTFS, 'boot')
        uefibootdir = os.path.join(uefidir, 'EFI', 'BOOT')
        uefiinternalbootdir = os.path.join(uefidir, 'EFI_internal_storage', 'BOOT')
        # Exactly one boot*.efi combo app is expected in the rootfs.
        uefiappname = glob.glob(os.path.join(uefibootdir, 'boot*.efi'))
        if len(uefiappname) != 1:
            bb.fatal(self.format('Ambiguous UEFI app in {0}: {1}', uefibootdir, uefiappname))
        uefiappname = os.path.basename(uefiappname[0])
        ostreeboot = os.path.join(self.OSTREE_SYSROOT, 'usr', 'lib', 'ostree-boot')
        bb.note(self.format('Copying and checksumming UEFI combo app(s) {0} into OSTree sysroot {1} ...', uefiappname, ostreeboot))
        bb.utils.mkdirhier(ostreeboot)
        # Copy src to dst-<sha256 of content>, preserving stat info, and
        # return the checksum.
        def copy_app(src, dst):
            with open(src, 'rb') as f:
                data = f.read()
            chksum = hashlib.sha256(data).hexdigest()
            with open(dst + '-' + chksum, 'wb') as f:
                f.write(data)
            shutil.copystat(src, dst + '-' + chksum)
            return chksum
        # OSTree doesn't care too much about the actual checksums on kernel
        # and initramfs. We use the same checksum derived from the UEFI combo
        # app for all parts related to it.
        chksum = copy_app(os.path.join(uefibootdir, uefiappname),
                          os.path.join(ostreeboot, uefiappname + '.ext'))
        copy_app(os.path.join(uefiinternalbootdir, uefiappname),
                 os.path.join(ostreeboot, uefiappname + '.int'))
        # OSTree expects to find kernel and initramfs, so we provide it
        # although the files are not used.
        # TODO: does it really make sense to put the real content there?
        # It's not going to get used.
        bb.note('Extracting and checksumming kernel, initramfs for ostree...')
        kernel = os.path.join(ostreeboot, 'vmlinuz')
        initrd = os.path.join(ostreeboot, 'initramfs')
        # TODO: where does objcopy come from?
        #subprocess.check_output('objcopy --dump-section .linux=%s --dump-section .initrd=%s %s' %
        #                        (kernel, initrd, os.path.join(uefibootdir, uefiappname)))
        # os.rename(kernel, kernel + '-' + chksum)
        # os.rename(initrd, initrd + '-' + chksum)
        # For now just create dummy files.
        open(kernel + '-' + chksum, 'w').close()
        open(initrd + '-' + chksum, 'w').close()
    def ostreeify_sysroot(self):
        """
        Mangle sysroot into an OSTree-compatible layout.
        """
        # Note that everything created/shuffled here will end up getting
        # relocated under the ostree deployment directory for the image
        # we're building. Everything that needs to get created relative in the
        # to the final physical rootfs should be done in finalize_sysroot.
        bb.note('* Shuffling sysroot to OSTree-compatible layout...')
        # The OSTree deployment model requires the following directories
        # and symlinks in place:
        #
        #     /sysroot: the real physical rootfs bind-mounted here
        #     /sysroot/ostree: ostree repo and deployments ('checkouts')
        #     /ostree: symlinked to /sysroot/ostree for consistent access
        #
        # Additionally the deployment model suggests setting up deployment
        # root symlinks for the following:
        #
        #     /home -> /var/home (further linked -> /sysroot/home)
        #     /opt -> /var/opt
        #     /srv -> /var/srv
        #     /root -> /var/roothome
        #     /usr/local -> /var/local
        #     /mnt -> /var/mnt
        #     /tmp -> /sysroot/tmp
        #
        # In this model, /var can be a persistent second data partition.
        # We just use one partition, so instead we have:
        #
        #     /boot = mount point for persistent /boot directory in the root partition
        #     /var = mount point for persistent /ostree/deploy/refkit/var
        #     /home = mount point for persistent /home directory in the root partition
        #     /mnt = symlink to var/mnt
        #     /tmp = symlink to sysroot/tmp (persistent)
        #
        # Additionally,
        #     /etc is moved to /usr/etc as the default config
        sysroot = os.path.join(self.OSTREE_SYSROOT, 'sysroot')
        bb.utils.mkdirhier(sysroot)
        os.symlink('sysroot/ostree', os.path.join(self.OSTREE_SYSROOT, 'ostree'))
        # Replace each directory with either an empty mount point (link=None)
        # or a relative symlink.
        for dir, link in (
                ('boot', None),
                ('var', None),
                ('home', None),
                ('mnt', 'var/mnt'),
                ('tmp', 'sysroot/tmp'),
        ):
            path = os.path.join(self.OSTREE_SYSROOT, dir)
            if os.path.isdir(path):
                shutil.rmtree(path)
            if link is None:
                bb.utils.mkdirhier(path)
            else:
                os.symlink(link, path)
        # Preserve read-only copy of /etc for OSTree's three-way merge.
        os.rename(os.path.join(self.OSTREE_SYSROOT, 'etc'),
                  os.path.join(self.OSTREE_SYSROOT, 'usr', 'etc'))
    def prepare_sysroot(self):
        """
        Prepare a rootfs for committing into an OSTree repository.
        """
        if os.path.isdir(self.OSTREE_SYSROOT):
            bb.note(self.format('OSTree sysroot {OSTREE_SYSROOT} already exists, nuking it...'))
            shutil.rmtree(self.OSTREE_SYSROOT)
        bb.note(self.format('Preparing OSTree sysroot {OSTREE_SYSROOT} ...'))
        self.copy_sysroot()
        self.copy_kernel()
        self.ostreeify_sysroot()
    def populate_repo(self):
        """
        Populate primary OSTree repository (bare-user mode) with the given sysroot.
        """
        bb.note(self.format('Populating OSTree primary repository {OSTREE_BARE} ...'))
        # The bare repo is rebuilt from scratch on every run.
        if os.path.isdir(self.OSTREE_BARE):
            shutil.rmtree(self.OSTREE_BARE)
        bb.utils.mkdirhier(self.OSTREE_BARE)
        self.run_ostree('--repo={OSTREE_BARE} init --mode=bare-user')
        self.run_ostree('--repo={OSTREE_BARE} commit '
                        '{gpg_sign} '
                        '--tree=dir={OSTREE_SYSROOT} '
                        '--branch={OSTREE_BRANCHNAME} '
                        '--subject="{OSTREE_COMMIT_SUBJECT}"')
        output = self.run_ostree('--repo={OSTREE_BARE} summary -u')
        bb.note(self.format('OSTree primary repository {OSTREE_BARE} summary:\n{0}', output))
    def checkout_sysroot(self):
        """
        Replicate the ostree repository into the OSTree rootfs and make a checkout/deploy.
        """
        if os.path.isdir(self.OSTREE_ROOTFS):
            shutil.rmtree(self.OSTREE_ROOTFS)
        bb.note(self.format('Initializing OSTree rootfs {OSTREE_ROOTFS} ...'))
        bb.utils.mkdirhier(self.OSTREE_ROOTFS)
        self.run_ostree('admin --sysroot={OSTREE_ROOTFS} init-fs {OSTREE_ROOTFS}')
        self.run_ostree('admin --sysroot={OSTREE_ROOTFS} os-init {OSTREE_OS}')
        bb.note(self.format('Replicating primary OSTree repository {OSTREE_BARE} branch {OSTREE_BRANCHNAME} into OSTree rootfs {OSTREE_ROOTFS} ...'))
        self.run_ostree('--repo={OSTREE_ROOTFS}/ostree/repo pull-local --remote=updates {OSTREE_BARE} {OSTREE_BRANCHNAME}')
        bb.note('Deploying sysroot from OSTree sysroot repository...')
        self.run_ostree('admin --sysroot={OSTREE_ROOTFS} deploy --os={OSTREE_OS} updates:{OSTREE_BRANCHNAME}')
        # OSTree initialized var for our OS, but we want the original rootfs content instead.
        src = os.path.join(self.IMAGE_ROOTFS, 'var')
        dst = os.path.join(self.OSTREE_ROOTFS, 'ostree', 'deploy', self.OSTREE_OS, 'var')
        bb.note(self.format('Copying /var from rootfs to OSTree rootfs as {} ...', dst))
        shutil.rmtree(dst)
        oe.path.copyhardlinktree(src, dst)
        # Optionally point the 'updates' remote at the configured URL so the
        # deployed image can pull upgrades.
        if self.OSTREE_REMOTE:
            bb.note(self.format('Setting OSTree remote to {OSTREE_REMOTE} ...'))
            self.run_ostree('remote add --repo={OSTREE_ROOTFS}/ostree/repo '
                            '--gpg-import={OSTREE_GPGDIR}/pubring.gpg '
                            'updates {OSTREE_REMOTE}')
    def finalize_sysroot(self):
        """
        Finalize the physical root directory after the ostree checkout.
        """
        bb.note(self.format('Creating EFI mount point /boot/efi in OSTree rootfs {OSTREE_ROOTFS} ...'))
        bb.utils.mkdirhier(os.path.join(self.OSTREE_ROOTFS, 'boot', 'efi'))
        bb.note(self.format('Copying pristine rootfs {IMAGE_ROOTFS}/home to OSTree rootfs {OSTREE_ROOTFS} ...'))
        oe.path.copyhardlinktree(os.path.join(self.IMAGE_ROOTFS, 'home'),
                                 os.path.join(self.OSTREE_ROOTFS, 'home'))
    def prepare_rootfs(self):
        """
        Create the intermediate, bare repo and a fully functional rootfs for the target device
        where the current build is deployed.
        """
        # High-level driver; the four steps must run in this order.
        self.prepare_sysroot()
        self.populate_repo()
        self.checkout_sysroot()
        self.finalize_sysroot()
    def export_repo(self):
        """
        Export data from a primary OSTree repository to the given (archive-z2) one.
        """
        bb.note(self.format('Exporting primary repository {OSTREE_BARE} to export repository {OSTREE_REPO}...'))
        # Unlike the bare repo, the export repo is created once and reused.
        if not os.path.isdir(self.OSTREE_REPO):
            bb.note("Initializing repository %s for exporting..." % self.OSTREE_REPO)
            bb.utils.mkdirhier(self.OSTREE_REPO)
            self.run_ostree('--repo={OSTREE_REPO} init --mode=archive-z2')
        self.run_ostree('--repo={OSTREE_REPO} pull-local --remote={OSTREE_OS} {OSTREE_BARE} {OSTREE_BRANCHNAME}')
        self.run_ostree('--repo={OSTREE_REPO} commit {gpg_sign} --branch={OSTREE_BRANCHNAME} --tree=ref={OSTREE_OS}:{OSTREE_BRANCHNAME}')
        self.run_ostree('--repo={OSTREE_REPO} summary {gpg_sign} -u')
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, shape=None):
  """Tests if two initializations are identical to within tiny tolerances.
  Args:
    tc: An instance of TensorFlowTestCase.
    init1: An Initializer that generates a tensor of a given shape
    init2: An Initializer that generates a tensor of a given shape
    shape: Shape of the tensor to initialize or `None` to use a vector of length
      100.
  Returns:
    True or False as determined by test.
  """
  target_shape = [100] if shape is None else shape
  # Evaluate each initializer in its own fresh graph/session so graph-level
  # state from one cannot influence the other.
  with tc.test_session(graph=ops.Graph()):
    first = init1(target_shape).eval()
  with tc.test_session(graph=ops.Graph()):
    second = init2(target_shape).eval()
  return np.allclose(first, second, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, graph_seed, shape=None):
  """Tests duplicated random initializer within the same graph.
  This test generates two random kernels from the same initializer in the same
  graph, and checks if the results are close enough. Even given the same
  global seed, two different instances of random kernels should generate
  different results.
  Args:
    tc: An instance of TensorFlowTestCase.
    init: An Initializer that generates a tensor of a given shape
    graph_seed: A graph-level seed to use.
    shape: Shape of the tensor to initialize or `None` to use a vector of length
      100.
  Returns:
    True or False as determined by test.
  """
  target_shape = shape if shape is not None else [100]
  with tc.test_session(graph=ops.Graph()):
    random_seed.set_random_seed(graph_seed)
    first = init(target_shape).eval()
    second = init(target_shape).eval()
  return np.allclose(first, second, rtol=1e-15, atol=1e-15)
def _init_sampler(tc, init, num):
  """Returns a func to generate a random tensor of shape [num].
  Args:
    tc: An instance of TensorFlowTestCase.
    init: An Initializer that generates a tensor of a given shape
    num: Size of 1D tensor to create.
  Returns:
    Function to generate a random tensor.
  """
  def sample():
    # Deferred evaluation: each call opens a session and draws a fresh sample.
    with tc.test_session(use_gpu=True):
      return init([num]).eval()
  return sample
class ConstantInitializersTest(test.TestCase):
  """Tests for zeros/ones/constant initializers, scalar and N-dim values."""
  def testZerosInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.zeros_initializer())
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.zeros(shape))
  def testOnesInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.ones_initializer())
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.ones(shape))
  def testConstantZeroInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(0.0))
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.zeros(shape))
  def testConstantOneInitializer(self):
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x", shape=shape, initializer=init_ops.constant_initializer(1.0))
      x.initializer.run()
      self.assertAllEqual(x.eval(), np.ones(shape))
  def testConstantIntInitializer(self):
    # Integer constants must respect the requested dtype.
    with self.test_session(use_gpu=True):
      shape = [2, 3]
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          dtype=dtypes.int32,
          initializer=init_ops.constant_initializer(7))
      x.initializer.run()
      self.assertEqual(x.dtype.base_dtype, dtypes.int32)
      self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))
  def _testNDimConstantInitializer(self, name, value, shape, expected):
    # Helper: value has exactly as many elements as shape; compare flattened.
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      x.initializer.run()
      actual = array_ops.reshape(x, [-1]).eval()
      self.assertEqual(len(actual), len(expected))
      for a, e in zip(actual, expected):
        self.assertEqual(a, e)
  def testNDimConstantInitializer(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 3]
    expected = list(value)
    self._testNDimConstantInitializer("list", value, shape, expected)
    self._testNDimConstantInitializer("ndarray",
                                      np.asarray(value), shape, expected)
    self._testNDimConstantInitializer("2D-ndarray",
                                      np.asarray(value).reshape(tuple(shape)),
                                      shape, expected)
  def _testNDimConstantInitializerLessValues(self, name, value, shape,
                                             expected):
    # Helper: value has fewer elements than shape; the last element is
    # expected to be repeated to fill the remainder.
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      x = variable_scope.get_variable(name, shape=shape, initializer=init)
      x.initializer.run()
      actual = array_ops.reshape(x, [-1]).eval()
      self.assertGreater(len(actual), len(expected))
      for i in xrange(len(actual)):
        a = actual[i]
        e = expected[i] if i < len(expected) else expected[-1]
        self.assertEqual(a, e)
  def testNDimConstantInitializerLessValues(self):
    value = [0, 1, 2, 3, 4, 5]
    shape = [2, 4]
    expected = list(value)
    self._testNDimConstantInitializerLessValues("list", value, shape, expected)
    self._testNDimConstantInitializerLessValues("ndarray",
                                                np.asarray(value), shape,
                                                expected)
    self._testNDimConstantInitializerLessValues(
        "2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected)
  def _testNDimConstantInitializerMoreValues(self, value, shape):
    # Helper: value has more elements than shape; variable creation must fail.
    ops.reset_default_graph()
    with self.test_session(use_gpu=True):
      init = init_ops.constant_initializer(value, dtype=dtypes.int32)
      self.assertRaises(
          ValueError,
          variable_scope.get_variable,
          "x",
          shape=shape,
          initializer=init)
  def testNDimConstantInitializerMoreValues(self):
    value = [0, 1, 2, 3, 4, 5, 6, 7]
    shape = [2, 3]
    self._testNDimConstantInitializerMoreValues(value, shape)
    self._testNDimConstantInitializerMoreValues(np.asarray(value), shape)
    self._testNDimConstantInitializerMoreValues(
        np.asarray(value).reshape(tuple([2, 4])), shape)
class RandomNormalInitializationTest(test.TestCase):
  """Seed-determinism tests for random_normal_initializer."""
  def testInitializerIdentical(self):
    # Same seed => identical tensors.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
      init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2))
  def testInitializerDifferent(self):
    # Different seeds => different tensors.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
      init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2))
  def testDuplicatedInitializer(self):
    # Two draws from one initializer in the same graph must differ.
    init = init_ops.random_normal_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, init, 1))
  def testInvalidDataType(self):
    # Non-floating dtypes are rejected at construction time.
    self.assertRaises(
        ValueError,
        init_ops.random_normal_initializer,
        0.0,
        1.0,
        dtype=dtypes.string)
class TruncatedNormalInitializationTest(test.TestCase):
  """Seed-determinism tests for truncated_normal_initializer."""
  def testInitializerIdentical(self):
    # Same seed => identical tensors.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=1, dtype=dtype)
      init2 = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2))
  def testInitializerDifferent(self):
    # Different seeds => different tensors.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=1, dtype=dtype)
      init2 = init_ops.truncated_normal_initializer(
          0.0, 1.0, seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2))
  def testDuplicatedInitializer(self):
    # Two draws from one initializer in the same graph must differ.
    init = init_ops.truncated_normal_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, init, 1))
  def testInvalidDataType(self):
    # Non-floating dtypes are rejected at construction time.
    self.assertRaises(
        ValueError,
        init_ops.truncated_normal_initializer,
        0.0,
        1.0,
        dtype=dtypes.string)
class RandomUniformInitializationTest(test.TestCase):
  """Seed-determinism tests for random_uniform_initializer."""
  def testInitializerIdentical(self):
    # NOTE(review): int32 is exercised in testInitializerDifferent below but
    # not here -- presumably an oversight; confirm before adding it.
    for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]:
      init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
      init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2))
  def testInitializerDifferent(self):
    # Different seeds => different tensors.
    for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]:
      init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
      init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2))
  def testDuplicatedInitializer(self):
    # Two draws from one initializer in the same graph must differ.
    init = init_ops.random_uniform_initializer(0.0, 1.0)
    self.assertFalse(duplicated_initializer(self, init, 1))
class UniformUnitScalingInitializationTest(test.TestCase):
  """Seed-determinism and edge-case tests for uniform_unit_scaling_initializer."""
  def testInitializerIdentical(self):
    # Same seed (with and without an explicit factor) => identical tensors.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2))
      init3 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      init4 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init3, init4))
  def testInitializerDifferent(self):
    # Different seed or different scaling factor => different tensors.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
      init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
      init3 = init_ops.uniform_unit_scaling_initializer(
          1.5, seed=1, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2))
      self.assertFalse(identicaltest(self, init1, init3))
      self.assertFalse(identicaltest(self, init2, init3))
  def testZeroSize(self):
    # A zero-element shape must initialize without error.
    shape = [0, 2]
    with self.test_session():
      x = variable_scope.get_variable(
          "x",
          shape=shape,
          initializer=init_ops.uniform_unit_scaling_initializer())
      variables.global_variables_initializer().run()
      self.assertAllEqual(shape, x.eval().shape)
  def testDuplicatedInitializer(self):
    # Two draws from one initializer in the same graph must differ.
    init = init_ops.uniform_unit_scaling_initializer()
    self.assertFalse(duplicated_initializer(self, init, 1))
  def testInvalidDataType(self):
    # Non-floating dtypes are rejected at construction time.
    self.assertRaises(
        ValueError,
        init_ops.uniform_unit_scaling_initializer,
        dtype=dtypes.string)
# TODO(vrv): move to sequence_ops_test?
class RangeTest(test.TestCase):
  """Tests for math_ops.range: stepping, dtypes, empty and negative deltas."""
  def _Range(self, start, limit, delta):
    # Helper: build the op, check its static shape against np.arange, eval.
    with self.test_session(use_gpu=True):
      tf_ans = math_ops.range(start, limit, delta, name="range")
      self.assertEqual([len(np.arange(start, limit, delta))],
                       tf_ans.get_shape())
      return tf_ans.eval()
  def testBasic(self):
    self.assertTrue(
        np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
    self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4])))
    self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4])))
    self.assertTrue(
        np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27])))
    self.assertTrue(
        np.array_equal(
            self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
    self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)
  def testLimitOnly(self):
    # Single-argument form: range(limit) starts at 0 with delta 1.
    with self.test_session(use_gpu=True):
      self.assertAllEqual(np.arange(5), math_ops.range(5).eval())
  def testEmpty(self):
    for start in 0, 5:
      self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
  def testNonInteger(self):
    self.assertTrue(
        np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5])))
    self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5])))
    self.assertTrue(
        np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7])))
    self.assertTrue(
        np.allclose(
            self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))
    self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)
  def testNegativeDelta(self):
    self.assertTrue(
        np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
    self.assertTrue(
        np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
    self.assertTrue(
        np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))
  def testDType(self):
    # The result dtype follows the inputs' promotion rules unless an
    # explicit dtype= is given, which always wins.
    zero_int32 = math_ops.cast(0, dtypes.int32)
    zero_int64 = math_ops.cast(0, dtypes.int64)
    zero_float32 = math_ops.cast(0, dtypes.float32)
    zero_float64 = math_ops.cast(0, dtypes.float64)
    self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)
    self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)
    self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)
    self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)
    self.assertEqual(
        math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)
    self.assertEqual(
        math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)
    self.assertEqual(
        math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)
    self.assertEqual(
        math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)
    self.assertEqual(
        math_ops.range(
            0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(test.TestCase):
  """Tests for math_ops.linspace on CPU and, when available, GPU."""

  def _gpu_modes(self):
    # Exercise the force_gpu path only when a GPU is actually present.
    return [False, True] if test.is_gpu_available() else [False]

  def _LinSpace(self, start, stop, num):
    # NOTE(touts): Needs to pass a graph to get a new session each time.
    with ops.Graph().as_default() as graph:
      with self.test_session(graph=graph, force_gpu=self.force_gpu):
        result = math_ops.linspace(start, stop, num, name="linspace")
        self.assertEqual([num], result.get_shape())
        return result.eval()

  def testPositive(self):
    for self.force_gpu in self._gpu_modes():
      for num, expected in [(1, [1.]),
                            (2, [1., 5.]),
                            (3, [1., 3., 5.]),
                            (4, [1., 7. / 3., 11. / 3., 5.])]:
        self.assertArrayNear(self._LinSpace(1., 5., num),
                             np.array(expected), 1e-5)

  def testNegative(self):
    for self.force_gpu in self._gpu_modes():
      for num, expected in [(1, [-1.]),
                            (2, [-1., -5.]),
                            (3, [-1., -3., -5.]),
                            (4, [-1., -7. / 3., -11. / 3., -5.])]:
        self.assertArrayNear(self._LinSpace(-1., -5., num),
                             np.array(expected), 1e-5)

  def testNegativeToPositive(self):
    for self.force_gpu in self._gpu_modes():
      for num, expected in [(1, [-1.]),
                            (2, [-1., 5.]),
                            (3, [-1., 2., 5.]),
                            (4, [-1., 1., 3., 5.])]:
        self.assertArrayNear(self._LinSpace(-1., 5., num),
                             np.array(expected), 1e-5)

  def testPoint(self):
    # Degenerate interval: every sample equals the single endpoint.
    for self.force_gpu in self._gpu_modes():
      for num in (1, 2, 3, 4):
        self.assertArrayNear(self._LinSpace(5., 5., num),
                             np.array([5.] * num), 1e-5)
class DeviceTest(test.TestCase):
  """Checks device placement of variables and their initializer ops."""

  def testNoDevice(self):
    # Without an explicit device scope, neither the variable nor its
    # initializer should be pinned anywhere.
    with ops.Graph().as_default():
      v = variables.Variable([[1.0, 1.0]])
      self.assertDeviceEqual(None, v.device)
      self.assertDeviceEqual(None, v.initializer.device)

  def testDevice(self):
    # Both the variable and its initializer inherit the enclosing device.
    with ops.Graph().as_default():
      with ops.device("/job:ps"):
        v = variables.Variable([[1.0, 1.0]])
      self.assertDeviceEqual("/job:ps", v.device)
      self.assertDeviceEqual("/job:ps", v.initializer.device)
class OrthogonalInitializerTest(test.TestCase):
  """Tests for init_ops.orthogonal_initializer: determinism under a fixed
  seed, gain scaling, shape validation and orthogonality of the output."""

  def testInitializerIdentical(self):
    # Same seed must yield identical values.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      self.assertTrue(identicaltest(self, init1, init2, (10, 10)))

  def testInitializerDifferent(self):
    # Different seeds must yield different values.
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
      self.assertFalse(identicaltest(self, init1, init2, (10, 10)))

  def testDuplicatedInitializer(self):
    init = init_ops.orthogonal_initializer()
    self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))

  def testInvalidDataType(self):
    self.assertRaises(
        ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)

  def testInvalidShape(self):
    # Orthogonal init needs at least a rank-2 shape.
    init1 = init_ops.orthogonal_initializer()
    with self.test_session(graph=ops.Graph(), use_gpu=True):
      self.assertRaises(ValueError, init1, shape=[5])

  def testGain(self):
    # gain must act as a pure scale factor on the orthogonal matrix.
    shape = (10, 10)
    for dtype in [dtypes.float32, dtypes.float64]:
      init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
      init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t1 = init1(shape).eval()
      with self.test_session(graph=ops.Graph(), use_gpu=True):
        t2 = init2(shape).eval()
      # BUG FIX: this method previously ended with `return np.allclose(...)`,
      # which discarded the comparison result (the test could never fail) and
      # returned before the float64 iteration ran.  Assert for real instead.
      self.assertAllClose(t1, t2 / 3.14)

  def testShapesValues(self):
    for dtype in [dtypes.float32, dtypes.float64]:
      for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
        init = init_ops.orthogonal_initializer(dtype=dtype)
        tol = 1e-5 if dtype == dtypes.float32 else 1e-12
        with self.test_session(graph=ops.Graph(), use_gpu=True):
          # Check the shape
          t = init(shape).eval()
          self.assertAllEqual(shape, t.shape)
          # Check orthogonality by computing the inner product
          t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
          if t.shape[0] > t.shape[1]:
            self.assertAllClose(
                np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
          else:
            self.assertAllClose(
                np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  test.main()
| |
# -*- coding: utf-8 -*-
"""Backup functions for pg_dump"""
# Python stdlib
import os
import shlex
import tempfile
import logging
import subprocess
# 3rd party Postgres db connector
import psycopg2 as dbapi
import psycopg2.extensions
from holland.core.exceptions import BackupError
# holland-core has a few nice utilities such as format_bytes
from holland.core.util.fmt import format_bytes
# Holland general compression functions
from holland.lib.compression import open_stream
# holland-common safefilename encoding
from holland.lib.safefilename import encode as encode_safe
LOG = logging.getLogger(__name__)
class PgError(BackupError):
    """Raised for any Postgres-related failure during a backup run:
    connection problems, failed queries, or pg_dump/pg_dumpall subprocess
    errors."""
def get_connection(config, db='template1'):
    """Open an autocommit psycopg2 connection using config['pgauth'].

    :param config: PgDumpPlugin config dictionary
    :param db: database to connect to (default: template1)
    :returns: open psycopg2 connection
    :raises: PgError if connecting or SET ROLE fails

    Side effect: stores the server version string in the module-level
    ``ver`` global, which pgauth2args() consults later.
    """
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    args = {}
    # remap pgauth parameters to what psycopg2.connect accepts
    remap = { 'hostname' : 'host', 'username' : 'user' }
    for key in ('hostname', 'port', 'username', 'password'):
        value = config['pgauth'].get(key)
        key = remap.get(key, key)
        if value is not None:
            args[key] = value
    connection = dbapi.connect(database=db, **args)
    if not connection:
        raise PgError("Failed to connect to the Postgres database.")
    # set connection in autocommit mode
    connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
    if config["pgdump"]["role"]:
        try:
            cursor = connection.cursor()
            # NOTE(review): role name is interpolated directly into SQL; it
            # comes from operator-controlled config, not untrusted input.
            cursor.execute("SET ROLE %s" % config["pgdump"]["role"])
        except dbapi.Error:
            # BUG FIX: narrowed from a bare except; only database errors mean
            # SET ROLE failed -- anything else should propagate unmasked.
            raise PgError("Failed to set role to " + config["pgdump"]["role"])
    global ver
    ver = connection.get_parameter_status('server_version')
    LOG.info("Server version " + ver)
    return connection
def get_db_size(dbname, connection):
    """Return the on-disk size of ``dbname`` in bytes via pg_database_size().

    :param dbname: name of the database to measure
    :param connection: open psycopg2 connection
    :raises: PgError if the size query fails
    """
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT pg_database_size('%s')" % dbname)
        size = int(cursor.fetchone()[0])
        LOG.info("DB %s size %s", dbname, format_bytes(size))
        return size
    except dbapi.Error:
        # BUG FIX: narrowed from a bare except (which also swallowed
        # KeyboardInterrupt etc.) and fixed the "detmine" typo.
        raise PgError("Could not determine database size.")
def legacy_get_db_size(dbname, connection):
    """Estimate the database size by summing relpages (8k pages) over
    pg_class, for servers without pg_database_size()."""
    cur = connection.cursor()
    cur.execute('SELECT SUM(relpages*8192) FROM pg_class')
    total = int(cur.fetchone()[0])
    LOG.info("DB %s size %s", dbname, format_bytes(total))
    cur.close()
    return total
def pg_databases(config, connection):
    """Find the databases available in the Postgres cluster specified
    in config['pgpass']

    :param connection: open psycopg2 connection
    :returns: list of connectable, non-template database names
    """
    cursor = connection.cursor()
    cursor.execute("SELECT datname FROM pg_database WHERE not datistemplate and datallowconn")
    databases = [db for db, in cursor]
    cursor.close()
    # CONSISTENCY FIX: use the module logger LOG like the rest of this file,
    # not the root logger via logging.debug.
    LOG.debug("pg_databases() -> %r", databases)
    return databases
def run_pgdump(dbname, output_stream, connection_params, format='custom', env=None):
    """Run pg_dump for the given database and write to the specified output
    stream.

    :param dbname: database name
    :type dbname: str
    :param output_stream: a file-like object - must have a fileno attribute
                          that is a real, open file descriptor
    :param connection_params: list of extra pg_dump command-line arguments
    :param format: pg_dump output format ('custom', 'plain' or 'tar')
    :param env: environment dict for the subprocess (e.g. with PGPASSFILE)
    :raises: PgError if pg_dump cannot be executed or exits non-zero
    """
    args = [ 'pg_dump' ] + connection_params + [
        '--format', format,
        dbname
    ]
    LOG.info('%s > %s', subprocess.list2cmdline(args),
             output_stream.name)
    # pg_dump's stderr is buffered to a temp file and replayed into the log
    # after the process exits.
    stderr = tempfile.TemporaryFile()
    try:
        try:
            returncode = subprocess.call(args,
                                         stdout=output_stream,
                                         stderr=stderr,
                                         env=env,
                                         close_fds=True)
        except OSError, exc:
            # raised when the pg_dump binary itself cannot be executed
            raise PgError("Failed to execute '%s': [%d] %s" %
                          (args[0], exc.errno, exc.strerror))
        stderr.flush()
        stderr.seek(0)
        for line in stderr:
            LOG.error('%s', line.rstrip())
    finally:
        stderr.close()
    if returncode != 0:
        raise PgError("%s failed." % subprocess.list2cmdline(args))
def backup_globals(backup_directory, config, connection_params, env=None):
"""Backup global Postgres data that wouldn't otherwise
be captured by pg_dump.
Runs pg_dumpall -g > $backup_dir/globals.sql
:param backup_directory: directory to save pg_dump output to
:param config: PgDumpPlugin config dictionary
:raises: OSError, PgError on error
"""
path = os.path.join(backup_directory, 'global.sql')
zopts = config['compression']
output_stream = open_stream(path, 'w',
method=zopts['method'],
level=zopts['level'],
extra_args=zopts['options'])
args = [
'pg_dumpall',
'-g',
] + connection_params
LOG.info('%s > %s', subprocess.list2cmdline(args),
output_stream.name)
stderr = tempfile.TemporaryFile()
try:
try:
returncode = subprocess.call(args,
stdout=output_stream,
stderr=stderr,
env=env,
close_fds=True)
except OSError, exc:
raise PgError("Failed to execute '%s': [%d] %s" %
(args[0], exc.errno, exc.strerror))
output_stream.close()
stderr.flush()
stderr.seek(0)
for line in stderr:
LOG.error('%s', line.rstrip())
finally:
stderr.close()
if returncode != 0:
raise PgError("pg_dumpall command exited with failure code %d." %
returncode)
def generate_manifest(backups, path):
    """Write a MANIFEST file mapping each database name to its dump file.

    :param backups: iterable of (dbname, dumpfile_path) pairs
    :param path: directory in which to create the MANIFEST file
    """
    manifest = open(os.path.join(path, 'MANIFEST'), 'w')
    for dbname, dumpfile in backups:
        try:
            # one tab-separated line per dump: <utf8 dbname>\t<file basename>
            print >>manifest, "%s\t%s" % (dbname.encode('utf8'),
                                          os.path.basename(dumpfile))
        except UnicodeError, exc:
            # skip names we cannot encode rather than aborting the backup
            LOG.error("Failed to encode dbname %s: %s", dbname, exc)
    manifest.close()
def pgauth2args(config):
args = []
remap = { 'hostname' : 'host' }
for param in ('hostname', 'port', 'username'):
value = config['pgauth'].get(param)
key = remap.get(param, param)
if value is not None:
args.extend(['--%s' % key, str(value)])
# FIXME: --role only works on 8.4+
if config['pgdump']['role']:
if ver >= '8.4':
args.extend(['--role', config['pgdump']['role']])
else:
raise PgError("The --role option is available only in Postgres versions 8.4 and higher.")
return args
def pg_extra_options(config):
    """Build extra pg_dump options from compression and additional-options
    config settings.

    :param config: PgDumpPlugin config dictionary.  NOTE: may be mutated --
                   the compression method is forced to 'none' when the
                   custom format is selected.
    :returns: list of additional pg_dump argument strings
    """
    args = []
    # normal compression doesn't make sense with --format=custom
    # use pg_dump's builtin --compress option instead
    if config['pgdump']['format'] == 'custom':
        LOG.info("Ignore compression method, since custom format is in use.")
        config['compression']['method'] = 'none'
        args += ['--compress',
                 str(config['compression']['level'])]
    additional_options = config['pgdump']['additional-options']
    if additional_options:
        # XXX: we may want to check these options more carefully and warn as appropriate.
        # encode to a byte string first: Python 2's shlex mishandles unicode
        additional_options = additional_options.encode('utf8')
        args += shlex.split(additional_options)
    return args
def generate_pgpassfile(backup_directory, password):
    """Write a pgpass file that supplies ``password`` for any host/db/user.

    :param backup_directory: directory in which to create the 'pgpass' file
    :param password: plaintext password; ':' and '\\' are escaped per the
                     pgpass file format
    :returns: path to the generated pgpass file
    """
    fileobj = open(os.path.join(backup_directory, 'pgpass'), 'w')
    try:
        # pgpass should always be 0600.  COMPAT FIX: the old literal 0600 is
        # a SyntaxError on Python 3; 0o600 works on Python 2.6+ and 3.x.
        os.chmod(fileobj.name, 0o600)
        # escape backslashes first so escaped colons aren't double-escaped
        password = password.replace('\\', '\\\\')
        password = password.replace(':', '\\:')
        fileobj.write('*:*:*:*:%s' % password)
    finally:
        # BUG FIX: close the file even if chmod or the write raises
        fileobj.close()
    return fileobj.name
def backup_pgsql(backup_directory, config, databases):
    """Backup databases in a Postgres instance

    :param backup_directory: directory to save pg_dump output to
    :param config: PgDumpPlugin config dictionary
    :param databases: iterable of database names to dump
    :raises: OSError, PgError on error
    """
    connection_params = pgauth2args(config)
    extra_options = pg_extra_options(config)
    pgenv = dict(os.environ)
    if config['pgauth']['password'] is not None:
        # write the password to a pgpass file and point libpq at it via
        # PGPASSFILE so pg_dump/pg_dumpall never see it on the command line
        pgpass_file = generate_pgpassfile(backup_directory,
                                          config['pgauth']['password'])
        if 'PGPASSFILE' in pgenv:
            LOG.warn("Overriding PGPASSFILE in environment with %s because "
                     "a password is specified.",
                     pgpass_file)
        pgenv['PGPASSFILE'] = pgpass_file
    # roles/users/tablespaces etc. are not covered by per-db pg_dump runs
    backup_globals(backup_directory, config, connection_params, env=pgenv)
    # dump-file extension per pg_dump output format
    ext_map = {
        'custom' : '.dump',
        'plain' : '.sql',
        'tar' : '.tar',
    }
    backups = []
    for dbname in databases:
        format = config['pgdump']['format']
        # database names may contain characters unsafe for filenames
        dump_name, _ = encode_safe(dbname)
        if dump_name != dbname:
            LOG.warn("Encoded database %s as filename %s", dbname, dump_name)
        filename = os.path.join(backup_directory, dump_name + ext_map[format])
        zopts = config['compression']
        stream = open_stream(filename, 'w',
                             method=zopts['method'],
                             level=zopts['level'],
                             extra_args=zopts['options'])
        backups.append((dbname, stream.name))
        run_pgdump(dbname=dbname,
                   output_stream=stream,
                   connection_params=connection_params + extra_options,
                   format=format,
                   env=pgenv)
        stream.close()
    # record which dump belongs to which database
    generate_manifest(backups, backup_directory)
def dry_run(databases, config):
    """Log the pg_dump commands a real backup would run, without executing
    anything."""
    auth_args = pgauth2args(config)
    LOG.info("pg_dumpall -g")
    cmdline = subprocess.list2cmdline(auth_args)
    fmt = config['pgdump']['format']
    for dbname in databases:
        LOG.info("pg_dump %s --format %s %s", cmdline, fmt, dbname)
| |
import bisect
import json as json_
from collections import namedtuple
from jembatan.core.spandex.typesys_base import Span, Annotation, AnnotationScope
from pathlib import Path
from typing import ClassVar, Dict, Iterable, Optional, Tuple, Union
# Names of the two built-in views every document may carry.
# TYPO FIX: the namedtuple typename was "SpandexContstants", which mismatched
# the variable name and leaked into repr()/pickling.
SpandexConstants = namedtuple("SpandexConstants", ["SPANDEX_DEFAULT_VIEW", "SPANDEX_URI_VIEW"])
constants = SpandexConstants("_SpandexDefaultView", "_SpandexUriView")
# object is mutable for performance reasons
class Spandex(object):
    """
    Spandex - data structure for holding a view of data, its content, and annotations.

    Annotations are kept sorted together with a parallel list of their
    index keys so span queries can use bisect.
    """

    def __init__(self, parent: "Jembatan", content_string: str=None, content_mime: str = None, viewname=None):
        self._parent = parent
        self._content_string = content_string
        self._content_mime = content_mime
        # sorted annotations and their index keys, kept in lockstep
        self._annotations = []
        self._annotation_keys = []
        self.viewname = viewname

    def __repr__(self):
        return "<{}/{} at 0x{:x}>".format(self.__class__.__name__, self.viewname, id(self))

    @property
    def parent(self) -> "JembatanDoc":
        """Document that owns this view."""
        return self._parent

    @property
    def content_string(self) -> str:
        return self._content_string

    @content_string.setter
    def content_string(self, value: str):
        self._content_string = value

    @property
    def content_mime(self) -> str:
        return self._content_mime

    @content_mime.setter
    def content_mime(self, value: str):
        self._content_mime = value

    @property
    def annotations(self) -> Iterable[Annotation]:
        return self._annotations

    def compute_keys(self, annotations: Iterable[Annotation]) -> Iterable[Tuple]:
        """Index keys (used by the bisect-based queries) for the annotations."""
        return [a.index_key for a in annotations]

    def spanned_text(self, span: Span) -> str:
        """
        Return text covered by the span
        """
        return self.content_string[span.begin:span.end]

    def add_annotations(self, *annotations: Annotation):
        """Insert annotations, re-sorting and rebuilding the key index."""
        items = sorted(self._annotations + list(annotations))
        keys = self.compute_keys(items)
        self._annotations = items
        self._annotation_keys = keys

    def index_annotations(self, *annotations: Annotation):
        """Alias for add_annotations."""
        # BUG FIX: this previously called add_annotations(annotations),
        # passing the whole tuple as one positional argument and therefore
        # indexing a single tuple object instead of the annotations.
        return self.add_annotations(*annotations)

    def select(self, type_: ClassVar[Annotation]) -> Iterable[Annotation]:
        """
        Return all annotations of type_
        """
        return [a for a in self.annotations if isinstance(a, type_)]

    def select_covered(self, type_: ClassVar[Annotation], span: Span) -> Iterable[Annotation]:
        """
        Return all annotations in a type_ that are covered by the input span
        """
        # restrict the scan window via bisect over the precomputed keys
        begin = bisect.bisect_left(self._annotation_keys, (AnnotationScope.SPAN, span.begin))
        end = bisect.bisect_left(self._annotation_keys, (AnnotationScope.SPAN, span.end))
        return [a for a in self.annotations[begin:end] if isinstance(a, type_)]

    def select_preceding(self, type_: ClassVar[Annotation], span: Span, count: int=None) -> Iterable[Annotation]:
        """
        Return all annotations in a type_ that precede the input span
        (optionally only the ``count`` closest ones)
        """
        precede_span = Span(begin=0, end=span.begin)
        preceding = self.select_covered(type_, precede_span)
        return preceding if count is None else preceding[-count:]

    def select_following(self, type_: ClassVar[Annotation], span: Span, count: int=None) -> Iterable[Annotation]:
        """
        Return all annotations in a type_ that follow the input span
        (optionally only the first ``count`` of them)
        """
        follow_span = Span(begin=span.end+1, end=len(self.content_string))
        following = self.select_covered(type_, follow_span)
        return following if count is None else following[0:count]

    def select_all(self, span: Span) -> Iterable[Annotation]:
        """
        Return all annotations in a view (the span argument is ignored)
        """
        return self.annotations

    def to_json(self, path: Union[str, Path, None] = None, pretty_print: bool = False) -> Optional[str]:
        """Creates a JSON representation of this Spandex.

        Args:
            path: File path, if `None` is provided the result is returned as a string
            pretty_print: `True` if the resulting JSON should be pretty-printed, else `False`

        Returns:
            If `path` is None, then the JSON representation of this Spandex is returned as a string
        """
        # imported lazily to avoid a circular import with the json module
        from jembatan.core.spandex.json import SpandexJsonEncoder
        indent = 4 if pretty_print else None
        # If `path` is None, then serialize to a string and return it
        if path is None:
            return json_.dumps(self, cls=SpandexJsonEncoder, indent=indent)
        elif isinstance(path, str):
            with open(path, "w") as f:
                json_.dump(self, f, cls=SpandexJsonEncoder, indent=indent)
        elif isinstance(path, Path):
            with path.open("w") as f:
                json_.dump(self, f, cls=SpandexJsonEncoder, indent=indent)
        else:
            raise TypeError("`path` needs to be one of [str, None, Path], but was <{0}>".format(type(path)))
class JembatanDoc(object):
    """
    Top level container for processing. The JembatanDoc roughly describes / manages a document or artifact.
    It is responsible for managing views.
    """

    def __init__(self, metadata: Dict=None, content_string: str=None, content_mime: str=None):
        self.metadata = metadata
        self._views = {}
        # every document starts with a default view holding the raw content
        self.create_view(
            constants.SPANDEX_DEFAULT_VIEW,
            content_string=content_string,
            content_mime=content_mime,
        )

    @property
    def default_view(self):
        """The view created implicitly at construction time."""
        return self.get_view(constants.SPANDEX_DEFAULT_VIEW)

    def get_view(self, viewname: str):
        """Return the named view.

        :raises KeyError: if no view with that name exists
        """
        try:
            return self.views[viewname]
        except KeyError:
            # unused 'as e' binding removed; re-raise with a clearer message
            raise KeyError("No view named '{}' in Jembatan {}".format(viewname, self))

    def get_or_create_view(self, viewname: str):
        """Return the named view, creating an empty one if it is absent."""
        try:
            view = self.get_view(viewname)
        except KeyError:
            view = self.create_view(viewname)
        return view

    def __getitem__(self, viewname: str):
        return self.get_view(viewname)

    def create_view(self, viewname: str, content_string: str=None, content_mime: str=None):
        """Create, register and return a new Spandex view.

        :raises KeyError: if a view with that name already exists
        """
        if viewname in self.views:
            # MESSAGE FIX: the original read "...in Jembatan{}" with no space
            raise KeyError("View {} already exists in Jembatan {}".format(viewname, self))
        new_view_spndx = Spandex(content_string=content_string, content_mime=content_mime, parent=self, viewname=viewname)
        self.views[viewname] = new_view_spndx
        return new_view_spndx

    @property
    def views(self):
        """Mapping of view name -> Spandex."""
        return self._views
# Thin proxy around a Spandex that substitutes a view-mapped parent, so code
# navigating view.parent stays inside the mapped document.
class ViewMappedSpandex(object):
    def __init__(self, spandex: Spandex, view_mapped_parent: JembatanDoc):
        '''
        Wrapper constructor.
        @param spandex: Spandex view to wrap
        @param view_mapped_parent: wrapper document to report as this view's
            parent; its wrapped document must be the spandex's real parent
        '''
        # wrap the object
        self._wrapped_spandex = spandex
        self._wrapped_parent = view_mapped_parent
        # sanity check: the mapped parent must wrap the same document that
        # actually owns this view
        if view_mapped_parent.wrapped != self.wrapped.parent:
            raise ValueError("Can not wrap parent from different Jembatans")

    def __getattr__(self, attr):
        # see if this object has attr
        # NOTE do not use hasattr, it goes into
        # infinite recursion
        # NOTE(review): __getattr__ only fires after normal attribute lookup
        # fails, so the __dict__ branch below appears unreachable; kept as-is.
        if attr in self.__dict__:
            # this object has it
            return getattr(self, attr)
        # proxy to the wrapped object
        return getattr(self.wrapped, attr)

    def __repr__(self):
        # viewname resolves through __getattr__ to the wrapped Spandex
        return "<{}/{} at 0x{:x}>".format(self.__class__.__name__, self.viewname, id(self))

    @property
    def wrapped(self):
        # the underlying Spandex
        return self._wrapped_spandex

    @property
    def parent(self):
        # the view-mapped document, NOT the spandex's original parent
        return self._wrapped_parent
class ViewMappedJembatanDoc(object):
    """Wrapper around a JembatanDoc that transparently renames views.

    Attribute access is proxied to the wrapped document; view lookups go
    through ``view_map``, which maps externally requested view names to the
    actual view names stored on the wrapped document.
    """

    def __init__(self, jemdoc: JembatanDoc, view_map):
        '''
        Wrapper constructor.
        @param jemdoc: JembatanDoc to wrap
        @param view_map: dict mapping requested view names to actual names
        '''
        # wrap the object
        self._wrapped_jemdoc = jemdoc
        self.view_map = view_map

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails.  NOTE: do not use
        # hasattr here, it goes into infinite recursion.
        if attr in self.__dict__:
            # this object has it
            return getattr(self, attr)
        # proxy to the wrapped object
        return getattr(self._wrapped_jemdoc, attr)

    @property
    def wrapped(self):
        """The underlying JembatanDoc."""
        return self._wrapped_jemdoc

    @property
    def default_view(self):
        return self.get_view(constants.SPANDEX_DEFAULT_VIEW)

    def get_view(self, viewname):
        mapped_viewname = self.view_map.get(viewname, None)
        if mapped_viewname is None:
            # viewname was not specified in view map, so return the original view
            view = self.wrapped.get_view(viewname)
        else:
            view = self.wrapped.get_view(mapped_viewname)
        # we need to wrap the view so that if it references its parent it can
        # get back to the ViewMapped version instead of the original one
        return ViewMappedSpandex(view, self)

    def create_view(self, viewname: str) -> Spandex:
        if viewname in self.view_map:
            mapped_viewname = self.view_map[viewname]
            view = self.wrapped.create_view(mapped_viewname)
        else:
            view = self.wrapped.create_view(viewname)
        # BUG FIX: ViewMappedSpandex has no 'parent' keyword argument; the
        # old call ViewMappedSpandex(view, parent=self) raised TypeError.
        return ViewMappedSpandex(view, self)

    def __getitem__(self, viewname: str) -> Spandex:
        return self.get_view(viewname)
# NOTE(review): this __all__ lists names ('errors', 'encoders') that are not
# defined in this module -- it looks like it belongs to a package __init__;
# confirm before relying on it.
__all__ = ['errors', 'encoders']
| |
"""Single slice vgg with normalised scale.
"""
import functools
import lasagne as nn
import numpy as np
import theano
import theano.tensor as T
import data_loader
import deep_learning_layers
import image_transform
import layers
import preprocess
import postprocess
import objectives
import theano_printer
import updates
import utils
# Random params
rng = np.random
take_a_dump = False  # dump a lot of data in a pkl-dump file. (for debugging)
dump_network_loaded_data = False  # dump the outputs from the dataloader (for debugging)
# Memory usage scheme
caching = None
# Save and validation frequency (in epochs)
validate_every = 20
validate_train_set = True
save_every = 20
restart_from_save = False
# NOTE(review): duplicate assignment -- dump_network_loaded_data was already
# set to the same value above; kept as-is.
dump_network_loaded_data = False
# Training (schedule) parameters
# - batch sizes
batch_size = 4
sunny_batch_size = 4
batches_per_chunk = 32
num_epochs_train = 150
# - learning rate and method
base_lr = 0.0001
# step schedule: drop the lr 10x at 80% and 100x at 95% of training
learning_rate_schedule = {
    0: base_lr,
    8*num_epochs_train/10: base_lr/10,
    19*num_epochs_train/20: base_lr/100,
}
momentum = 0.9
build_updates = updates.build_adam_updates
# Preprocessing stuff
cleaning_processes = [
    preprocess.set_upside_up,]
cleaning_processes_post = [
    functools.partial(preprocess.normalize_contrast_zmuv, z=2)]
# data augmentation ranges applied during training
augmentation_params = {
    "rotation": (-180, 180),
    "shear": (0, 0),
    "translation": (-8, 8),
    "flip_vert": (0, 1),
    "roll_time": (0, 0),
    "flip_time": (0, 0),
}
use_hough_roi = True
preprocess_train = functools.partial(  # normscale_resize_and_augment has a bug
    preprocess.preprocess_normscale,
    normscale_resize_and_augment_function=functools.partial(
        image_transform.normscale_resize_and_augment_2,
        normalised_patch_size=(64,64)))
# validation/test reuse the training pipeline without random augmentation
preprocess_validation = functools.partial(preprocess_train, augment=False)
preprocess_test = preprocess_train
sunny_preprocess_train = preprocess.sunny_preprocess_with_augmentation
sunny_preprocess_validation = preprocess.sunny_preprocess_validation
sunny_preprocess_test = preprocess.sunny_preprocess_validation
# Data generators
create_train_gen = data_loader.generate_train_batch
create_eval_valid_gen = functools.partial(data_loader.generate_validation_batch, set="validation")
create_eval_train_gen = functools.partial(data_loader.generate_validation_batch, set="train")
create_test_gen = functools.partial(data_loader.generate_test_batch, set=["validation", "test"])
def filter_samples(folders):
    """Sample filter hook: currently keeps every folder unchanged.

    The original comment said patients without more than 6 slices should be
    dropped, but that filtering was never implemented; the unused
    function-local ``import glob`` has been removed.
    """
    return folders
# Input sizes
image_size = 64
nr_slices = 22
# expected tensor shapes per named data tag, keyed by the dataloader tags
data_sizes = {
    "sliced:data:sax": (batch_size, nr_slices, 30, image_size, image_size),
    "sliced:data:sax:locations": (batch_size, nr_slices),
    "sliced:data:sax:is_not_padded": (batch_size, nr_slices),
    "sliced:data:randomslices": (batch_size, nr_slices, 30, image_size, image_size),
    "sliced:data:singleslice:2ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
    "sliced:data:singleslice:4ch": (batch_size, 30, image_size, image_size), # 30 time steps, 30 mri_slices, 100 px wide, 100 px high,
    "sliced:data:singleslice:difference:middle": (batch_size, 29, image_size, image_size),
    "sliced:data:singleslice:difference": (batch_size, 29, image_size, image_size),
    "sliced:data:singleslice": (batch_size, 30, image_size, image_size),
    "sliced:data:ax": (batch_size, 30, 15, image_size, image_size),
    "sliced:data:shape": (batch_size, 2,),
    "sunny": (sunny_batch_size, 1, image_size, image_size)
    # TBC with the metadata
}
# Objective
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
    """Kaggle objective with an l2 penalty on the regularizable layers."""
    # weighted l2 regularisation over the layers marked regularizable
    penalty = nn.regularization.regularize_layer_params_weighted(
        interface_layers["regularizable"], nn.regularization.l2)
    return objectives.KaggleObjective(interface_layers["outputs"], penalty=penalty)
# Testing
# NOTE(review): this rebinds the imported `postprocess` module name to its
# postprocess function; kept as-is.
postprocess = postprocess.postprocess
test_time_augmentations = 100  # More augmentations since we only use single slices
# average CDF predictions geometrically in PDF space, then re-accumulate
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
    """Return a softplus nonlinearity whose output is bounded below by lb."""
    def _shifted_softplus(x):
        return nn.nonlinearities.softplus(x) + lb
    return _shifted_softplus
# shared weight initializer for the recurrent layer template below
init = nn.init.Orthogonal()
# partially-applied RecurrentLayer with the hyperparameters fixed
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
                              W_in_to_hid=init,
                              W_hid_to_hid=init,
                              b=nn.init.Constant(0.1),
                              nonlinearity=nn.nonlinearities.rectify,
                              hid_init=nn.init.Constant(0.),
                              backwards=False,
                              learn_init=True,
                              gradient_steps=-1,
                              grad_clipping=False,
                              unroll_scan=False,
                              precompute_input=False)
# Architecture
def build_model():
    """Assemble the full network: per-slice SAX submodel aggregated per
    patient, combined with pretrained 2ch/4ch meta models through a learned
    weighted mean over the three systole/diastole predictions.

    Returns a dict with "inputs", "outputs", "regularizable" and
    "pretrained" interface entries consumed by the training harness.
    """
    import j6_2ch_gauss, j6_4ch_gauss
    # pretrained single-view models whose outputs get blended in
    meta_2ch = j6_2ch_gauss.build_model()
    meta_4ch = j6_4ch_gauss.build_model()
    # project each meta model's feature output to a 64-unit embedding
    l_meta_2ch_systole = nn.layers.DenseLayer(meta_2ch["meta_outputs"]["systole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    l_meta_2ch_diastole = nn.layers.DenseLayer(meta_2ch["meta_outputs"]["diastole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    l_meta_4ch_systole = nn.layers.DenseLayer(meta_4ch["meta_outputs"]["systole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    l_meta_4ch_diastole = nn.layers.DenseLayer(meta_4ch["meta_outputs"]["diastole"], num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:sax"]
    input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
    input_size_locations = data_sizes["sliced:data:sax:locations"]
    l0 = nn.layers.InputLayer(input_size)
    lin_slice_mask = nn.layers.InputLayer(input_size_mask)
    lin_slice_locations = nn.layers.InputLayer(input_size_locations)
    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers and some dense layers are defined in a submodel;
    # fold the slice axis into the batch axis so it sees one slice at a time
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
    import je_ss_jonisc64small_360_gauss_longer
    submodel = je_ss_jonisc64small_360_gauss_longer.build_model(l0_slices)
    # Systole Dense layers
    l_sys_mu = submodel["meta_outputs"]["systole:mu"]
    l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
    l_sys_meta = submodel["meta_outputs"]["systole"]
    # Diastole Dense layers
    l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
    l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
    l_dia_meta = submodel["meta_outputs"]["diastole"]
    # AGGREGATE SLICES PER PATIENT
    l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)
    # Systole: unfold slices back out of the batch axis, then aggregate
    l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
    l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
    l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
    l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)
    l_sys_meta = nn.layers.DenseLayer(nn.layers.ReshapeLayer(l_sys_meta, (-1, nr_slices, 512)), num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    l_meta_systole = nn.layers.ConcatLayer([l_meta_2ch_systole, l_meta_4ch_systole, l_sys_meta])
    # learn mixing weights for the 3 systole predictions from the embeddings
    l_weights = nn.layers.DenseLayer(l_meta_systole, num_units=512, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    l_weights = nn.layers.DenseLayer(l_weights, num_units=3, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    systole_output = layers.WeightedMeanLayer(l_weights, [l_systole, meta_2ch["outputs"]["systole"], meta_4ch["outputs"]["systole"]])
    # Diastole: same aggregation as systole
    l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
    l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
    l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=100.)
    l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)
    l_dia_meta = nn.layers.DenseLayer(nn.layers.ReshapeLayer(l_dia_meta, (-1, nr_slices, 512)), num_units=64, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    l_meta_diastole = nn.layers.ConcatLayer([l_meta_2ch_diastole, l_meta_4ch_diastole, l_dia_meta])
    l_weights = nn.layers.DenseLayer(l_meta_diastole, num_units=512, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)
    # NOTE(review): diastole's final weight layer uses identity nonlinearity
    # whereas systole's uses rectify -- confirm the asymmetry is intended
    l_weights = nn.layers.DenseLayer(l_weights, num_units=3, W=nn.init.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.identity)
    diastole_output = layers.WeightedMeanLayer(l_weights, [l_diastole, meta_2ch["outputs"]["diastole"], meta_4ch["outputs"]["diastole"]])
    submodels = [submodel, meta_2ch, meta_4ch]
    return {
        "inputs":dict({
            "sliced:data:sax": l0,
            "sliced:data:sax:is_not_padded": lin_slice_mask,
            "sliced:data:sax:locations": lin_slice_locations,
        }, **{ k: v for d in [model["inputs"] for model in [meta_2ch, meta_4ch]]
               for k, v in d.items() }
        ),
        "outputs": {
            "systole": systole_output,
            "diastole": diastole_output,
        },
        "regularizable": dict(
            {},
            **{
                k: v
                for d in [model["regularizable"] for model in submodels if "regularizable" in model]
                for k, v in d.items() }
        ),
        "pretrained":{
            je_ss_jonisc64small_360_gauss_longer.__name__: submodel["outputs"],
            j6_2ch_gauss.__name__: meta_2ch["outputs"],
            j6_4ch_gauss.__name__: meta_4ch["outputs"],
        },
        #"cutoff_gradients": [
        #] + [ v for d in [model["meta_outputs"] for model in [meta_2ch, meta_4ch] if "meta_outputs" in model]
        #      for v in d.values() ]
    }
| |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Globals/locals/single arg dir nodes
These nodes give access to variables, highly problematic, because using them,
the code may change or access anything about them, so nothing can be trusted
anymore, if we start to not know where their value goes.
The "dir()" call without arguments is reformulated to locals or globals calls.
"""
from .ConstantRefNodes import makeConstantRefNode
from .DictionaryNodes import ExpressionKeyValuePair, makeExpressionMakeDict
from .ExpressionBases import ExpressionBase, ExpressionBuiltinSingleArgBase
from .VariableRefNodes import ExpressionTempVariableRef, ExpressionVariableRef
class ExpressionBuiltinGlobals(ExpressionBase):
    """Node for the built-in "globals()" call.

    The resulting module dictionary cannot be predicted here, but producing
    it has no side effects and cannot raise.
    """

    kind = "EXPRESSION_BUILTIN_GLOBALS"

    def __init__(self, source_ref):
        ExpressionBase.__init__(self, source_ref=source_ref)

    def finalize(self):
        del self.parent

    def computeExpressionRaw(self, trace_collection):
        # Nothing to optimize, the globals dictionary is simply there.
        return self, None, None

    @staticmethod
    def mayRaiseException(exception_type):
        return False

    @staticmethod
    def mayHaveSideEffects():
        return False
class ExpressionBuiltinLocalsBase(ExpressionBase):
    """Shared base for nodes that expose a locals dictionary."""

    # Base classes can be abstract, pylint: disable=abstract-method

    __slots__ = ("variable_traces", "locals_scope")

    def __init__(self, locals_scope, source_ref):
        ExpressionBase.__init__(self, source_ref=source_ref)

        self.locals_scope = locals_scope
        self.variable_traces = None

    def finalize(self):
        del self.variable_traces
        del self.locals_scope

    def mayHaveSideEffects(self):
        return False

    def mayRaiseException(self, exception_type):
        return False

    def getLocalsScope(self):
        # Scope whose locals dictionary is being referenced.
        return self.locals_scope

    def getVariableTraces(self):
        # Traces recorded by the last "computeExpressionRaw" pass.
        return self.variable_traces
class ExpressionBuiltinLocalsUpdated(ExpressionBuiltinLocalsBase):
    """Locals dictionary reference that observes later updates."""

    kind = "EXPRESSION_BUILTIN_LOCALS_UPDATED"

    def __init__(self, locals_scope, source_ref):
        assert locals_scope is not None

        ExpressionBuiltinLocalsBase.__init__(
            self, locals_scope=locals_scope, source_ref=source_ref
        )

    def computeExpressionRaw(self, trace_collection):
        # All locals are used and must be considered escaped.
        self.variable_traces = trace_collection.onLocalsUsage(self.locals_scope)
        trace_collection.onLocalsDictEscaped(self.locals_scope)

        return self, None, None
class ExpressionBuiltinLocalsRef(ExpressionBuiltinLocalsBase):
    """Reference to the locals dictionary of a scope.

    When the scope is marked for propagation, this reference is replaced by
    an explicit dictionary creation built from the propagated temp variables.
    """

    kind = "EXPRESSION_BUILTIN_LOCALS_REF"

    def __init__(self, locals_scope, source_ref):
        ExpressionBuiltinLocalsBase.__init__(
            self, locals_scope=locals_scope, source_ref=source_ref
        )

    # NOTE: "getLocalsScope" is inherited from "ExpressionBuiltinLocalsBase";
    # the identical override that used to live here was redundant and removed.

    def computeExpressionRaw(self, trace_collection):
        if self.locals_scope.isMarkedForPropagation():
            # Replace the reference with a dictionary made from the
            # propagation variables of the scope.
            result = makeExpressionMakeDict(
                pairs=(
                    ExpressionKeyValuePair(
                        key=makeConstantRefNode(
                            constant=variable_name, source_ref=self.source_ref
                        ),
                        value=ExpressionTempVariableRef(
                            variable=variable, source_ref=self.source_ref
                        ),
                        source_ref=self.source_ref,
                    )
                    for variable_name, variable in self.locals_scope.getPropagationVariables().items()
                ),
                source_ref=self.source_ref,
            )

            new_result = result.computeExpressionRaw(trace_collection)
            assert new_result[0] is result

            self.finalize()

            return result, "new_expression", "Propagated locals dictionary reference."

        # Just inform the collection that all escaped unless it is abortative.
        if not self.getParent().isStatementReturn():
            trace_collection.onLocalsUsage(locals_scope=self.locals_scope)

        return self, None, None
class ExpressionBuiltinLocalsCopy(ExpressionBuiltinLocalsBase):
    """Snapshot style locals dictionary, may become a plain dict creation."""

    kind = "EXPRESSION_BUILTIN_LOCALS_COPY"

    def computeExpressionRaw(self, trace_collection):
        # Just inform the collection that all escaped.
        self.variable_traces = trace_collection.onLocalsUsage(
            locals_scope=self.locals_scope
        )

        for variable, variable_trace in self.variable_traces:
            # Prediction requires every variable to be either definitely set
            # or definitely unset; otherwise keep the node unchanged.
            if (
                not variable_trace.mustHaveValue()
                and not variable_trace.mustNotHaveValue()
            ):
                return self, None, None

            # Other locals elsewhere.
            if variable_trace.getNameUsageCount() > 1:
                return self, None, None

        # Collect a key/value pair for every variable known to have a value.
        pairs = []
        for variable, variable_trace in self.variable_traces:
            if variable_trace.mustHaveValue():
                pairs.append(
                    ExpressionKeyValuePair(
                        key=makeConstantRefNode(
                            constant=variable.getName(),
                            user_provided=True,
                            source_ref=self.source_ref,
                        ),
                        value=ExpressionVariableRef(
                            variable=variable, source_ref=self.source_ref
                        ),
                        source_ref=self.source_ref,
                    )
                )

        # Locals is sorted of course.
        def _sorted(pairs):
            # Order pairs by the declaration order of the variables in the
            # providing scope.
            names = [
                variable.getName()
                for variable in self.locals_scope.getProvidedVariables()
            ]

            return tuple(
                sorted(
                    pairs,
                    key=lambda pair: names.index(
                        pair.subnode_key.getCompileTimeConstant()
                    ),
                )
            )

        result = makeExpressionMakeDict(
            pairs=_sorted(pairs), source_ref=self.source_ref
        )

        return result, "new_expression", "Statically predicted locals dictionary."
class ExpressionBuiltinDir1(ExpressionBuiltinSingleArgBase):
    """The built-in "dir()" with exactly one argument."""

    kind = "EXPRESSION_BUILTIN_DIR1"

    def computeExpression(self, trace_collection):
        # TODO: Quite some cases should be possible to predict and this
        # should be using a slot, with "__dir__" being overloaded or not.

        # Any code could be run, note that.
        trace_collection.onControlFlowEscape(self)

        # Any exception may be raised.
        trace_collection.onExceptionRaiseExit(BaseException)

        return self, None, None
| |
# Copyright 2014-present, Apstra, Inc. All rights reserved.
#
# This source code is licensed under End User License Agreement found in the
# LICENSE file at http://www.apstra.com/community/eula
import subprocess
import logging
import logging.handlers
import os
import socket
import requests
import json
from aeon.eos.device import Device as EosDevice
from aeon.cumulus.device import Device as CumulusDevice
from aeon.nxos.device import Device as NxosDevice
from aeon.ubuntu.device import Device as UbuntuDevice
from aeon.centos.device import Device as CentosDevice
from aeon.nxos.exceptions import CommandError as NxosCommandError
from aeon.exceptions import CommandError, TimeoutError, ProbeError, TargetError, LoginNotReadyError
from aeon.utils import get_device
from celery import Celery
__all__ = ['ztp_bootstrapper']

# Celery application wiring: both the broker and the result backend point at
# an AMQP service on the local host.
celery_config = dict()
celery_config['CELERY_BROKER_URL'] = 'amqp://'
celery_config['CELERY_RESULT_BACKEND'] = 'rpc://'
celery = Celery('aeon-ztp', broker=celery_config['CELERY_BROKER_URL'])
celery.conf.update(celery_config)

# Deployment settings come from the environment (provided by aeon-ztp).
_AEON_PORT = os.getenv('AEON_HTTP_PORT')
_AEON_DIR = os.getenv('AEON_TOPDIR')
_AEON_LOGFILE = os.getenv('AEON_LOGFILE')
def get_server_ipaddr(dst):
    """Return the local IP address used to reach *dst*.

    "Connects" a UDP socket to *dst* (no packets are actually sent for UDP)
    and reads back the source address the kernel selected.

    :param dst: destination IP address or hostname.
    :returns: local IPv4 address as a string.
    """
    dst_s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        dst_s.connect((dst, 0))
        return dst_s.getsockname()[0]
    finally:
        # BUGFIX: the socket was previously leaked on every call.
        dst_s.close()
def post_device_status(server, target, os_name=None, message=None, state=None):
    """PUT a device status update to the AEON-ZTP server REST API."""
    payload = dict(ip_addr=target,
                   state=state,
                   message=message)
    if os_name:
        # Include the OS name only when one was supplied.
        payload = dict(os_name=os_name,
                       ip_addr=target,
                       state=state,
                       message=message)
    requests.put(
        url='http://%s/api/devices/status' % server,
        json=payload)
def get_device_state(server, target):
    """Return the recorded ZTP state for *target*, or None when unknown.

    :param server: "<ip>:<port>" of the AEON-ZTP server.
    :param target: device IP address.
    """
    r = requests.get(url='http://{server}/api/devices?ip_addr={ip_addr}'
                     .format(server=server, ip_addr=target))
    try:
        state = r.json()['items'][0]['state']
    # BUGFIX: an unregistered device yields an empty 'items' list
    # (IndexError) or a null payload (TypeError), not only a missing key.
    except (KeyError, IndexError, TypeError):
        state = None
    return state
def get_device_facts(server, target):
    """Fetch the device record for *target* and flatten its 'facts' column."""
    url = 'http://{server}/api/devices?ip_addr={ip_addr}'.format(
        server=server, ip_addr=target)
    record = requests.get(url=url).json().get('items')[0]
    if record and 'facts' in record:
        # The 'facts' column holds a JSON string; merge it into the record.
        record.update(json.loads(record.pop('facts')))
    return record
def setup_logging(logname, target):
    """Return a syslog-backed logger whose messages are tagged with *target*."""
    logger = logging.getLogger(name=logname)
    logger.setLevel(logging.INFO)
    syslog = logging.handlers.SysLogHandler(address='/dev/log')
    syslog.setFormatter(logging.Formatter(
        '%(name)s %(levelname)s {target}: %(message)s'.format(target=target)))
    logger.addHandler(syslog)
    return logger
def do_finalize(server, os_name, target, log, finally_script=None):
    """Run the user-provided 'finally' script for a device, if one exists.

    :param server: "<ip>:<port>" of the AEON-ZTP server.
    :param os_name: network OS name; selects the profile directory.
    :param target: device IP address.
    :param log: logger used for progress reporting.
    :param finally_script: script filename relative to the profile directory.
    :returns: (rc, stderr) of the script, or (0, None) when skipped.
    """
    profile_dir = os.path.join(_AEON_DIR, 'etc', 'profiles', os_name)
    os_sel = os.path.join(profile_dir, 'os-selector.cfg')
    if not finally_script:
        log.info(
            'Skipping finally script: No finally script specified for {target} in {os_sel}.'.format(target=target,
                                                                                                    os_sel=os_sel))
        return 0, None
    finalizer = os.path.join(profile_dir, finally_script)
    if not os.path.isfile(finalizer):
        log.info('no user provided finally script found at: "{}"'.format(profile_dir))
        return 0, None
    json_facts = json.dumps(get_device_facts(server, target))
    # Context is passed both as command-line options and (below) via the
    # environment, so scripts can use either.
    cmd_args = [
        finalizer,
        '-t %s' % target,
        '-s %s' % server,
        '-u AEON_TUSER',
        '-p AEON_TPASSWD',
        '-l %s' % _AEON_LOGFILE,
        "-f '{}'".format(json_facts)
    ]
    cmd_str = ' '.join(cmd_args)
    this_env = os.environ.copy()
    this_env.update(dict(
        AEON_LOGFILE=_AEON_LOGFILE,
        AEON_TARGET=target,
        AEON_SERVER=server,
        FACTS=json_facts))
    # NOTE(review): shell=True with a string-built command; json_facts is
    # single-quoted but not shell-escaped -- confirm facts cannot contain
    # quotes from untrusted sources.
    child = subprocess.Popen(
        cmd_str, shell=True, env=this_env,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    log_message = "executing 'finally' script:[pid={pid}] {cmd}".format(pid=child.pid, cmd=cmd_str)
    log.info(log_message)
    post_message = "executing 'finally' script:[pid={pid}]".format(pid=child.pid)
    post_device_status(server=server,
                       os_name=os_name, target=target,
                       state='FINALLY', message=post_message)
    _stdout, _stderr = child.communicate()
    rc = child.returncode
    log.info("finally script complete: rc={}".format(rc))
    if len(_stderr):
        log.info("finally stderr=[{}]".format(_stderr))
    return rc, _stderr
def do_bootstrapper(server, os_name, target, log):
    """Run the per-OS bootstrap program for a device and wait for it.

    :param server: "<ip>:<port>" of the AEON-ZTP server.
    :param os_name: network OS name; selects the bootstrap executable.
    :param target: device IP address.
    :param log: logger used for progress reporting.
    :returns: (rc, stderr) of the bootstrap process.
    """
    # The trailing '*' is a shell glob matching the bootstrap executable's
    # suffix; it only expands because the command runs with shell=True.
    prog = '%s/bin/%s_bootstrap*' % (_AEON_DIR, os_name)
    cmd_args = [
        prog,
        '--target %s' % target,
        '--server %s' % server,
        '--topdir %s' % _AEON_DIR,
        '-U AEON_TUSER',
        '-P AEON_TPASSWD'
    ]
    cmd_str = ' '.join(cmd_args)
    # must pass command as a single string; using shell=True
    this = subprocess.Popen(
        cmd_str, shell=True, env=os.environ.copy(),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    log.info("starting bootstrapper[pid={pid}] [{cmd_str}]".format(
        pid=this.pid, cmd_str=cmd_str))
    _stdout, _stderr = this.communicate()
    rc = this.returncode
    log.info("bootstrapper complete: rc={}".format(rc))
    if len(_stderr):
        log.error("stderr={}".format(_stderr))
    return rc, _stderr
@celery.task
def ztp_bootstrapper(os_name, target):
    """Celery task: run the full bootstrap + finalize flow for a device.

    Registers the device with the AEON-ZTP server, runs the per-OS bootstrap
    program and then the optional 'finally' script, posting status updates
    along the way.  Returns early (None) for duplicate runs.
    """
    server = "{}:{}".format(get_server_ipaddr(target), _AEON_PORT)
    log = setup_logging(logname='aeon-bootstrapper', target=target)
    try:
        state = get_device_state(server, target)
        # A known device that is not in a retryable/terminal state is taken
        # to be a duplicate bootstrap run and aborted.
        if state and state not in ('RETRY', 'ERROR', 'DONE'):
            log.warning('Device at {} has already registered. This is likely a duplicate bootstrap run and will '
                        'be terminated.'.format(target))
            return
        if state == 'DONE':
            log.warning('Device at {} has previously successfully completed ZTP process. '
                        'ZTP process has been initiated again.'.format(target))
        got = requests.post(
            url='http://%s/api/devices' % server,
            json=dict(
                ip_addr=target, os_name=os_name,
                state='REGISTERED',
                message='device registered, waiting for bootstrap start'))
        if not got.ok:
            body = got.json()
            log.error('Unable to register device: %s' % body['message'])
            return got.status_code
        rc, _stderr = do_bootstrapper(server=server, os_name=os_name, target=target, log=log)
        if 0 != rc:
            post_device_status(server=server,
                               os_name=os_name, target=target,
                               state='ERROR', message='Error running bootstrapper: {}'.format(_stderr))
            return rc
        facts = get_device_facts(server, target)
        finally_script = facts.get('finally_script', None)
        rc, _stderr = do_finalize(server=server, os_name=os_name, target=target, log=log, finally_script=finally_script)
        if rc != 0:
            post_device_status(server=server,
                               os_name=os_name,
                               target=target,
                               state='ERROR',
                               message='Error running finally script: {}'.format(_stderr))
            return rc
        post_device_status(server=server,
                           os_name=os_name, target=target,
                           state='DONE', message='device bootstrap completed')
    finally:
        # Detach the per-run syslog handler so repeated task runs do not
        # accumulate handlers on the shared logger.
        log.handlers.pop()
    # NOTE(review): only reached on the success fall-through above, where
    # 'rc' is bound; earlier paths return inside the try block.
    return rc
@celery.task
def ztp_finalizer(os_name, target):
    """Celery task: run only the 'finally' script for a device."""
    server = "{}:{}".format(get_server_ipaddr(target), _AEON_PORT)
    finally_script = get_device_facts(server, target).get('finally_script', None)
    log = setup_logging(logname='aeon-finalizer', target=target)
    try:
        rc, _stderr = do_finalize(server=server, os_name=os_name, target=target,
                                  log=log, finally_script=finally_script)
        if rc != 0:
            post_device_status(server=server,
                               os_name=os_name, target=target,
                               state='ERROR',
                               message='Error running finally script: {}'.format(_stderr))
        return rc, _stderr
    finally:
        # Always detach the per-run syslog handler.
        log.handlers.pop()
@celery.task
def retry_ztp(target, nos=None, user='admin', password='admin'):
    """Celery task: force a device back through the ZTP process.

    :param target: device IP address.
    :param nos: network OS name (e.g. 'eos', 'nxos'); auto-detected if None.
    :param user: login user for the device.
    :param password: login password for the device.
    :returns: (ok, output) tuple; ok is False on any failure.
    """
    log = setup_logging(logname='aeon-retry', target=target)
    cumulus_lease_file = '/var/lib/dhcp/dhclient.eth0.leases'
    server = "{}:{}".format(get_server_ipaddr(target), _AEON_PORT)

    # Map each supported NOS to its device driver class and reset commands.
    dev_table = {
        'eos': {
            'dev_obj': EosDevice,
            'cmds': ['write erase now', 'reload now']
        },
        'cumulus': {
            'dev_obj': CumulusDevice,
            'cmds': [
                "sudo sed -i '/vrf mgmt/d' /etc/network/interfaces",
                'sudo ztp -R',
                'sudo reboot'
            ],
            'virt_cmds': [
                "sudo ztp -v -r $(cat %s | grep 'cumulus-provision-url'| tail -1 | cut -f2 -d \\\")" % cumulus_lease_file
            ]
        },
        'nxos': {
            'dev_obj': NxosDevice,
            'cmds': 'terminal dont-ask ; write erase ; reload'
        },
        'opx': {
            'dev_obj': UbuntuDevice,
            'cmds': ['curl "http://{}/api/register/opx"'.format(get_server_ipaddr(target))]
        },
        'ubuntu': {
            'dev_obj': UbuntuDevice,
            'cmds': ['curl "http://{}/api/register/ubuntu"'.format(get_server_ipaddr(target))]
        },
        'centos': {
            'dev_obj': CentosDevice,
            'cmds': ['curl "http://{}/api/register/centos"'.format(get_server_ipaddr(target))]
        }
    }

    def post_success():
        # Report the successfully initiated retry to syslog and the API.
        message = 'Retry successfully initiated'
        log.info(message)
        post_device_status(server=server,
                           target=target,
                           state='RETRY', message=message)

    try:
        if not nos:
            log.info('Determining device OS type')
            dev = get_device(target=target, user=user, passwd=password)
            dev.gather_facts()
            nos = dev.facts['os_name'].lower()
        else:
            nos = nos.lower()
        log.info('Device OS type: %s' % nos)
        # BUGFIX: this was a substring test over the table keys
        # (any(nos in x ...)), which let partial names such as "os" pass the
        # check and crash with a KeyError below.
        if nos not in dev_table:
            error_msg = 'Retry not supported for device type %s' % nos
            log.error(error_msg)
            post_device_status(server=server,
                               target=target,
                               state='ERROR', message=error_msg)
            # BUGFIX: detach the handler on this exit too, as every other
            # exit path does.
            log.handlers.pop()
            return False, error_msg
        dev = dev_table[nos]['dev_obj'](target, user=user, passwd=password)
    except (ProbeError, TargetError) as e:
        error_msg = 'Error accessing device: %s' % str(e)
        log.error(error_msg)
        post_device_status(server=server,
                           target=target,
                           state='ERROR', message=error_msg)
        log.handlers.pop()
        return False, error_msg

    try:
        # CumulusVX doesn't always boot into ZTP mode without network errors
        # Use different retry commands for CVX
        if dev.facts['os_name'] == 'cumulus' and dev.facts['virtual']:
            log.info('Running retry commands: %s' % dev_table[nos]['virt_cmds'])
            ok, output = dev.api.execute(dev_table[nos]['virt_cmds'])
            post_success()
            return ok, output
        # aeon-venos NxosDevice doesn't use execute for some reason
        elif dev.facts['os_name'] == 'nxos':
            # Ignore timeout after reload
            try:
                log.info('Running retry commands. %s' % dev_table[nos]['cmds'])
                output = (dev.api.exec_config(dev_table[nos]['cmds'], timeout=10))
            except TimeoutError:
                post_success()
                output = True
            ok = True
        else:
            log.info('Running retry commands: %s' % dev_table[nos]['cmds'])
            ok, output = dev.api.execute(dev_table[nos]['cmds'])
        post_success()
        return ok, output
    except (CommandError, NxosCommandError) as e:
        # IncompleteRead error raised when reloading EOS. This is normal.
        if 'IncompleteRead' in str(e.exc):
            post_success()
            return True, None
        error_msg = 'Unable to initiate ZTP retry: %s' % str(e)
        log.error(error_msg)
        post_device_status(server=server,
                           target=target,
                           state='ERROR', message=error_msg)
        return False, e
    except TimeoutError:
        error_msg = 'Device %s unreachable' % target
        log.error('Unable to initiate ZTP retry: Device %s unreachable' % target)
        post_device_status(server=server,
                           target=target,
                           state='ERROR', message=error_msg)
        return False, error_msg
    except LoginNotReadyError as e:
        error_msg = 'Unable to login to device: %s' % str(e)
        log.error(error_msg)
        post_device_status(server=server,
                           target=target,
                           state='ERROR', message=error_msg)
        # BUGFIX: this branch previously fell through to a trailing
        # "return ok, output" with both names unbound, raising
        # UnboundLocalError instead of reporting the failure.
        return False, error_msg
    finally:
        log.handlers.pop()
| |
import struct
from types import StringType
class _BUILDER:
    '''Virtual base helper class for structured file scanning'''

    def _get_struct_fmt(self,info):
        # Build a little-endian struct format string from the field specs
        # (each spec is a (format, name, description) tuple).
        fmt = '<'
        for f, _, _ in info:
            fmt += f
        return fmt

    def _scan_from_file(self,f,info):
        # Read one binary record from f and set each field as an attribute
        # on self, named per the spec tuples.
        fmt = self._get_struct_fmt(info)
        size = struct.calcsize(fmt)
        T = struct.unpack(fmt,f.read(size))
        i = 0
        for _, n, _ in info:
            setattr(self,n,T[i])
            i = i + 1

    def _dump(self,A):
        # Print "name value" for every attribute name in A (Python 2 print).
        for a in A:
            print a, getattr(self,a)

    def _attr_names(self,*I):
        # Flatten a mix of plain names and struct-info tuples into one list
        # of attribute names.
        A = []
        for i in I:
            if type(i) is StringType:
                A.append(i)
            else:
                A.extend(map(lambda x: x[1],i))
        return A

    def _scanZTStr(self,f,loc):
        '''scan a zero terminated string from the file'''
        f.seek(loc)
        s = ''
        while 1:
            c = f.read(1)
            if c=='\000': break
            s = s+c
        return s

    def _scanN(self,N,fmt,f,loc):
        # Read N values of format fmt at offset loc; None when loc is zero
        # (offset 0 means "table not present" in PFM files).
        if not loc: return None
        fmt = len(fmt)==1 and ("<%d%c" % (N,fmt)) or ("<"+N*fmt)
        f.seek(loc)
        size = struct.calcsize(fmt)
        return struct.unpack(fmt,f.read(size))

    def _scanNT(self,T,N,fmt,f,loc):
        # Read N multi-field records of format fmt at offset loc and wrap
        # each group of len(fmt) values in a T(...) instance.
        if not loc: return None
        n = len(fmt)
        X = []
        i = 0
        S = []
        for x in self._scanN(N,fmt,f,loc):
            S.append(x)
            i = i + 1
            if i==n:
                X.append(S)
                i = 0
                S = []
        return map(lambda x,T=T: T(*x),X)
class KernPair:
    '''hold info about a possible kerning pair'''
    def __init__(self,first,second,amount):
        # first/second: character codes of the pair; amount: kern adjustment.
        self.first = first
        # BUGFIX: the attribute was previously misspelled "scond"; keep the
        # old name as an alias in case legacy code still reads it.
        self.second = second
        self.scond = second
        self.amount = amount
class KernTrack:
    def __init__(self,degree,minSize,minAmount,maxSize,maxAmount):
        '''
        degree     amount to change the character spacing; negative values
                   mean closer together, positive values mean farther apart.
        minSize    minimum font height (in device units) for which to use
                   linear track kerning.
        minAmount  track kerning amount to use for font heights less than or
                   equal to minSize.
        maxSize    maximum font height (in device units) for which to use
                   linear track kerning.  For font heights between minSize
                   and maxSize the track kerning amount increases linearly
                   from minAmount to maxAmount.
        maxAmount  track kerning amount to use for font heights greater than
                   or equal to maxSize.
        '''
        self.degree = degree
        self.minSize = minSize
        self.minAmount = minAmount
        self.maxSize = maxSize
        self.maxAmount = maxAmount
class PFM(_BUILDER):
def __init__(self,fn=None):
if fn:
if type(fn) is StringType:
f = open(fn,'rb')
else:
f = fn
self.scan_from_file(f)
if f is not fn: f.close()
'''Class to hold information scanned from a type-1 .pfm file'''
def scan_from_file(self,f):
self._scan_from_file(f,self._header_struct_info)
if self.dfType!=0x81: raise ValueError, "Not a Type-1 Font description"
else: self.WidthTable = None
self._scan_from_file(f,self._extension_struct_info)
if not self.dfExtentTable: raise ValueError, 'dfExtentTable is zero'
if not self.dfExtMetricsOffset: raise ValueError, 'dfExtMetricsOffset is zero'
if self.dfDevice: self.DeviceName = self._scanZTStr(f,self.dfDevice)
else: self.DeviceName = None
if self.dfFace: self.FaceName = self._scanZTStr(f,self.dfFace)
else: self.FaceName = None
f.seek(self.dfExtMetricsOffset)
self._scan_from_file(f, self._extTextMetrics_struct_info)
N = self.dfLastChar - self.dfFirstChar + 1
self.ExtentTable = self._scanN(N,'H',f,self.dfExtentTable)
if self.dfDriverInfo: self.DriverInfo = self._scanZTStr(f,self.dfDriverInfo)
else: self.DriverInfo = None
if self.dfPairKernTable: self.KerningPairs = self._scanNT(KernPair,self.dfPairKernTable,'BBh',f,self.etmKernPairs)
else: self.KerningPairs = []
if self.dfTrackKernTable: self.KerningTracks = self._scanNT(KernTrack,self.dfTrackKernTable,'hhhhh',f,self.etmKernTracks)
else: self.KerningTracks = []
def dump(self):
self._dump(
self._attr_names(
self._header_struct_info,'WidthTable',
self._extension_struct_info,
'DeviceName',
'FaceName',
self._extTextMetrics_struct_info,
'DriverInfo',
))
_header_struct_info = (('H','dfVersion',
'''This field contains the version of the PFM file.
For PFM files that conform to this description
(namely PFM files for Type-1 fonts) the
value of this field is always 0x0100.'''),
('i','dfSize',
'''This field contains the total size of the PFM file in bytes.
Some drivers check this field and compare its value with the size of the PFM
file, and if these two values don't match the font is ignored
(I know this happens e.g. with Adobe PostScript printer drivers). '''),
('60s','dfCopyright',
'''This field contains a null-terminated copyright
string, often from the application that created the
PFM file (this normally isn't the
copyright string for the font file itself).
The unused bytes in this field should be set to zero. '''),
('H','dfType',
'''This field contains the font type. The low-order
byte is a combination of the following values
(only the values being of interest in PFM
files are given):
0x00 (PF_RASTER_TYPE): font is a raster font
0x01 (PF_VECTOR_TYPE): font is a vector font
0x80 (PF_DEVICE_REALIZED): font realized by the device driver
The high-order byte is never used in PFM files, it is always zero.
In PFM files for Type-1 fonts the value in this field is always 0x0081. '''),
('H','dfPoints',
'''This field contains the point size at which this font
looks best. Since this is not relevant for scalable fonts
the field is ignored. The value
of this field should be set to 0x000a (10 pt). '''),
('H','dfVertRes',
'''This field contains the vertical resolution at which the
font was digitized (the value is in dots per inch).
The value of this field should be
set to 0x012C (300 dpi). '''),
('H','dfHorizRes',
'''This field contains the horizontal resolution at which
the font was digitized (the value is in dots per inch).
The value of this field should
be set to 0x012C (300 dpi). '''),
('H','dfAscent',
'''This field contains the distance from the top of a
character definition cell to the baseline of the
typographical font. It is useful for aligning the
baseline of fonts of different heights. '''),
('H','dfInternalLeading',
'''This field contains the amount of leading inside
the bounds set by the dfPixHeight field in the PFMHEADER
structure. Accent marks may occur in this area. '''),
('H','dfExternalLeading',
'''This field contains the amount of extra leading that the
designer requests the application to add between rows. Since this area is
outside the character definition cells, it contains no marks and will not be altered by text outputs. '''),
('B','dfItalic',
'''This field specifies whether this font is an italic
(or oblique) font. The low-order bit is 1 if the flag
is set, all other bits are zero. '''),
('B','dfUnderline',
'''This field specifies whether this font is an underlined
font. The low-order bit is 1 if the flag is set, all other
bits are zero. '''),
('B','dfStrikeOut',
'''This field specifies whether this font is a striked-out font.
The low-order bit is 1 if the flag is set, all other bits are zero. '''),
('H','dfWeight',
'''This field contains the weight of the characters in this font.
The value is on a scale from 0 through 1000, increments are in
steps of 100 each. The values roughly give the number of black
pixel from every 1000 pixels. Typical values are:
0 (FW_DONTCARE): unknown or no information
300 (FW_LIGHT): light font
400 (FW_NORMAL): normal font
700 (FW_BOLD): bold font '''),
('B','dfCharSet',
'''This field specifies the character set used in this font.
It can be one of the following values (probably other values
may be used here as well):
0x00 (ANSI_CHARSET): the font uses the ANSI character set;
this means that the font implements all characters needed for the
current Windows code page (e.g. 1252). In case of a Type-1 font
this font has been created with the encoding StandardEncoding
Note that the code page number itself is not stored in the PFM file.
0x02 (SYMBOL_CHARSET): the font uses a font-specific encoding
which will be used unchanged in displaying an printing text
using this font. In case of a Type-1 font this font has been
created with a font-specific encoding vector. Typical examples are
the Symbol and the ZapfDingbats fonts.
0xFF (OEM_CHARSET): the font uses the OEM character set; this
means that the font implements all characters needed for the
code page 437 used in e.g. MS-DOS command line mode (at least
in some versions of Windows, others might use code page
850 instead). In case of a Type-1 font this font has been created with a font-specific encoding vector. '''),
('H','dfPixWidth',
'''This field contains the width of all characters in the font.
For raster fonts this field contains the width in pixels of every
character bitmap if the font is fixed-pitch, otherwise this field
is zero and the character's widths are specified in the WidthTable
table. For vector fonts this field contains the width of the grid
on which the font was digitized. The value is ignored by PostScript
printer drivers. '''),
('H','dfPixHeight',
'''This field contains the height of all characters in the font.
For raster fonts this field contains the height in scan lines of
every character bitmap. For vector fonts this field contains the
height of the grid on which the font was digitized. The value is
ignored by PostScript printer drivers. '''),
('B','dfPitchAndFamily',
'''This field specifies the font pitch and the font family. The
font pitch specifies whether all characters in the font have the
same pitch (this is called fixed pitch too) or variable pitch.
The font family indicates, in a rather general way, the look of a font.
The least significant bit in this field contains the pitch flag.
If the bit is set the font is variable pitch, otherwise it's fixed pitch. For
Type-1 fonts this flag is set always, even if the Type-1 font is fixed pitch.
The most significant bits of this field specify the font family.
These bits may have one of the following values:
0x00 (FF_DONTCARE): no information
0x10 (FF_ROMAN): serif font, variable pitch
0x20 (FF_SWISS): sans serif font, variable pitch
0x30 (FF_MODERN): fixed pitch, serif or sans serif font
0x40 (FF_SCRIPT): cursive or handwriting font
0x50 (FF_DECORATIVE): novelty fonts '''),
('H','dfAvgWidth',
'''This field contains the average width of the characters in the font.
For a fixed pitch font this is the same as dfPixWidth in the
PFMHEADER structure. For a variable pitch font this is the width
of the character 'X'. '''),
('H','dfMaxWidth',
'''This field contains the maximum width of the characters in the font.
For a fixed pitch font this value is identical to dfAvgWidth in the
PFMHEADER structure. '''),
('B','dfFirstChar',
'''This field specifies the first character code defined by this font.
Width definitions are stored only for the characters actually present
in a font, so this field must be used when calculating indexes into the
WidthTable or the ExtentTable tables. For text fonts this field is
normally set to 0x20 (character space). '''),
('B','dfLastChar',
'''This field specifies the last character code defined by this font.
Together with the dfFirstChar field in the PFMHEADER structure this
field specifies the valid character range for this font. There must
be an entry in the WidthTable or the ExtentTable tables for every
character between these two values (including these values themselves).
For text fonts this field is normally set to 0xFF (maximum
possible value). '''),
('B','dfDefaultChar',
'''This field specifies the default character to be used whenever a
character is used that is outside the range of the dfFirstChar through
dfLastChar fields in the PFMHEADER structure. The character is given
relative to dfFirstChar so that the actual value of the default
character is the sum of dfFirstChar and dfDefaultChar. Ideally, the
default character should be a visible character in the current font,
e.g. a period ('.'). For text fonts this field is normally set to
either 0x00 (character space) or 0x75 (bullet). '''),
('B','dfBreakChar',
'''This field specifies the word-break character. Applications
use this character to separate words when wrapping or justifying lines of
text. The character is given relative to dfFirstChar in the PFMHEADER
structure so that the actual value of the word-break character
is the sum of dfFirstChar and dfBreakChar. For text fonts this
field is normally set to 0x00 (character space). '''),
('H','dfWidthBytes',
'''This field contains the number of bytes in every row of the
font bitmap. The value is always an even quantity so that rows of the
bitmap start on 16 bit boundaries. This field is not used for vector
fonts, it is therefore zero in e.g. PFM files for Type-1 fonts. '''),
('i','dfDevice',
'''This field contains the offset from the beginning of the PFM file
to the DeviceName character buffer. The DeviceName is always
present in PFM files for Type-1 fonts, this field is therefore never zero.'''),
('i','dfFace',
'''This field contains the offset from the beginning of the PFM file
to the FaceName character buffer. The FaceName is always present
in PFM files for Type-1 fonts, this field is therefore never zero. '''),
('i','dfBitsPointer',
'''This field is not used in PFM files, it must be set to zero. '''),
('i','dfBitsOffset',
'''This field is not used in PFM files, it must be set to zero. '''),
)
#'H','WidthTable[]'
#This section is present in a PFM file only when this PFM file describes a
#variable pitch raster font. Since Type-1 fonts aren't raster fonts this
#section never exists in PFM files for Type-1 fonts.'''
#The WidthTable table consists of (dfLastChar - dfFirstChar + 2) entries of type WORD (dfFirstChar and dfLastChar can be found in the
#PFMHEADER structure). Every entry contains the width of the corresponding character, the last entry in this table is extra, it is set to zero.
_extension_struct_info=(
('H','dfSizeFields',
'''This field contains the size (in bytes) of the
PFMEXTENSION structure. The value is always 0x001e. '''),
('I','dfExtMetricsOffset',
'''This field contains the offset from the beginning
of the PFM file to the ExtTextMetrics section.
The ExtTextMetrics section is always present in PFM
files for Type-1 fonts, this field is therefore never
zero. '''),
('I','dfExtentTable',
'''This field contains the offset from the beginning
of the PFM file to the ExtentTable table. This table
is always present in PFM files for Type-1 fonts, this
field is therefore never zero. '''),
('I','dfOriginTable',
'''This field contains the offset from the beginning
of the PFM file to a table containing origin coordinates
for screen fonts. This table is not present in PFM files
for Type-1 fonts, the field must therefore be set to zero. '''),
('I','dfPairKernTable',
'''This field contains the offset from the beginning of
the PFM file to the KerningPairs table. The value must
be zero if the PFM file doesn't contain a KerningPairs
table. '''),
('I','dfTrackKernTable',
'''This field contains the offset from the beginning of
the PFM file to the KerningTracks table. The value must
be zero if the PFM file doesn't contain a kerningTracks
table. '''),
('I','dfDriverInfo',
'''This field contains the offset from the beginning of
the PFM file to the DriverInfo section. This section is
always present in PFM files for Type-1 fonts, this field
is therefore never zero. '''),
('I','dfReserved',
'''This field must be set to zero. '''),
)
#char DeviceName[]
#The DeviceName character buffer is a null-terminated string
#containing the name of the printer driver family. PFM files
#for Type-1 fonts have the string 'PostScript', PFM files for
#PCL fonts have the string 'PCL/HP LaserJet'.
#char FaceName[]
#The FaceName character buffer is a null-terminated string
#containing the name of the font face. In PFM files for Type-1
#fonts this is normally
#the PostScript name of the font without suffixes like
#'-Bold', '-Italic' etc.
# (struct format char, field name, description) triples describing the
# EXTTEXTMETRIC structure of a Windows PFM file.
# 'h' / 'H' are struct-module codes (signed / unsigned 16-bit).
_extTextMetrics_struct_info = (('h','etmSize',
        '''This field contains the size (in bytes) of the
EXTTEXTMETRIC structure. The value is always 0x0034. '''),
    ('h','etmPointSize',
        '''This field contains the nominal point size of the font
in twips (this is a twentieth of a point or 1/1440 inch).
This is the intended graphics art size of the font, the
actual size may differ slightly depending on the resolution
of the output device. In PFM files for Type-1 fonts this value
should be set to 0x00f0 (240 twips or 12 pt). '''),
    ('h','etmOrientation',
        '''This field contains the orientation of the font.
This value refers to the ability of the font to be
imaged on a page of a given orientation. It
can be one of the following values:
0x0000: any orientation
0x0001: portrait (page width is smaller that its height)
0x0002: landscape (page width is greater than its height)
In PFM files for Type-1 fonts this field is always 0x0000
since a Type-1 font can be arbitrarily rotated. '''),
    ('h','etmMasterHeight',
        '''This field contains the font size in device units for
which the values in the ExtentTable table are exact. Since
Type-1 fonts are by convention defined in a box of 1000 x 1000
units, PFM files for Type-1 fonts have the value 0x03E8 (1000,
the number of units per em) in this field. '''),
    ('h','etmMinScale',
        '''This field contains the minimum valid size for the font in
device units. The minimum valid point size can then be calculated
as follows:
(etmMinScale * points-per-inch) / dfVertRes
The value for 'points-per-inch' is normally 72, the dfVertRes
field can be found in the PFMHEADER structure, it contains the
vertical resolution at which the font was digitized (this
value is in dots per inch).
In PFM files for Type-1 fonts the value should be set to 0x0003. '''),
    ('h','etmMaxScale',
        '''This field contains the maximum valid size for the font in
device units. The maximum valid point size can then be calculated
as follows:
(etmMaxScale * points-per-inch) / dfVertRes
(see also above etmMinScale).
In PFM files for Type-1 fonts the value should be set to 0x03E8 (1000). '''),
    ('h','etmMasterUnits',
        '''This field contains the integer number of units per em
where an em equals etmMasterHeight in the EXTTEXTMETRIC structure.
In other words, the etmMasterHeight value is expressed in font
units rather than device units.
In PFM files for Type-1 fonts the value should be set to
0x03E8 (1000). '''),
    ('h','etmCapHeight',
        '''This field contains the height for uppercase characters
in the font (the value is in font units). Typically, the
character 'H' is used for measurement purposes.
For Type-1 fonts you may find this value in the AFM file. '''),
    ('h','etmXHeight',
        '''This field contains the height for lowercase characters
in the font (the value is in font units). Typically, the
character 'x' is used for measurement purposes.
For Type-1 fonts you may find this value in the AFM file. '''),
    ('h','etmLowerCaseAscent',
        '''This field contains the distance (in font units) that
the ascender of lowercase letters extends above the baseline.
This distance is typically specified for a lowercase character 'd'.
For Type-1 fonts you may find this value in the AFM file. '''),
    ('h','etmLowerCaseDescent',
        '''This field contains the distance (in font units) that
the descender of lowercase letters extends below the baseline.
This distance is typically specified for a lowercase character 'p'.
For Type-1 fonts you may find this value in the AFM file. '''),
    ('h','etmSlant',
        '''This field contains the angle in tenth of degrees clockwise
from the upright version of the font. The value is typically not zero only for
an italic or oblique font.
For Type-1 fonts you may find this value in the AFM file
(search for the entry 'ItalicAngle' and multiply it by 10). '''),
    ('h','etmSuperScript',
        '''This field contains the recommended amount (in font units)
to offset superscript characters from the baseline. This amount
is typically specified by a negative offset. '''),
    ('h','etmSubScript',
        '''This field contains the recommended amount (in font units)
to offset subscript characters from the baseline. This amount
is typically specified by a positive offset. '''),
    ('h','etmSuperScriptSize',
        '''This field contains the recommended size (in font units)
for superscript characters in the font. '''),
    ('h','etmSubScriptSize',
        '''This field contains the recommended size (in font units)
for subscript characters in the font. '''),
    ('h','etmUnderlineOffset',
        '''This field contains the offset (in font units) downward
from the baseline where the top of a single underline bar
should appear.
For Type-1 fonts you may find this value in the AFM file. '''),
    ('h','etmUnderlineWidth',
        '''This field contains the thickness (in font units) of the underline bar.
For Type-1 fonts you may find this value in the AFM file. '''),
    ('h','etmDoubleUpperUnderlineOffset',
        '''This field contains the offset (in font units) downward from
the baseline where the top of the upper, double underline bar should
appear. '''),
    ('h','etmDoubleLowerUnderlineOffset',
        '''This field contains the offset (in font units) downward
from the baseline where the top of the lower, double underline
bar should appear. '''),
    ('h','etmDoubleUpperUnderlineWidth',
        '''This field contains the thickness (in font units) of the
upper, double underline bar. '''),
    ('h','etmDoubleLowerUnderlineWidth',
        '''This field contains the thickness (in font units) of the
lower, double underline bar. '''),
    ('h','etmStrikeOutOffset',
        '''This field contains the offset (in font units) upward from
the baseline where the top of a strikeout bar should appear. '''),
    ('h','etmStrikeOutWidth',
        '''This field contains the thickness (in font units) of the
strikeout bar. '''),
    ('H','etmKernPairs',
        '''This field contains the number of kerning pairs defined
in the KerningPairs table in this PFM file. The number (and
therefore the table) may not be greater than 512. If the PFM
file doesn't contain a KerningPairs table the value is zero. '''),
    ('H','etmKernTracks',
        '''This field contains the number of kerning tracks defined in
the KerningTracks table in this PFM file. The number (and therefore the
table) may not be greater than 16. If the PFM file doesn't contain
a KerningTracks table the value is zero. '''),
    )
#'H','ExtentTable[]'
#The ExtentTable table must be present in a PFM file for a Type-1 font,
#it contains the unscaled widths (in 1/1000's of an em) of the characters
#in the font. The table consists of (dfLastChar - dfFirstChar + 1) entries
#of type WORD (dfFirstChar and dfLastChar can be found in the PFMHEADER
#structure). For Type-1 fonts these widths can be found in the AFM file.
#DRIVERINFO DriverInfo
#The DriverInfo section must be present in a PFM file for a Type-1 font,
#in this case it consists of a null-terminated string containing the
#PostScript name of the font.
#PAIRKERN KerningPairs[]
#The KerningPairs table need not be present in a PFM file for a Type-1
#font, if it exists it contains etmKernPairs (from the EXTTEXTMETRIC
#structure) entries. Each of these entries looks as follows:
#B kpFirst This field contains the first (left) character of the kerning pair.
#B kpSecond This field contains the second (right) character of the kerning pair.
#h kpKernAmount This field contains the kerning amount in font units, the value
# is mostly negative.
#KERNTRACK KerningTracks[]
#The KerningTracks table need not be present in a PFM file for a Type-1 font, if it exists it contains etmKernTracks (from the EXTTEXTMETRIC structure) entries. Each of these entries looks as follows:
#h ktDegree This field contains the amount to change the character spacing. Negative values mean closer together, positive values mean farther apart.
#h ktMinSize This field contains the minimum font height (in device units) for which to use linear track kerning.
#h ktMinAmount This field contains the track kerning amount to use for font heights less or equal ktMinSize.
#h ktMaxSize This field contains the maximum font height (in device units) for which to use linear track kerning. For font heights between ktMinSize and ktMaxSize the track kerning amount has to increase linearily from ktMinAmount to ktMaxAmount.
#h ktMaxAmount This field contains the track kerning amount to use for font heights greater or equal ktMaxSize.
if __name__=='__main__':
    # Smoke test: parse and dump every PFM file shipped with Acrobat 4.0.
    from glob import glob
    for f in glob('/Program Files/Adobe/Acrobat 4.0/resource/font/pfm/*.pfm'):
        # FIX: 'print f' is Python-2-only syntax (SyntaxError on Python 3);
        # the parenthesised form works on both Python 2 and 3.
        print(f)
        p = PFM(f)
        p.dump()
| |
import string
import struct
import six
from collections import defaultdict
from dateutil import parser
def remove_symbols(text):
    """Return *text* with all ASCII punctuation characters removed.

    FIX: the original called ``text.translate(table, deletechars)`` —
    the Python 2 two-argument form — while building the table with the
    Python 3 ``str.maketrans`` method, so it raised on every interpreter.
    Python 3's single-argument ``translate`` with a three-argument
    ``maketrans`` deletion table does the intended job.
    """
    return text.translate(str.maketrans("", "", string.punctuation))
def int_or_none(text):
    """
    Coerce *text* to an int, or return None when conversion fails.

    FIX: the original only caught TypeError, but ``int('abc')`` raises
    ValueError, so malformed strings crashed instead of yielding None;
    both exceptions are now handled.

    NOTE: ``six.integer_types`` was collapsed to plain ``int`` — on
    Python 3 the two are identical.
    """
    if text is None:
        return None
    if isinstance(text, int):
        return text
    try:
        num = int(text)
    except (TypeError, ValueError):
        num = None
    return num
def hex_or_none(text):
    """
    Get an integer from a hex number (e.g. '0x1f' or 'ff') or return None.

    FIX: ``int(text, 16)`` raises TypeError for non-string input (e.g. a
    list), which the original let propagate; TypeError is now handled
    alongside ValueError so any unconvertible input yields None.

    NOTE: ``six.integer_types`` was collapsed to plain ``int`` — on
    Python 3 the two are identical.
    """
    if text is None:
        return None
    if isinstance(text, int):
        return text
    try:
        num = int(text, 16)
    except (TypeError, ValueError):
        num = None
    return num
def parse_zxid(text):
    """
    Split a zookeeper transaction id into its (epoch, count) parts.

    The zxid is the cluster-wide 64-bit transaction identifier: its upper
    32 bits hold the epoch number (bumped on leadership change) and its
    lower 32 bits hold the transaction counter proper.

    Returns None for missing input or for the all-ones ping sentinel;
    raises ValueError when the value does not fit a signed 64-bit int.
    """
    if text is None:
        return None
    # '-1' sentinel used for pings.
    if text == '0xffffffffffffffff':
        return None
    raw = int(text.strip(), 16)
    try:
        packed = struct.pack('>q', raw)
    except struct.error as err:
        raise ValueError("Unable to pack struct, from value: %s, input: %s - %s" % (raw, text, err))
    # Big-endian: first 4 bytes are the epoch, last 4 the counter.
    epoch, count = struct.unpack('>ii', packed)
    return epoch, count
def parse_admin_wchc(text):
    """
    Parse zookeeper admin command `wchc` output.

    wchc - watches by session.

    FIX: the docstring previously claimed this parsed `wchp`; the code
    groups paths under session ids (``data[session].append(path)``),
    which is the `wchc` (by-session) layout.

    Returns a dictionary where the keys are integer session ids, and the
    value is a list of watched znode paths, e.g. for input::

        0x15dc0117fd6633a
         /clusterstate.json
         /clusterprops.json
         /aliases.json
    """
    data = defaultdict(list)
    ZNODE_IDENT = '/'
    SESSION_IDENT = '0x'
    session = None
    path = None
    # FIX: use splitlines() (matches parse_admin_wchp) so '\r\n' input
    # does not leave stray '\r' on each line.
    for line in text.splitlines():
        line = line.strip()
        if not line:
            continue
        if line.startswith(ZNODE_IDENT):
            path = line
        elif line.startswith(SESSION_IDENT):
            session = hex_or_none(line)
        else:
            continue
        # Only record once both a session header and a path are known.
        if not all((session, path)):
            continue
        data[session].append(path)
    return data
def parse_admin_wchp(text):
    """
    Parse zookeeper admin command `wchp` output.

    wchp - watches by path name.

    Returns a dictionary mapping each watched znode path to the list of
    integer session ids watching it. The raw output interleaves a path
    line ("/...") with the session-id lines ("0x...") that watch it::

        /clusterprops.json
         0x25d9a46df6374da
         0x15d9a46de66261f
    """
    watches = defaultdict(list)
    current_path = None
    current_session = None
    for raw_line in text.splitlines():
        stripped = raw_line.strip()
        if not stripped:
            continue
        if stripped.startswith('/'):
            current_path = stripped
        elif stripped.startswith('0x'):
            current_session = hex_or_none(stripped)
        else:
            continue
        # Record only when both sides of the pairing are known.
        if current_session and current_path:
            watches[current_path].append(current_session)
    return watches
def parse_admin_cons(text):
    """
    Parse zookeeper admin command 'cons' output into a data structure.

    `cons` returns connection information for a particular server.

    sid is the session id
    lop is the last operation performed by the client
    est is the time the session was originally established
    to is the negotiated client timeout
    lcxid is the last client transaction id
    lzxid is the last zxid (-1 is used for pings)
    lresp is the last time that the server responded to a client request
    llat is the latency of the latest operation
    minlat/avglat/maxlat are the min/avg/max latency for the session in milliseconds

    Example input line::

        /10.17.72.197:44830[1](queued=0,recved=230457,sent=230457,sid=0x35b643799ab346b,lop=GETD,est=1496775835190,to=15000,lcxid=0x38435,lzxid=0x1500074205,lresp=1496785171003,llat=0,minlat=0,avglat=0,maxlat=15)

    Returns a list with one dict per connection, e.g.::

        [{'client': ['10.17.72.197', '44830'],
          'connections': 1,
          'queued': 0, 'recved': 230457, 'sent': 230457,
          'sid': 242476700121158763,
          'lop': 'GETD', 'est': 1496775835190, 'to': 15000,
          'lcxid': 230453, 'lzxid': (21, 475653),
          'lresp': 1496785171003,
          'llat': 0, 'minlat': 0, 'avglat': 0, 'maxlat': 15},
         ...]
    """
    data = []
    for line in text.splitlines():
        # Default every known field so partial lines still yield a
        # fully-keyed entry.
        entry = {
            'client': None,
            'connections': 0,
            'queued': 0,
            'recved': 0,
            'sent': 0,
            'sid': None,
            'lop': None,
            'est': None,
            'to': None,
            'lcxid': None,
            'lzxid': None,
            'lresp': None,
            'llat': None,
            'minlat': None,
            'avglat': None,
            'maxlat': None
        }
        line = line.strip()
        if not line.startswith('/'):
            continue
        # "/ip:port[n](k=v,...)" -> client address/port and connection count.
        addr, other = line.split('[', 1)
        addr, port = addr.split(':')
        entry['client'] = [addr[1:], port]
        cons_count, other = other.split(']', 1)
        entry['connections'] = int_or_none(cons_count)
        if not other.startswith('('):
            # FIX: removed the unreachable `continue` that followed this raise.
            raise ValueError("unexpected format... expected start char: '(' got: %s" % other)
        # Strip the surrounding parentheses; a truncated line may lack the
        # closing one.
        if other.endswith(')'):
            other = other[1:-1]
        else:
            other = other[1:]
        # FIX: was `other[1:].split(',')`, which stripped the already-removed
        # '(' a second time and chopped the first character of the first key,
        # producing a bogus 'ueued' entry while 'queued' kept its default.
        sess_vals = other.split(',')
        for val in sess_vals:
            if '=' not in val:
                continue
            key, strval = val.strip().split('=')
            entry[key] = strval
        # Coerce the raw string values to their proper types.
        entry['queued'] = int_or_none(entry['queued'])
        entry['recved'] = int_or_none(entry['recved'])
        entry['sent'] = int_or_none(entry['sent'])
        entry['sid'] = hex_or_none(entry['sid'])
        entry['est'] = int_or_none(entry['est'])
        entry['to'] = int_or_none(entry['to'])
        entry['lcxid'] = hex_or_none(entry['lcxid'])
        entry['lzxid'] = parse_zxid(entry['lzxid'])
        entry['lresp'] = int_or_none(entry['lresp'])
        entry['llat'] = int_or_none(entry['llat'])
        entry['minlat'] = int_or_none(entry['minlat'])
        entry['avglat'] = int_or_none(entry['avglat'])
        entry['maxlat'] = int_or_none(entry['maxlat'])
        data.append(entry)
    return data
def parse_admin_dump(text):
    """
    Parse zookeeper admin command `dump` output.

    The output has two sections. "SessionTracker dump:" lists expiry
    buckets ("<count> expire at <date>:") each followed by the session
    ids expiring in that bucket; "ephemeral nodes dump:" lists each
    session id (suffixed with ':') followed by the ephemeral znode
    paths it owns.

    Example input::

        SessionTracker dump:
        Session Sets (13):
        1 expire at Tue Jun 06 22:51:28 UTC 2017:
            0x15c7ea7f00e002c
        ...
        ephemeral nodes dump:
        Sessions with Ephemerals (3):
        0x25b643799ff3464:
            /live_nodes/10.50.65.133:8983_solr

    Returns a dict shaped like::

        {'sessions': [{'session': <int session id>,
                       'expires': <datetime from dateutil.parser>}, ...],
         'ephemerals': {<int session id>: [<znode path>, ...], ...}}
    """
    SESSIONS = 'sessions'
    EPHEMERALS = 'ephemerals'
    # mode tracks which section we are in; bucket is overloaded: it holds
    # the current expiry datetime while in the sessions section, and the
    # current owning session id while in the ephemerals section.
    mode = None
    bucket = None
    data = {
        SESSIONS: [],
        EPHEMERALS: {},
    }
    for line in text.splitlines():
        if 'sessiontracker dump' in line.lower():
            mode = SESSIONS
        elif 'ephemeral nodes dump' in line.lower():
            mode = EPHEMERALS
        elif 'expire at' in line:
            # Bucket header, e.g. "6 expire at Tue Jun 06 22:51:32 UTC 2017:"
            count, date = line.split('expire at')
            if date.endswith(':'):
                date = date[:-1]
            date = parser.parse(date)
            bucket = date
        elif '0x' in line:
            # Session id line; in the ephemerals section it carries a
            # trailing ':' which must be dropped before hex parsing.
            if line.endswith(':'):
                line = line[:-1]
            session = hex_or_none(line)
            if mode == SESSIONS:
                data[SESSIONS].append({'session': session, 'expires': bucket})
            elif mode == EPHEMERALS:
                # Subsequent '/' lines belong to this session.
                bucket = session
        elif mode == EPHEMERALS and line.strip().startswith('/'):
            # Ephemeral znode path owned by the current session (bucket).
            if bucket not in data[EPHEMERALS]:
                data[EPHEMERALS][bucket] = []
            data[EPHEMERALS][bucket].append(line.strip())
    return data
| |
""" openconfig_platform_types
This module defines data types (e.g., YANG identities)
to support the OpenConfig component inventory model.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class Openconfig_Hardware_ComponentIdentity(object):
    """
    Root identity for hardware-related components of a managed device.
    Derived identities are partially based on contents of the IANA
    Entity MIB.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        # Identity classes carry no instance state.
        pass

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        table = meta_mod._meta_table
        return table['Openconfig_Hardware_ComponentIdentity']['meta_info']
class Openconfig_Software_ComponentIdentity(object):
    """
    Root identity for software\\-related components in a managed
    device.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        # Identity classes carry no instance state.
        pass

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        table = meta_mod._meta_table
        return table['Openconfig_Software_ComponentIdentity']['meta_info']
class CpuIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a processing unit, e.g. a management processor.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(CpuIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['CpuIdentity']['meta_info']
class SensorIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a physical sensor, e.g. a temperature sensor in a
    chassis.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(SensorIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['SensorIdentity']['meta_info']
class ModuleIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a replaceable hardware module, e.g. a daughtercard.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(ModuleIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['ModuleIdentity']['meta_info']
class TransceiverIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a pluggable module present in a port.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(TransceiverIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['TransceiverIdentity']['meta_info']
class FanIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a cooling fan or other heat\\-reduction component.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(FanIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['FanIdentity']['meta_info']
class Operating_SystemIdentity(Openconfig_Software_ComponentIdentity):
    """
    Identity for an operating system running on a component.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(Operating_SystemIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['Operating_SystemIdentity']['meta_info']
class Power_SupplyIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a component that is supplying power to the device.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(Power_SupplyIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['Power_SupplyIdentity']['meta_info']
class BackplaneIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a backplane component that aggregates traffic,
    typically contained in a chassis component.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(BackplaneIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['BackplaneIdentity']['meta_info']
class LinecardIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a linecard component, typically inserted into a
    chassis slot.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(LinecardIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['LinecardIdentity']['meta_info']
class ChassisIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a chassis component, typically with multiple
    slots / shelves.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(ChassisIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['ChassisIdentity']['meta_info']
class PortIdentity(Openconfig_Hardware_ComponentIdentity):
    """
    Identity for a physical port, e.g. for attaching pluggables and
    networking cables.
    """

    _prefix = 'oc-platform-types'
    _revision = '2016-06-06'

    def __init__(self):
        super(PortIdentity, self).__init__()

    @staticmethod
    def _meta_info():
        from ydk.models.openconfig._meta import _openconfig_platform_types as meta_mod
        return meta_mod._meta_table['PortIdentity']['meta_info']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.