gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.exceptions import ChangedBehaviorWarning
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.multiclass import OneVsRestClassifier
# toy sample: two linearly separable classes in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
# test points and their expected labels for the toy problem
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
# shuffle iris once at import time so labels are not ordered; seed is fixed
# so the hard-coded numerical expectations below stay stable
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
    """Check the fitted attributes of a linear SVC on the toy problem."""
    model = svm.SVC(kernel='linear').fit(X, Y)
    # the toy problem is symmetric: two support vectors (one per class)
    # with opposite dual coefficients and a zero intercept
    assert_array_equal(model.support_, [1, 3])
    assert_array_equal(model.support_vectors_, (X[1], X[3]))
    assert_array_equal(model.dual_coef_, [[-0.25, .25]])
    assert_array_equal(model.intercept_, [0.])
    # training points must be classified perfectly
    assert_array_equal(model.predict(X), Y)
def test_libsvm_iris():
    """Check SVC consistency on the shuffled iris dataset and exercise the
    low-level ``svm.libsvm`` API (fit/predict/cross_validation) directly."""
    # Check consistency on dataset iris.
    # shuffle the dataset so that labels are not ordered
    for k in ('linear', 'rbf'):
        clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
        # classes_ must come out sorted
        assert_array_equal(clf.classes_, np.sort(clf.classes_))
    # check also the low-level API
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
    pred = svm.libsvm.predict(iris.data, *model)
    assert_greater(np.mean(pred == iris.target), .95)
    model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
                           kernel='linear')
    pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
    assert_greater(np.mean(pred == iris.target), .95)
    pred = svm.libsvm.cross_validation(iris.data,
                                       iris.target.astype(np.float64), 5,
                                       kernel='linear',
                                       random_seed=0)
    assert_greater(np.mean(pred == iris.target), .95)
    # If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
    # thread calling this wrapper calling `srand` concurrently).
    pred2 = svm.libsvm.cross_validation(iris.data,
                                        iris.target.astype(np.float64), 5,
                                        kernel='linear',
                                        random_seed=0)
    assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
    """SVC and LinearSVC accept a single sample given as a 1-d array.

    NOTE(review): relies on the deprecated 1-d input path; the resulting
    deprecation warnings are silenced by @ignore_warnings.
    """
    # Test whether SVCs work on a single sample given as a 1-d array
    clf = svm.SVC().fit(X, Y)
    clf.predict(X[0])
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf.predict(X[0])
def test_precomputed():
    """SVC with a precomputed kernel.

    We test it with a toy dataset and with iris, comparing against the
    equivalent built-in linear kernel and a callable linear kernel.
    """
    clf = svm.SVC(kernel='precomputed')
    # Gram matrix for train data (square matrix)
    # (we use just a linear kernel)
    K = np.dot(X, np.array(X).T)
    clf.fit(K, Y)
    # Gram matrix for test data (rectangular matrix)
    KT = np.dot(T, np.array(X).T)
    pred = clf.predict(KT)
    # a Gram matrix with the wrong number of columns must be rejected
    assert_raises(ValueError, clf.predict, KT.T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.support_, [1, 3])
    assert_array_equal(clf.intercept_, [0])
    assert_array_equal(pred, true_result)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    KT = np.zeros_like(KT)
    for i in range(len(T)):
        for j in clf.support_:
            KT[i, j] = np.dot(T[i], X[j])
    pred = clf.predict(KT)
    assert_array_equal(pred, true_result)
    # same as before, but using a callable function instead of the kernel
    # matrix. kernel is just a linear kernel
    kfunc = lambda x, y: np.dot(x, y.T)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(X, Y)
    pred = clf.predict(T)
    assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
    assert_array_equal(clf.intercept_, [0])
    assert_array_almost_equal(clf.support_, [1, 3])
    assert_array_equal(pred, true_result)
    # test a precomputed kernel with the iris dataset
    # and check parameters against a linear SVC
    clf = svm.SVC(kernel='precomputed')
    clf2 = svm.SVC(kernel='linear')
    K = np.dot(iris.data, iris.data.T)
    clf.fit(K, iris.target)
    clf2.fit(iris.data, iris.target)
    pred = clf.predict(K)
    assert_array_almost_equal(clf.support_, clf2.support_)
    assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
    assert_array_almost_equal(clf.intercept_, clf2.intercept_)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    # Gram matrix for test data but compute KT[i,j]
    # for support vectors j only.
    K = np.zeros_like(K)
    for i in range(len(iris.data)):
        for j in clf.support_:
            K[i, j] = np.dot(iris.data[i], iris.data[j])
    pred = clf.predict(K)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
    clf = svm.SVC(kernel=kfunc)
    clf.fit(iris.data, iris.target)
    # BUG FIX: the original asserted the stale `pred` computed from the
    # precomputed-kernel model above; re-predict with the callable kernel
    # so this assertion actually exercises the model fitted here.
    pred = clf.predict(iris.data)
    assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
    """Basic fit/score sanity checks for the support vector regressors."""
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    # NOTE(review): svm.LinearSVR(C=10.) appears twice in this tuple —
    # presumably an accidental duplicate; confirm before removing.
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
    """LinearSVR and SVR(kernel='linear') give comparable coef_ norms and
    scores on the diabetes dataset."""
    # check that SVR(kernel='linear') and LinearSVC() give
    # comparable results
    diabetes = datasets.load_diabetes()
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score1 = lsvr.score(diabetes.data, diabetes.target)
    svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
    score2 = svr.score(diabetes.data, diabetes.target)
    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(svr.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)
def test_linearsvr_fit_sampleweight():
    """sample_weight semantics for LinearSVR: unit weights match no weights,
    and integer weights match repeating the corresponding rows."""
    # check correct result when sample_weight is 1
    # check that SVR(kernel='linear') and LinearSVC() give
    # comparable results
    diabetes = datasets.load_diabetes()
    n_samples = len(diabetes.target)
    unit_weight = np.ones(n_samples)
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                    sample_weight=unit_weight)
    score1 = lsvr.score(diabetes.data, diabetes.target)
    lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)
    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)
    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                           sample_weight=random_weight)
    score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
                               sample_weight=random_weight)
    X_flat = np.repeat(diabetes.data, random_weight, axis=0)
    y_flat = np.repeat(diabetes.target, random_weight, axis=0)
    lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
    score4 = lsvr_flat.score(X_flat, y_flat)
    assert_almost_equal(score3, score4, 2)
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
    """Check hard-coded fitted attributes of a default OneClassSVM on the
    toy problem; all test points are classified as outliers."""
    # Test OneClassSVM
    clf = svm.OneClassSVM()
    clf.fit(X)
    pred = clf.predict(T)
    assert_array_almost_equal(pred, [-1, -1, -1])
    assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
    assert_array_almost_equal(clf.dual_coef_,
                              [[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
                              decimal=3)
    # coef_ is undefined for the default (non-linear) kernel
    assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
    """OneClassSVM decision_function sign must agree with predict on both
    regular and abnormal observations."""
    rnd = check_random_state(2)
    # Generate train data: two clusters around (+2, +2) and (-2, -2)
    X = 0.3 * rnd.randn(100, 2)
    X_train = np.r_[X + 2, X - 2]
    # Generate some regular novel observations
    X = 0.3 * rnd.randn(20, 2)
    X_test = np.r_[X + 2, X - 2]
    # Generate some abnormal novel observations
    X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
    # fit the model (the original also built a default OneClassSVM here
    # that was immediately discarded; that dead statement is removed)
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X_train)
    # predict things
    y_pred_test = clf.predict(X_test)
    assert_greater(np.mean(y_pred_test == 1), .9)
    y_pred_outliers = clf.predict(X_outliers)
    assert_greater(np.mean(y_pred_outliers == -1), .9)
    # decision_function must be positive exactly where predict says inlier
    dec_func_test = clf.decision_function(X_test)
    assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
    dec_func_outliers = clf.decision_function(X_outliers)
    assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
    """Mutating ``_dual_coef_`` at run time must change predictions."""
    # Make sure some tweaking of parameters works.
    # We change clf.dual_coef_ at run time and expect .predict() to change
    # accordingly. Notice that this is not trivial since it involves a lot
    # of C/Python copying in the libsvm bindings.
    # The success of this test ensures that the mapping between libsvm and
    # the python classifier is complete.
    clf = svm.SVC(kernel='linear', C=1.0)
    clf.fit(X, Y)
    assert_array_equal(clf.dual_coef_, [[-.25, .25]])
    assert_array_equal(clf.predict([[-.1, -.1]]), [1])
    clf._dual_coef_ = np.array([[.0, 1.]])
    assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
    """predict_proba rows sum to one, agree with predict, and are consistent
    with predict_log_proba for SVC and NuSVC."""
    # Predict probabilities using SVC
    # This uses cross validation, so we use a slightly bigger testing set.
    for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
                svm.NuSVC(probability=True, random_state=0)):
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        assert_array_almost_equal(
            np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
        assert_true(np.mean(np.argmax(prob_predict, 1)
                            == clf.predict(iris.data)) > 0.9)
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
    """decision_function computed in Python (from coef_/dual_coef_) matches
    the libsvm implementation for linear and rbf kernels."""
    # Test decision_function
    # Sanity check, test that decision_function implemented in python
    # returns the same as the one in libsvm
    # multi class:
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(iris.data, iris.target)
    dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))
    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)
    # kernel binary:
    clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
    clf.fit(X, Y)
    rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
    dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
    """Shape of decision_function under 'ovr' and 'ovo', plus the
    deprecation warning for the legacy default shape."""
    # check that decision_function_shape='ovr' gives
    # correct shape and is consistent with predict
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(iris.data, iris.target)
    dec = clf.decision_function(iris.data)
    assert_equal(dec.shape, (len(iris.data), 3))
    assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
    # with five classes:
    X, y = make_blobs(n_samples=80, centers=5, random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovr').fit(X_train, y_train)
    dec = clf.decision_function(X_test)
    assert_equal(dec.shape, (len(X_test), 5))
    assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape with decision_function_shape='ovo': n_classes choose 2
    clf = svm.SVC(kernel='linear', C=0.1,
                  decision_function_shape='ovo').fit(X_train, y_train)
    dec = clf.decision_function(X_train)
    assert_equal(dec.shape, (len(X_train), 10))
    # check deprecation warning
    clf = svm.SVC(kernel='linear', C=0.1).fit(X_train, y_train)
    msg = "change the shape of the decision function"
    dec = assert_warns_message(ChangedBehaviorWarning, msg,
                               clf.decision_function, X_train)
    assert_equal(dec.shape, (len(X_train), 10))
def test_svr_predict():
    """SVR.predict matches the decision function recomputed in Python for
    linear and rbf kernels."""
    # Test SVR's decision_function
    # Sanity check, test that predict implemented in python
    # returns the same as the one in libsvm
    X = iris.data
    y = iris.target
    # linear kernel
    reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
    dec = np.dot(X, reg.coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
    # rbf kernel
    reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
    rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
    dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
    assert_array_almost_equal(dec.ravel(), reg.predict(X).ravel())
def test_weight():
    """class_weight shifts predictions toward the up-weighted class."""
    # Test class weights
    clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weights to class 1
    clf.fit(X, Y)
    # so all predicted values belong to class 2
    assert_array_almost_equal(clf.predict(X), [2] * 6)
    X_, y_ = make_classification(n_samples=200, n_features=10,
                                 weights=[0.833, 0.167], random_state=2)
    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0), svm.SVC()):
        clf.set_params(class_weight={0: .1, 1: 10})
        clf.fit(X_[:100], y_[:100])
        y_pred = clf.predict(X_[100:])
        assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
    """Per-sample weights flip predictions, and rescaling all weights is
    equivalent to rescaling C."""
    # Test weights on individual samples
    # TODO: check on NuSVR, OneClass, etc.
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])
    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])
    # test that rescaling all samples is the same as changing C
    clf = svm.SVC()
    clf.fit(X, Y)
    dual_coef_no_weight = clf.dual_coef_
    clf.set_params(C=100)
    clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
    assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
    """class_weight='balanced' improves macro-F1 on an artificially
    imbalanced version of iris, for SVC, LinearSVC and LogisticRegression."""
    # Test class weights for imbalanced data
    from sklearn.linear_model import LogisticRegression
    # We take as dataset the two-dimensional projection of iris so
    # that it is not separable and remove half of predictors from
    # class 1.
    # We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels where a range [0..K).
    from sklearn.utils import compute_class_weight
    X, y = iris.data[:, :2], iris.target + 1
    unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
    classes = np.unique(y[unbalanced])
    class_weights = compute_class_weight('balanced', classes, y[unbalanced])
    assert_true(np.argmax(class_weights) == 2)
    for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
                LogisticRegression()):
        # check that score is better when class='balanced' is set.
        y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
        clf.set_params(class_weight='balanced')
        y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
        assert_true(metrics.f1_score(y, y_pred, average='macro')
                    <= metrics.f1_score(y, y_pred_balanced,
                                        average='macro'))
def test_bad_input():
    """Deficient inputs (bad C/nu, shape mismatches, sparse-after-dense)
    raise ValueError; non-contiguous arrays still work."""
    # Test that it gives proper exception on deficient input
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X, Y)
    Y2 = Y[:-1]  # wrong dimensions for labels
    assert_raises(ValueError, clf.fit, X, Y2)
    # Test with arrays that are non-contiguous.
    for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
        Xf = np.asfortranarray(X)
        assert_false(Xf.flags['C_CONTIGUOUS'])
        yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
        yf = yf[:, -1]
        assert_false(yf.flags['F_CONTIGUOUS'])
        assert_false(yf.flags['C_CONTIGUOUS'])
        clf.fit(Xf, yf)
        assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
    clf = svm.SVC(kernel='precomputed')
    assert_raises(ValueError, clf.fit, X, Y)
    # sample_weight bad dimensions
    clf = svm.SVC()
    assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
    # predict with sparse input when trained with dense
    clf = svm.SVC().fit(X, Y)
    assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
    Xt = np.array(X).T
    clf.fit(np.dot(X, Xt), Y)
    assert_raises(ValueError, clf.predict, X)
    clf = svm.SVC()
    clf.fit(X, Y)
    assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
    """A sparse Gram matrix passed to a precomputed-kernel SVC must raise
    a TypeError mentioning 'Sparse precomputed'."""
    gram = sparse.csr_matrix([[1, 0], [0, 1]])
    model = svm.SVC(kernel='precomputed')
    try:
        model.fit(gram, [0, 1])
    except TypeError as exc:
        assert_in("Sparse precomputed", str(exc))
    else:
        # fit() returning normally means the error was not raised
        assert not "reached"
def test_linearsvc_parameters():
    """Every (loss, penalty, dual) combination either fits or raises the
    documented 'Unsupported set of arguments' ValueError."""
    # Test possible parameter combinations in LinearSVC
    # Generate list of possible parameter combinations
    losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
    penalties, duals = ['l1', 'l2', 'bar'], [True, False]
    X, y = make_classification(n_samples=5, n_features=5)
    for loss, penalty, dual in itertools.product(losses, penalties, duals):
        clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
        if ((loss, penalty) == ('hinge', 'l1') or
                (loss, penalty, dual) == ('hinge', 'l2', False) or
                (penalty, dual) == ('l1', True) or
                loss == 'foo' or penalty == 'bar'):
            assert_raises_regexp(ValueError,
                                 "Unsupported set of arguments.*penalty='%s.*"
                                 "loss='%s.*dual=%s"
                                 % (penalty, loss, dual),
                                 clf.fit, X, y)
        else:
            clf.fit(X, y)
    # Incorrect loss value - test if explicit error message is raised
    assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
                         svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
    """Legacy loss aliases 'l1'/'l2' emit the documented DeprecationWarning
    for both LinearSVC and LinearSVR."""
    X, y = [[0.0], [1.0]], [0, 1]
    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")
    # LinearSVC
    # loss l1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)
    # loss l2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
                         svm.LinearSVC(loss="l2").fit, X, y)
    # LinearSVR
    # loss l1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "epsilon_insensitive", "loss='l1'",
                                "1.0"),
                         svm.LinearSVR(loss="l1").fit, X, y)
    # loss l2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)
def test_linear_svx_uppercase_loss_penality_raises_error():
    """Upper-case loss/penalty values are rejected (matching is
    case-sensitive in _fit_liblinear)."""
    # Check if Upper case notation raises error at _fit_liblinear
    # which is called by fit
    X, y = [[0.0], [1.0]], [0, 1]
    assert_raise_message(ValueError, "loss='SQuared_hinge' is not supported",
                         svm.LinearSVC(loss="SQuared_hinge").fit, X, y)
    assert_raise_message(ValueError, ("The combination of penalty='L2'"
                                      " and loss='squared_hinge' is not supported"),
                         svm.LinearSVC(penalty="L2").fit, X, y)
def test_linearsvc():
    """Basic LinearSVC routines across penalty/loss/dual settings, plus its
    decision function on the toy problem."""
    # Test basic routines using LinearSVC
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    # by default should have intercept
    assert_true(clf.fit_intercept)
    assert_array_equal(clf.predict(T), true_result)
    assert_array_almost_equal(clf.intercept_, [0], decimal=3)
    # the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)
    # l2 penalty with dual formulation
    clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)
    # l2 penalty, l1 loss
    clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
    clf.fit(X, Y)
    assert_array_equal(clf.predict(T), true_result)
    # test also decision function
    dec = clf.decision_function(T)
    res = (dec > 0).astype(np.int) + 1
    assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
    """Crammer-Singer multi-class LinearSVC agrees closely with OvR and its
    decision function matches a manual dot-product computation."""
    # Test LinearSVC with crammer_singer multi-class svm
    ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
    cs_clf.fit(iris.data, iris.target)
    # similar prediction for ovr and crammer-singer:
    assert_true((ovr_clf.predict(iris.data) ==
                 cs_clf.predict(iris.data)).mean() > .9)
    # classifiers shouldn't be the same
    assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
    # test decision function
    assert_array_equal(cs_clf.predict(iris.data),
                       np.argmax(cs_clf.decision_function(iris.data), axis=1))
    dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
    assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_linearsvc_fit_sampleweight():
    """sample_weight semantics for LinearSVC: unit weights match no weights,
    and integer weights match repeating the corresponding rows."""
    # check correct result when sample_weight is 1
    n_samples = len(X)
    unit_weight = np.ones(n_samples)
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf_unitweight = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=unit_weight)
    # check if same as sample_weight=None
    assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
    assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)
    # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvc_unflat = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=random_weight)
    pred1 = lsvc_unflat.predict(T)
    X_flat = np.repeat(X, random_weight, axis=0)
    y_flat = np.repeat(Y, random_weight, axis=0)
    lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
    pred2 = lsvc_flat.predict(T)
    assert_array_equal(pred1, pred2)
    assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
def test_crammer_singer_binary():
    """The Crammer-Singer formulation also handles binary problems."""
    data, target = make_classification(n_classes=2, random_state=0)
    for intercept in (True, False):
        model = svm.LinearSVC(fit_intercept=intercept,
                              multi_class="crammer_singer",
                              random_state=0)
        accuracy = model.fit(data, target).score(data, target)
        assert_greater(accuracy, 0.9)
def test_linearsvc_iris():
    """LinearSVC gives plausible predictions on iris and supports symbolic
    (string) class names."""
    # Test that LinearSVC gives plausible predictions on the iris dataset
    # Also, test symbolic class names (classes_).
    target = iris.target_names[iris.target]
    clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
    assert_equal(set(clf.classes_), set(iris.target_names))
    assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
    dec = clf.decision_function(iris.data)
    pred = iris.target_names[np.argmax(dec, 1)]
    assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
    """Dense liblinear honours the intercept_scaling parameter: low scaling
    regularizes the intercept to ~0, high scaling frees it."""
    # Test that dense liblinear honours intercept_scaling param
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
                     dual=False, C=4, tol=1e-7, random_state=0)
    assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
    assert_true(clf.fit_intercept)
    # when intercept_scaling is low the intercept value is highly "penalized"
    # by regularization
    clf.intercept_scaling = 1
    clf.fit(X, y)
    assert_almost_equal(clf.intercept_, 0, decimal=5)
    # when intercept_scaling is sufficiently high, the intercept value
    # is not affected by regularization
    clf.intercept_scaling = 100
    clf.fit(X, y)
    intercept1 = clf.intercept_
    assert_less(intercept1, -1)
    # when intercept_scaling is sufficiently high, the intercept value
    # doesn't depend on intercept_scaling value
    clf.intercept_scaling = 1000
    clf.fit(X, y)
    intercept2 = clf.intercept_
    assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
    """Re-assigning coef_/intercept_ copies leaves decision_function
    unchanged, for both multi-class and binary models."""
    # multi-class case
    clf = svm.LinearSVC().fit(iris.data, iris.target)
    values = clf.decision_function(iris.data)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(iris.data)
    assert_array_almost_equal(values, values2)
    # binary-class case
    X = [[2, 1],
         [3, 1],
         [1, 3],
         [2, 3]]
    y = [0, 0, 1, 1]
    clf = svm.LinearSVC().fit(X, y)
    values = clf.decision_function(X)
    clf.coef_ = clf.coef_.copy()
    clf.intercept_ = clf.intercept_.copy()
    values2 = clf.decision_function(X)
    assert_array_equal(values, values2)
def test_immutable_coef_property():
    """coef_ of libsvm-based models is a read-only derived property: it can
    be neither rebound nor mutated in place."""
    # Check that primal coef modification are not silently ignored
    svms = [
        svm.SVC(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
        svm.SVR(kernel='linear').fit(iris.data, iris.target),
        svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
        svm.OneClassSVM(kernel='linear').fit(iris.data),
    ]
    for clf in svms:
        assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
        assert_raises((RuntimeError, ValueError),
                      clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
    """Fitting with verbose=1 must not crash; liblinear's chatter is
    redirected away from stdout for the duration of the fit.

    BUG FIX: the original leaked three file descriptors (the saved dup of
    stdout and both ends of the pipe) and did not restore stdout if fit
    raised; use try/finally and close every descriptor we created.
    """
    import os
    stdout = os.dup(1)          # save original stdout
    rpipe, wpipe = os.pipe()
    os.dup2(wpipe, 1)           # replace stdout with the pipe's write end
    try:
        clf = svm.LinearSVC(verbose=1)
        clf.fit(X, Y)
    finally:
        os.dup2(stdout, 1)      # restore original stdout
        os.close(stdout)
        os.close(rpipe)
        os.close(wpipe)
def test_svc_clone_with_callable_kernel():
    """A cloned SVC with a callable linear kernel matches the built-in
    'linear' kernel on all fitted attributes and prediction outputs."""
    # create SVM with callable linear kernel, check that results are the same
    # as with built-in linear kernel
    svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
                           probability=True, random_state=0,
                           decision_function_shape='ovr')
    # clone for checking clonability with lambda functions..
    svm_cloned = base.clone(svm_callable)
    svm_cloned.fit(iris.data, iris.target)
    svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
                          decision_function_shape='ovr')
    svm_builtin.fit(iris.data, iris.target)
    assert_array_almost_equal(svm_cloned.dual_coef_,
                              svm_builtin.dual_coef_)
    assert_array_almost_equal(svm_cloned.intercept_,
                              svm_builtin.intercept_)
    assert_array_equal(svm_cloned.predict(iris.data),
                       svm_builtin.predict(iris.data))
    assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
                              svm_builtin.predict_proba(iris.data),
                              decimal=4)
    assert_array_almost_equal(svm_cloned.decision_function(iris.data),
                              svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
    """A callable kernel returning a wrongly-shaped matrix makes fit raise."""
    model = svm.SVC(kernel=lambda x, y: x)
    assert_raises(ValueError, model.fit, X, Y)
def test_timeout():
    """max_iter=1 triggers a ConvergenceWarning during fit."""
    a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
                random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
    """Calling predict before fit raises a 'not fitted' error naming the
    estimator class."""
    X = "foo!"  # input validation not required when SVM not fitted
    clf = svm.SVC()
    assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
    clf = svm.NuSVR()
    assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
                         clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
    """Two identically-seeded fits yield identical predict_proba output."""
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
    """A non-converged LinearSVC warns and reports n_iter_ == max_iter."""
    # Test that warnings are raised if model does not converge
    lsvc = svm.LinearSVC(max_iter=2, verbose=1)
    assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
    assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
    """Linear SVR variants expose coef_ with the right sign: predictions
    equal X @ coef_ + intercept_."""
    # Test that SVR(kernel="linear") has coef_ with the right sign.
    # Non-regression test for #2933.
    X = np.random.RandomState(21).randn(10, 3)
    y = np.random.RandomState(12).randn(10)
    for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
                svm.LinearSVR()]:
        svr.fit(X, y)
        assert_array_almost_equal(svr.predict(X),
                                  np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
    """Non-positive intercept_scaling values produce the documented
    ValueError message."""
    # Test that the right error message is thrown when intercept_scaling <= 0
    for i in [-1, 0]:
        lsvc = svm.LinearSVC(intercept_scaling=i)
        msg = ('Intercept scaling is %r but needs to be greater than 0.'
               ' To disable fitting an intercept,'
               ' set fit_intercept=False.' % lsvc.intercept_scaling)
        assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
    """With fit_intercept=False the fitted intercept is exactly zero."""
    model = svm.LinearSVC(fit_intercept=False)
    model.fit(X, Y)
    assert_equal(model.intercept_, 0.)
def test_hasattr_predict_proba():
    """predict_proba availability is controlled by the `probability` param,
    before and after fit; toggling it post-fit exposes the attribute but
    calling it raises NotFittedError."""
    # Method must be (un)available before or after fit, switched by
    # `probability` param
    G = svm.SVC(probability=True)
    assert_true(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_true(hasattr(G, 'predict_proba'))
    G = svm.SVC(probability=False)
    assert_false(hasattr(G, 'predict_proba'))
    G.fit(iris.data, iris.target)
    assert_false(hasattr(G, 'predict_proba'))
    # Switching to `probability=True` after fitting should make
    # predict_proba available, but calling it must not work:
    G.probability = True
    assert_true(hasattr(G, 'predict_proba'))
    msg = "predict_proba is not available when fitted with probability=False"
    assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
def test_decision_function_shape_two_class():
    """OneVsRestClassifier wrapping SVC/NuSVC with ovr decision shape
    predicts one label per sample for 2- and 3-class problems."""
    for n_classes in [2, 3]:
        X, y = make_blobs(centers=n_classes, random_state=0)
        for estimator in [svm.SVC, svm.NuSVC]:
            clf = OneVsRestClassifier(estimator(
                decision_function_shape="ovr")).fit(X, y)
            assert_equal(len(clf.predict(X)), len(y))
| |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from neutron_vpnaas.openstack.common import fileutils
from neutron_vpnaas.openstack.common._i18n import _, _LE, _LI
LOG = logging.getLogger(__name__)
# Configuration options controlling locking behaviour; registered on the
# global CONF object at import time.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Enables or disables inter-process locks.'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("NEUTRON_VPNAAS_LOCK_PATH"),
               help='Directory to use for lock files.')
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
    """Override the default value of the ``lock_path`` config option."""
    cfg.set_defaults(util_opts, lock_path=lock_path)
class _FileLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # File object is created lazily in acquire(); `name` is the path
        # of the lock file on disk.
        self.lockfile = None
        self.fname = name

    def acquire(self):
        """Spin (with short sleeps) until the file lock is obtained.

        Returns True once the lock is held. Raises threading.ThreadError
        for IOErrors other than EACCES/EAGAIN.
        """
        basedir = os.path.dirname(self.fname)
        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)
        self.lockfile = open(self.fname, 'w')
        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {'filename': self.fname,
                                                 'exception': e})

    def __enter__(self):
        # Context-manager protocol: acquire on entry.
        self.acquire()
        return self

    def release(self):
        """Unlock and close the lock file; IOErrors are logged, not raised."""
        try:
            self.unlock()
            self.lockfile.close()
            LOG.debug('Released file lock "%s"', self.fname)
        except IOError:
            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                          self.fname)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager protocol: release on exit, even on exception.
        self.release()

    def exists(self):
        # True if the lock file currently exists on disk.
        return os.path.exists(self.fname)

    def trylock(self):
        # Platform-specific non-blocking lock; implemented by subclasses.
        raise NotImplementedError()

    def unlock(self):
        # Platform-specific unlock; implemented by subclasses.
        raise NotImplementedError()
class _WindowsLock(_FileLock):
    """File lock backed by msvcrt.locking() (Windows only)."""
    def trylock(self):
        # LK_NBLCK: non-blocking; raises IOError if the byte is locked.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _FcntlLock(_FileLock):
    """File lock backed by fcntl.lockf() (POSIX)."""
    def trylock(self):
        # LOCK_NB: non-blocking; raises IOError (EACCES/EAGAIN) if held.
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Select the lock implementation for this platform; the locking module
# (msvcrt or fcntl) is imported only where it actually exists.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _FcntlLock

# Registry of per-name semaphores used for intra-process locking.  Values
# are held weakly so unused semaphores can be garbage collected; access is
# guarded by _semaphores_lock.
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
def _get_lock_path(name, lock_file_prefix, lock_path=None):
# NOTE(mikal): the lock name cannot contain directory
# separators
name = name.replace(os.sep, '_')
if lock_file_prefix:
sep = '' if lock_file_prefix.endswith('-') else '-'
name = '%s%s%s' % (lock_file_prefix, sep, name)
local_lock_path = lock_path or CONF.lock_path
if not local_lock_path:
raise cfg.RequiredOptError('lock_path')
return os.path.join(local_lock_path, name)
def external_lock(name, lock_file_prefix=None, lock_path=None):
    """Return an (unacquired) inter-process lock object for ``name``.

    The backing lock file path is derived from ``name``, the optional
    ``lock_file_prefix`` and ``lock_path`` (defaulting to CONF.lock_path).
    """
    LOG.debug('Attempting to grab external lock "%(lock)s"',
              {'lock': name})
    path = _get_lock_path(name, lock_file_prefix, lock_path)
    return InterProcessLock(path)
def remove_external_lock_file(name, lock_file_prefix=None):
    """Delete the lock file backing an external lock.

    Helpful when many short-lived lock names would otherwise leave files
    behind.  The deletion happens under the matching in-process semaphore;
    removal failures are logged rather than raised.
    """
    with internal_lock(name):
        path = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(path)
        except OSError:
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': path})
def internal_lock(name):
    """Return the per-process semaphore registered under ``name``.

    Semaphores live in a WeakValueDictionary, so an entry disappears as
    soon as no caller holds a reference to it any more; the registry is
    consulted and updated under _semaphores_lock.
    """
    with _semaphores_lock:
        sem = _semaphores.get(name)
        if sem is None:
            sem = threading.Semaphore()
            _semaphores[name] = sem
            LOG.debug('Created new semaphore "%s"', name)
        else:
            LOG.debug('Using existing semaphore "%s"', name)

    return sem
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Acquire a named lock for the duration of a ``with`` block.

    A process-local semaphore for ``name`` is always held.  When
    ``external`` is true (and process locking has not been disabled via
    configuration) an inter-process file lock is additionally taken and
    yielded; otherwise the semaphore itself is yielded.

    :param lock_file_prefix: used to give lock files on disk a meaningful
        prefix.
    :param external: whether this lock should also synchronise across
        processes, so that of two workers running the same @synchronized
        method only one executes at a time.
    """
    semaphore = internal_lock(name)
    with semaphore:
        LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name})
        try:
            use_external = external and not CONF.disable_process_locking
            if not use_external:
                yield semaphore
            else:
                file_lock = external_lock(name, lock_file_prefix, lock_path)
                with file_lock:
                    yield file_lock
        finally:
            LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name})
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug('Got semaphore / lock "%(function)s"',
                              {'function': func.__name__})
                    return func(*args, **kwargs)
            finally:
                LOG.debug('Semaphore / lock released "%(function)s"',
                          {'function': func.__name__})
        return wrapper
    return decorator
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils
        synchronized = lockutils.synchronized_with_prefix('nova-')

        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """
    # functools.partial preserves synchronized()'s remaining signature.
    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)
def main(argv):
    """Create a dir for locks and pass it to command from arguments.

    If you run this:

        python -m openstack.common.lockutils python setup.py testr <etc>

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.

    :param argv: full argument vector; ``argv[1:]`` is the command to run.
    :returns: the command's exit status, or 2 when no command was given.
    """
    if len(argv) < 2:
        # Previously this fell through to subprocess.call([]) and crashed
        # with an unrelated error; fail cleanly with a usage message.
        sys.stderr.write("usage: %s <command> [<arg> ...]\n" % argv[0])
        return 2
    lock_dir = tempfile.mkdtemp()
    os.environ["NEUTRON_VPNAAS_LOCK_PATH"] = lock_dir
    try:
        ret_val = subprocess.call(argv[1:])
    finally:
        # Always clean up the scratch lock directory, even if the child
        # command raised or was interrupted.
        shutil.rmtree(lock_dir, ignore_errors=True)
    return ret_val


if __name__ == '__main__':
    sys.exit(main(sys.argv))
| |
# -*- test-case-name: twisted.test.test_sslverify -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
# Copyright (c) 2005-2008 Twisted Matrix Laboratories.
import itertools
from OpenSSL import SSL, crypto
from twisted.python import reflect, util
from twisted.python.hashlib import md5
from twisted.internet.defer import Deferred
from twisted.internet.error import VerifyError, CertificateError
# Private - shared between all OpenSSLCertificateOptions, counts up to provide
# a unique session id for each context
# Python 2 idiom: bind the iterator's .next method so each call returns the
# next integer in the sequence.
_sessionCounter = itertools.count().next

# Accepted X509 name fields, mapping both short (e.g. 'CN') and long
# (e.g. 'commonName') spellings onto the canonical long name that is used
# as the storage key in DistinguishedName.
_x509names = {
    'CN': 'commonName',
    'commonName': 'commonName',
    'O': 'organizationName',
    'organizationName': 'organizationName',
    'OU': 'organizationalUnitName',
    'organizationalUnitName': 'organizationalUnitName',
    'L': 'localityName',
    'localityName': 'localityName',
    'ST': 'stateOrProvinceName',
    'stateOrProvinceName': 'stateOrProvinceName',
    'C': 'countryName',
    'countryName': 'countryName',
    'emailAddress': 'emailAddress'}
class DistinguishedName(dict):
    """
    Identify and describe an entity.

    Distinguished names are used to provide a minimal amount of identifying
    information about a certificate issuer or subject. They are commonly
    created with one or more of the following fields::

        commonName (CN)
        organizationName (O)
        organizationalUnitName (OU)
        localityName (L)
        stateOrProvinceName (ST)
        countryName (C)
        emailAddress
    """
    __slots__ = ()

    def __init__(self, **kw):
        for k, v in kw.iteritems():
            setattr(self, k, v)

    def _copyFrom(self, x509name):
        # Copy every recognised X509 name field present on x509name into
        # this DN.  (Removed a dead local ``d = {}`` that was never used.)
        for name in _x509names:
            value = getattr(x509name, name, None)
            if value is not None:
                setattr(self, name, value)

    def _copyInto(self, x509name):
        # Mirror our fields onto a pyOpenSSL X509Name object.
        for k, v in self.iteritems():
            setattr(x509name, k, v)

    def __repr__(self):
        return '<DN %s>' % (dict.__repr__(self)[1:-1])

    def __getattr__(self, attr):
        # Attribute access resolves short or long field names via the
        # canonical key; unknown fields raise AttributeError as usual.
        try:
            return self[_x509names[attr]]
        except KeyError:
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        assert type(attr) is str
        if attr not in _x509names:
            raise AttributeError("%s is not a valid OpenSSL X509 name field" % (attr,))
        realAttr = _x509names[attr]
        # Values must be ASCII; stored under the canonical long name.
        value = value.encode('ascii')
        assert type(value) is str
        self[realAttr] = value

    def inspect(self):
        """
        Return a multi-line, human-readable representation of this DN.
        """
        l = []
        lablen = 0
        def uniqueValues(mapping):
            return dict.fromkeys(mapping.itervalues()).keys()
        for k in uniqueValues(_x509names):
            label = util.nameToLabel(k)
            lablen = max(len(label), lablen)
            v = getattr(self, k, None)
            if v is not None:
                l.append((label, v))
        lablen += 2
        for n, (label, attr) in enumerate(l):
            l[n] = (label.rjust(lablen)+': '+ attr)
        return '\n'.join(l)

DN = DistinguishedName
class CertBase:
    """
    Shared behaviour for wrappers around pyOpenSSL certificate-like objects.
    """
    def __init__(self, original):
        self.original = original

    def _copyName(self, suffix):
        # Call e.g. self.original.get_subject() and copy the result into a
        # DistinguishedName of our own.
        accessor = getattr(self.original, 'get_' + suffix)
        name = DistinguishedName()
        name._copyFrom(accessor())
        return name

    def getSubject(self):
        """
        Retrieve the subject of this certificate.

        @rtype: L{DistinguishedName}
        @return: A copy of the subject of this certificate.
        """
        return self._copyName('subject')
def _handleattrhelper(Class, transport, methodName):
"""
(private) Helper for L{Certificate.peerFromTransport} and
L{Certificate.hostFromTransport} which checks for incompatible handle types
and null certificates and raises the appropriate exception or returns the
appropriate certificate object.
"""
method = getattr(transport.getHandle(),
"get_%s_certificate" % (methodName,), None)
if method is None:
raise CertificateError(
"non-TLS transport %r did not have %s certificate" % (transport, methodName))
cert = method()
if cert is None:
raise CertificateError(
"TLS transport %r did not have %s certificate" % (transport, methodName))
return Class(cert)
class Certificate(CertBase):
    """
    An x509 certificate.
    """
    def __repr__(self):
        return '<%s Subject=%s Issuer=%s>' % (self.__class__.__name__,
                                              self.getSubject().commonName,
                                              self.getIssuer().commonName)

    def __eq__(self, other):
        # Certificates compare equal when their serialised (ASN.1) forms do.
        if isinstance(other, Certificate):
            return self.dump() == other.dump()
        return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def load(Class, requestData, format=crypto.FILETYPE_ASN1, args=()):
        """
        Load a certificate from an ASN.1- or PEM-format string.

        @rtype: C{Class}
        """
        return Class(crypto.load_certificate(format, requestData), *args)
    load = classmethod(load)
    # Private alias so subclasses which override load() (e.g.
    # PrivateCertificate) can still reach this implementation.
    _load = load

    def dumpPEM(self):
        """
        Dump this certificate to a PEM-format data string.

        @rtype: C{str}
        """
        return self.dump(crypto.FILETYPE_PEM)

    def loadPEM(Class, data):
        """
        Load a certificate from a PEM-format data string.

        @rtype: C{Class}
        """
        return Class.load(data, crypto.FILETYPE_PEM)
    loadPEM = classmethod(loadPEM)

    def peerFromTransport(Class, transport):
        """
        Get the certificate for the remote end of the given transport.

        @type: L{ISystemHandle}
        @rtype: C{Class}
        @raise: L{CertificateError}, if the given transport does not have a peer
        certificate.
        """
        return _handleattrhelper(Class, transport, 'peer')
    peerFromTransport = classmethod(peerFromTransport)

    def hostFromTransport(Class, transport):
        """
        Get the certificate for the local end of the given transport.

        @param transport: an L{ISystemHandle} provider; the transport we will
        @rtype: C{Class}
        @raise: L{CertificateError}, if the given transport does not have a host
        certificate.
        """
        return _handleattrhelper(Class, transport, 'host')
    hostFromTransport = classmethod(hostFromTransport)

    def getPublicKey(self):
        """
        Get the public key for this certificate.

        @rtype: L{PublicKey}
        """
        return PublicKey(self.original.get_pubkey())

    def dump(self, format=crypto.FILETYPE_ASN1):
        # Serialise the underlying X509 object in the requested format.
        return crypto.dump_certificate(format, self.original)

    def serialNumber(self):
        """
        Retrieve the serial number of this certificate.

        @rtype: C{int}
        """
        return self.original.get_serial_number()

    def digest(self, method='md5'):
        """
        Return a digest hash of this certificate using the specified hash
        algorithm.

        @param method: One of C{'md5'} or C{'sha'}.
        @rtype: C{str}
        """
        return self.original.digest(method)

    def _inspect(self):
        # Textual summary of subject, issuer, serial number and digest
        # (public key details are added by inspect()).
        return '\n'.join(['Certificate For Subject:',
                          self.getSubject().inspect(),
                          '\nIssuer:',
                          self.getIssuer().inspect(),
                          '\nSerial Number: %d' % self.serialNumber(),
                          'Digest: %s' % self.digest()])

    def inspect(self):
        """
        Return a multi-line, human-readable representation of this
        Certificate, including information about the subject, issuer, and
        public key.
        """
        return '\n'.join((self._inspect(), self.getPublicKey().inspect()))

    def getIssuer(self):
        """
        Retrieve the issuer of this certificate.

        @rtype: L{DistinguishedName}
        @return: A copy of the issuer of this certificate.
        """
        return self._copyName('issuer')

    def options(self, *authorities):
        # Only meaningful with a private key; see PrivateCertificate.options.
        raise NotImplementedError('Possible, but doubtful we need this yet')
class CertificateRequest(CertBase):
    """
    An x509 certificate request.

    Certificate requests are given to certificate authorities to be signed and
    returned resulting in an actual certificate.
    """
    def load(Class, requestData, requestFormat=crypto.FILETYPE_ASN1):
        """
        Load a certificate request and check that it is self-signed.

        @raise VerifyError: if the request's signature cannot be verified
            with its own public key.
        @rtype: C{Class}
        """
        req = crypto.load_certificate_request(requestFormat, requestData)
        dn = DistinguishedName()
        dn._copyFrom(req.get_subject())
        if not req.verify(req.get_pubkey()):
            raise VerifyError("Can't verify that request for %r is self-signed." % (dn,))
        return Class(req)
    load = classmethod(load)

    def dump(self, format=crypto.FILETYPE_ASN1):
        # Serialise the underlying X509Req object in the requested format.
        return crypto.dump_certificate_request(format, self.original)
class PrivateCertificate(Certificate):
    """
    An x509 certificate and private key.
    """
    def __repr__(self):
        return Certificate.__repr__(self) + ' with ' + repr(self.privateKey)

    def _setPrivateKey(self, privateKey):
        # Attach the private key after verifying it matches our public key.
        if not privateKey.matches(self.getPublicKey()):
            raise VerifyError(
                "Certificate public and private keys do not match.")
        self.privateKey = privateKey
        return self

    def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
        """
        Create a new L{PrivateCertificate} from the given certificate data and
        this instance's private key.
        """
        return self.load(newCertData, self.privateKey, format)

    def load(Class, data, privateKey, format=crypto.FILETYPE_ASN1):
        # Unlike Certificate.load, the matching private key is required.
        return Class._load(data, format)._setPrivateKey(privateKey)
    load = classmethod(load)

    def inspect(self):
        # Certificate summary plus a line describing the private key.
        return '\n'.join([Certificate._inspect(self),
                          self.privateKey.inspect()])

    def dumpPEM(self):
        """
        Dump both public and private parts of a private certificate to
        PEM-format data.
        """
        return self.dump(crypto.FILETYPE_PEM) + self.privateKey.dump(crypto.FILETYPE_PEM)

    def loadPEM(Class, data):
        """
        Load both private and public parts of a private certificate from a
        chunk of PEM-format data.
        """
        return Class.load(data, KeyPair.load(data, crypto.FILETYPE_PEM),
                          crypto.FILETYPE_PEM)
    loadPEM = classmethod(loadPEM)

    def fromCertificateAndKeyPair(Class, certificateInstance, privateKey):
        # Wrap an existing Certificate and attach the given key pair.
        privcert = Class(certificateInstance.original)
        return privcert._setPrivateKey(privateKey)
    fromCertificateAndKeyPair = classmethod(fromCertificateAndKeyPair)

    def options(self, *authorities):
        """
        Build an L{OpenSSLCertificateOptions} using this certificate and its
        key.  When authority certificates are supplied, peer verification is
        enabled and a peer certificate is required.
        """
        options = dict(privateKey=self.privateKey.original,
                       certificate=self.original)
        if authorities:
            options.update(dict(verify=True,
                                requireCertificate=True,
                                caCerts=[auth.original for auth in authorities]))
        return OpenSSLCertificateOptions(**options)

    def certificateRequest(self, format=crypto.FILETYPE_ASN1,
                           digestAlgorithm='md5'):
        # Create a certificate request for this certificate's own subject,
        # signed with our private key.
        return self.privateKey.certificateRequest(
            self.getSubject(),
            format,
            digestAlgorithm)

    def signCertificateRequest(self,
                               requestData,
                               verifyDNCallback,
                               serialNumber,
                               requestFormat=crypto.FILETYPE_ASN1,
                               certificateFormat=crypto.FILETYPE_ASN1):
        # Act as a CA: sign requestData with our key, using our own subject
        # as the issuer DN.
        issuer = self.getSubject()
        return self.privateKey.signCertificateRequest(
            issuer,
            requestData,
            verifyDNCallback,
            serialNumber,
            requestFormat,
            certificateFormat)

    def signRequestObject(self, certificateRequest, serialNumber,
                          secondsToExpiry=60 * 60 * 24 * 365,  # One year
                          digestAlgorithm='md5'):
        # Sign an already-loaded CertificateRequest object, issued by our
        # own subject DN.
        return self.privateKey.signRequestObject(self.getSubject(),
                                                 certificateRequest,
                                                 serialNumber,
                                                 secondsToExpiry,
                                                 digestAlgorithm)
class PublicKey:
    """
    Wrapper around a pyOpenSSL public key (PKey) object.
    """
    def __init__(self, osslpkey):
        self.original = osslpkey
        # An empty certificate request carrying (just) this key, serialised
        # to ASN.1.  It serves as a canonical, comparable representation of
        # the key for matches() and keyHash().
        req1 = crypto.X509Req()
        req1.set_pubkey(osslpkey)
        self._emptyReq = crypto.dump_certificate_request(crypto.FILETYPE_ASN1, req1)

    def matches(self, otherKey):
        # Keys match when their canonical empty-request serialisations do.
        return self._emptyReq == otherKey._emptyReq

    # XXX This could be a useful method, but sometimes it triggers a segfault,
    # so we'll steer clear for now.
    # def verifyCertificate(self, certificate):
    #     """
    #     returns None, or raises a VerifyError exception if the certificate
    #     could not be verified.
    #     """
    #     if not certificate.original.verify(self.original):
    #         raise VerifyError("We didn't sign that certificate.")

    def __repr__(self):
        return '<%s %s>' % (self.__class__.__name__, self.keyHash())

    def keyHash(self):
        """
        MD5 hex digest of signature on an empty certificate request with this
        key.
        """
        return md5(self._emptyReq).hexdigest()

    def inspect(self):
        # One-line human-readable description used by Certificate.inspect().
        return 'Public Key with Hash: %s' % (self.keyHash(),)
class KeyPair(PublicKey):
    """
    A private/public key pair (extends L{PublicKey} with the private half).
    """
    def load(Class, data, format=crypto.FILETYPE_ASN1):
        # Load a private key from serialised data.
        return Class(crypto.load_privatekey(format, data))
    load = classmethod(load)

    def dump(self, format=crypto.FILETYPE_ASN1):
        return crypto.dump_privatekey(format, self.original)

    def __getstate__(self):
        # Pickle as the ASN.1 serialisation of the key.
        return self.dump()

    def __setstate__(self, state):
        self.__init__(crypto.load_privatekey(crypto.FILETYPE_ASN1, state))

    def inspect(self):
        # e.g. '1024-bit RSA Key Pair with Hash: ...'
        t = self.original.type()
        if t == crypto.TYPE_RSA:
            ts = 'RSA'
        elif t == crypto.TYPE_DSA:
            ts = 'DSA'
        else:
            ts = '(Unknown Type!)'
        L = (self.original.bits(), ts, self.keyHash())
        return '%s-bit %s Key Pair with Hash: %s' % L

    def generate(Class, kind=crypto.TYPE_RSA, size=1024):
        # Generate a fresh key pair of the given kind and size.
        pkey = crypto.PKey()
        pkey.generate_key(kind, size)
        return Class(pkey)

    def newCertificate(self, newCertData, format=crypto.FILETYPE_ASN1):
        # Wrap certificate data and this key into a PrivateCertificate.
        return PrivateCertificate.load(newCertData, self, format)
    generate = classmethod(generate)

    def requestObject(self, distinguishedName, digestAlgorithm='md5'):
        # Build and sign an X509 certificate request for the given DN.
        req = crypto.X509Req()
        req.set_pubkey(self.original)
        distinguishedName._copyInto(req.get_subject())
        req.sign(self.original, digestAlgorithm)
        return CertificateRequest(req)

    def certificateRequest(self, distinguishedName,
                           format=crypto.FILETYPE_ASN1,
                           digestAlgorithm='md5'):
        """Create a certificate request signed with this key.

        @return: a string, formatted according to the 'format' argument.
        """
        return self.requestObject(distinguishedName, digestAlgorithm).dump(format)

    def signCertificateRequest(self,
                               issuerDistinguishedName,
                               requestData,
                               verifyDNCallback,
                               serialNumber,
                               requestFormat=crypto.FILETYPE_ASN1,
                               certificateFormat=crypto.FILETYPE_ASN1,
                               secondsToExpiry=60 * 60 * 24 * 365,  # One year
                               digestAlgorithm='md5'):
        """
        Given a blob of certificate request data and a certificate authority's
        DistinguishedName, return a blob of signed certificate data.

        If verifyDNCallback returns a Deferred, I will return a Deferred which
        fires the data when that Deferred has completed.
        """
        hlreq = CertificateRequest.load(requestData, requestFormat)
        dn = hlreq.getSubject()
        vval = verifyDNCallback(dn)

        def verified(value):
            # A falsy callback result means the request's DN was rejected.
            if not value:
                raise VerifyError("DN callback %r rejected request DN %r" % (verifyDNCallback, dn))
            return self.signRequestObject(issuerDistinguishedName, hlreq,
                                          serialNumber, secondsToExpiry, digestAlgorithm).dump(certificateFormat)

        if isinstance(vval, Deferred):
            return vval.addCallback(verified)
        else:
            return verified(vval)

    def signRequestObject(self,
                          issuerDistinguishedName,
                          requestObject,
                          serialNumber,
                          secondsToExpiry=60 * 60 * 24 * 365,  # One year
                          digestAlgorithm='md5'):
        """
        Sign a CertificateRequest instance, returning a Certificate instance.
        """
        # (Removed a dead local ``dn = requestObject.getSubject()`` that was
        # never used.)
        req = requestObject.original
        cert = crypto.X509()
        issuerDistinguishedName._copyInto(cert.get_issuer())
        cert.set_subject(req.get_subject())
        cert.set_pubkey(req.get_pubkey())
        cert.gmtime_adj_notBefore(0)
        cert.gmtime_adj_notAfter(secondsToExpiry)
        cert.set_serial_number(serialNumber)
        cert.sign(self.original, digestAlgorithm)
        return Certificate(cert)

    def selfSignedCert(self, serialNumber, **kw):
        # Issue a certificate for our own DN, signed by ourselves.
        dn = DN(**kw)
        return PrivateCertificate.fromCertificateAndKeyPair(
            self.signRequestObject(dn, self.requestObject(dn), serialNumber),
            self)
class OpenSSLCertificateOptions(object):
    """
    A factory for SSL context objects for both SSL servers and clients.
    """
    # Cached SSL.Context; built lazily by getContext().
    _context = None
    # Older versions of PyOpenSSL didn't provide OP_ALL. Fudge it here, just in case.
    _OP_ALL = getattr(SSL, 'OP_ALL', 0x0000FFFF)
    # OP_NO_TICKET is not (yet) exposed by PyOpenSSL
    _OP_NO_TICKET = 0x00004000

    method = SSL.TLSv1_METHOD

    def __init__(self,
                 privateKey=None,
                 certificate=None,
                 method=None,
                 verify=False,
                 caCerts=None,
                 verifyDepth=9,
                 requireCertificate=True,
                 verifyOnce=True,
                 enableSingleUseKeys=True,
                 enableSessions=True,
                 fixBrokenPeers=False,
                 enableSessionTickets=False):
        """
        Create an OpenSSL context SSL connection context factory.

        @param privateKey: A PKey object holding the private key.

        @param certificate: An X509 object holding the certificate.

        @param method: The SSL protocol to use, one of SSLv23_METHOD,
        SSLv2_METHOD, SSLv3_METHOD, TLSv1_METHOD. Defaults to TLSv1_METHOD.

        @param verify: If True, verify certificates received from the peer and
        fail the handshake if verification fails. Otherwise, allow anonymous
        sessions and sessions with certificates which fail validation. By
        default this is False.

        @param caCerts: List of certificate authority certificate objects to
        use to verify the peer's certificate. Only used if verify is
        C{True}, and if verify is C{True}, this must be specified. Since
        verify is C{False} by default, this is C{None} by default.

        @type caCerts: C{list} of L{OpenSSL.crypto.X509}

        @param verifyDepth: Depth in certificate chain down to which to verify.
        If unspecified, use the underlying default (9).

        @param requireCertificate: If True, do not allow anonymous sessions.

        @param verifyOnce: If True, do not re-verify the certificate
        on session resumption.

        @param enableSingleUseKeys: If True, generate a new key whenever
        ephemeral DH parameters are used to prevent small subgroup attacks.

        @param enableSessions: If True, set a session ID on each context. This
        allows a shortened handshake to be used when a known client reconnects.

        @param fixBrokenPeers: If True, enable various non-spec protocol fixes
        for broken SSL implementations. This should be entirely safe,
        according to the OpenSSL documentation, but YMMV. This option is now
        off by default, because it causes problems with connections between
        peers using OpenSSL 0.9.8a.

        @param enableSessionTickets: If True, enable session ticket extension
        for session resumption per RFC 5077. Note there is no support for
        controlling session tickets. This option is off by default, as some
        server implementations don't correctly process incoming empty session
        ticket extensions in the hello.
        """
        assert (privateKey is None) == (certificate is None), "Specify neither or both of privateKey and certificate"
        self.privateKey = privateKey
        self.certificate = certificate
        if method is not None:
            self.method = method

        self.verify = verify
        assert ((verify and caCerts) or
                (not verify)), "Specify client CA certificate information if and only if enabling certificate verification"

        self.caCerts = caCerts
        self.verifyDepth = verifyDepth
        self.requireCertificate = requireCertificate
        self.verifyOnce = verifyOnce
        self.enableSingleUseKeys = enableSingleUseKeys
        self.enableSessions = enableSessions
        self.fixBrokenPeers = fixBrokenPeers
        self.enableSessionTickets = enableSessionTickets

    def __getstate__(self):
        # The SSL.Context is not picklable; drop it and rebuild on demand.
        d = self.__dict__.copy()
        try:
            del d['_context']
        except KeyError:
            pass
        return d

    def __setstate__(self, state):
        self.__dict__ = state

    def getContext(self):
        """Return a SSL.Context object.
        """
        if self._context is None:
            self._context = self._makeContext()
        return self._context

    def _makeContext(self):
        # Translate these declarative options into a configured SSL.Context.
        ctx = SSL.Context(self.method)

        if self.certificate is not None and self.privateKey is not None:
            ctx.use_certificate(self.certificate)
            ctx.use_privatekey(self.privateKey)
            # Sanity check
            ctx.check_privatekey()

        verifyFlags = SSL.VERIFY_NONE
        if self.verify:
            verifyFlags = SSL.VERIFY_PEER
            if self.requireCertificate:
                verifyFlags |= SSL.VERIFY_FAIL_IF_NO_PEER_CERT
            if self.verifyOnce:
                verifyFlags |= SSL.VERIFY_CLIENT_ONCE
            if self.caCerts:
                store = ctx.get_cert_store()
                for cert in self.caCerts:
                    store.add_cert(cert)

        # It'd be nice if pyOpenSSL let us pass None here for this behavior (as
        # the underlying OpenSSL API call allows NULL to be passed). It
        # doesn't, so we'll supply a function which does the same thing.
        def _verifyCallback(conn, cert, errno, depth, preverify_ok):
            return preverify_ok
        ctx.set_verify(verifyFlags, _verifyCallback)

        if self.verifyDepth is not None:
            ctx.set_verify_depth(self.verifyDepth)

        if self.enableSingleUseKeys:
            ctx.set_options(SSL.OP_SINGLE_DH_USE)

        if self.fixBrokenPeers:
            ctx.set_options(self._OP_ALL)

        if self.enableSessions:
            # Give each context a unique session id so resumed sessions are
            # never confused between factories.
            sessionName = md5("%s-%d" % (reflect.qual(self.__class__), _sessionCounter())).hexdigest()
            ctx.set_session_id(sessionName)

        if not self.enableSessionTickets:
            ctx.set_options(self._OP_NO_TICKET)

        return ctx
| |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 501ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Visualize morphologies."""
from matplotlib.collections import LineCollection, PatchCollection
from matplotlib.lines import Line2D
from matplotlib.patches import Circle, FancyArrowPatch, Polygon, Rectangle
from mpl_toolkits.mplot3d.art3d import Line3DCollection
import numpy as np
from neurom import NeuriteType, geom
from neurom.core import iter_neurites, iter_segments, iter_sections
from neurom.core._soma import SomaCylinders
from neurom.core.dataformat import COLS
from neurom.core.types import tree_type_checker
from neurom.morphmath import segment_radius
from neurom.view.dendrogram import Dendrogram, layout_dendrogram, get_size, move_positions
from . import common
# Default rendering parameters for the plotting helpers below.
_LINEWIDTH = 1.2
_ALPHA = 0.8
_DIAMETER_SCALE = 1.0

# Default color per neurite type; unknown types fall back to 'green'
# (see _get_color).
TREE_COLOR = {NeuriteType.basal_dendrite: 'red',
              NeuriteType.apical_dendrite: 'purple',
              NeuriteType.axon: 'blue',
              NeuriteType.soma: 'black',
              NeuriteType.undefined: 'green'}
def _plane2col(plane):
    """Map a plane string such as 'xy' onto the corresponding pair of COLS
    indices."""
    planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy')
    assert plane in planes, 'No such plane found! Please select one of: ' + str(planes)
    first, second = plane
    return (getattr(COLS, first.capitalize()),
            getattr(COLS, second.capitalize()), )
def _get_linewidth(tree, linewidth, diameter_scale):
"""Calculate the desired linewidth based on tree contents.
If diameter_scale exists, it is used to scale the diameter of each of the segments
in the tree
If diameter_scale is None, the linewidth is used.
"""
if diameter_scale is not None and tree:
linewidth = [2 * segment_radius(s) * diameter_scale
for s in iter_segments(tree)]
return linewidth
def _get_color(treecolor, tree_type):
"""If treecolor set, it's returned, otherwise tree_type is used to return set colors."""
if treecolor is not None:
return treecolor
return TREE_COLOR.get(tree_type, 'green')
def plot_tree(ax, tree, plane='xy',
              diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
              color=None, alpha=_ALPHA, realistic_diameters=False):
    """Plots a 2d figure of the tree's segments.

    Args:
        ax(matplotlib axes): on what to plot
        tree(neurom.core.Tree or neurom.core.Neurite): plotted tree
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
        realistic_diameters(bool): scale linewidths with axis data coordinates

    Note:
        If the tree contains one single point the plot will be empty
        since no segments can be constructed.
    """
    plane0, plane1 = _plane2col(plane)
    # Keep each segment paired with its section so the section type can
    # drive the per-segment color.
    section_segment_list = [(section, segment)
                            for section in iter_sections(tree)
                            for segment in iter_segments(section)]
    colors = [_get_color(color, section.type) for section, _ in section_segment_list]

    if realistic_diameters:
        def _get_rectangle(x, y, linewidth):
            """Draw a rectangle to represent a segment."""
            x, y = np.array(x), np.array(y)
            diff = y - x
            angle = np.arctan2(diff[1], diff[0]) % (2 * np.pi)
            # Anchor at the corner offset perpendicular to the segment so
            # the rectangle is centered on the segment's axis.
            return Rectangle(x - linewidth / 2. * np.array([-np.sin(angle), np.cos(angle)]),
                             np.linalg.norm(diff),
                             linewidth,
                             np.rad2deg(angle))

        segs = [_get_rectangle((seg[0][plane0], seg[0][plane1]),
                               (seg[1][plane0], seg[1][plane1]),
                               2 * segment_radius(seg) * diameter_scale)
                for _, seg in section_segment_list]
        collection = PatchCollection(segs, alpha=alpha, facecolors=colors)
    else:
        segs = [((seg[0][plane0], seg[0][plane1]),
                 (seg[1][plane0], seg[1][plane1]))
                for _, seg in section_segment_list]
        linewidth = _get_linewidth(
            tree,
            diameter_scale=diameter_scale,
            linewidth=linewidth,
        )
        collection = LineCollection(segs, colors=colors, linewidth=linewidth, alpha=alpha)

    ax.add_collection(collection)
def plot_soma(ax, soma, plane='xy',
              soma_outline=True,
              linewidth=_LINEWIDTH,
              color=None, alpha=_ALPHA):
    """Generates a 2d figure of the soma.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        plane(str): Any pair of 'xyz'
        soma_outline(bool): should the soma be drawn as an outline
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    plane0, plane1 = _plane2col(plane)
    color = _get_color(color, tree_type=NeuriteType.soma)

    if isinstance(soma, SomaCylinders):
        # Cylindrical somata: project each consecutive pair of points onto
        # the requested plane as a 2D cylinder.
        for start, end in zip(soma.points, soma.points[1:]):
            common.project_cylinder_onto_2d(ax, (plane0, plane1),
                                            start=start[COLS.XYZ], end=end[COLS.XYZ],
                                            start_radius=start[COLS.R], end_radius=end[COLS.R],
                                            color=color, alpha=alpha)
    else:
        if soma_outline:
            ax.add_artist(Circle(soma.center[[plane0, plane1]], soma.radius,
                                 color=color, alpha=alpha))
        else:
            points = [[p[plane0], p[plane1]] for p in soma.iter()]
            if points:
                points.append(points[0])  # close the loop
                x, y = tuple(np.array(points).T)
                ax.plot(x, y, color=color, alpha=alpha, linewidth=linewidth)

    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])

    # Artists added via add_artist don't update the data limits, so extend
    # them by hand from the soma's bounding box.
    bounding_box = geom.bounding_box(soma)
    ax.dataLim.update_from_data_xy(np.vstack(([bounding_box[0][plane0], bounding_box[0][plane1]],
                                              [bounding_box[1][plane0], bounding_box[1][plane1]])),
                                   ignore=False)
# pylint: disable=too-many-arguments
def plot_neuron(ax, nrn,
                neurite_type=NeuriteType.all,
                plane='xy',
                soma_outline=True,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA, realistic_diameters=False):
    """Plots a 2D figure of the neuron, that contains a soma and the neurites.

    Args:
        ax(matplotlib axes): on what to plot
        neurite_type(NeuriteType): an optional filter on the neurite type
        nrn(neuron): neuron to be plotted
        soma_outline(bool): should the soma be drawn as an outline
        plane(str): Any pair of 'xyz'
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
        realistic_diameters(bool): scale linewidths with axis data coordinates
    """
    plot_soma(ax, nrn.soma, plane=plane, soma_outline=soma_outline, linewidth=linewidth,
              color=color, alpha=alpha)

    # Draw each neurite matching the requested type filter.
    for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
        plot_tree(ax, neurite, plane=plane,
                  diameter_scale=diameter_scale, linewidth=linewidth,
                  color=color, alpha=alpha, realistic_diameters=realistic_diameters)

    ax.set_title(nrn.name)
    ax.set_xlabel(plane[0])
    ax.set_ylabel(plane[1])
def _update_3d_datalim(ax, obj):
    """Manually refresh a 3d Axes' data limits from ``obj``'s bounding box.

    Unlike with 2d Axes, the dataLim of 3d Axes is not updated by added
    collections, so it must be maintained by hand.
    """
    low, high = geom.bounding_box(obj)

    xy_pairs = np.vstack((low[:COLS.Z], high[:COLS.Z]))
    ax.xy_dataLim.update_from_data_xy(xy_pairs, ignore=False)

    z_pairs = np.vstack(((low[COLS.Z], low[COLS.Z]),
                         (high[COLS.Z], high[COLS.Z])))
    ax.zz_dataLim.update_from_data_xy(z_pairs, ignore=False)
def plot_tree3d(ax, tree,
                diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                color=None, alpha=_ALPHA):
    """Draw a tree in 3d.

    A tree that consists of a single point produces an empty plot, since
    no segments can be constructed from it.

    Args:
        ax(matplotlib axes): on what to plot
        tree(neurom.core.Tree or neurom.core.Neurite): plotted tree
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    # flatten (section, segment) pairs so colors line up with segments
    pairs = [(sec, seg)
             for sec in iter_sections(tree)
             for seg in iter_segments(sec)]
    endpoints = [(seg[0][COLS.XYZ], seg[1][COLS.XYZ]) for _, seg in pairs]
    seg_colors = [_get_color(color, sec.type) for sec, _ in pairs]
    width = _get_linewidth(tree, diameter_scale=diameter_scale, linewidth=linewidth)

    ax.add_collection3d(Line3DCollection(endpoints, colors=seg_colors,
                                         linewidth=width, alpha=alpha))
    _update_3d_datalim(ax, tree)
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
    """Draw a soma in 3d.

    Args:
        ax(matplotlib axes): on what to plot
        soma(neurom.core.Soma): plotted soma
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    soma_color = _get_color(color, tree_type=NeuriteType.soma)

    if isinstance(soma, SomaCylinders):
        # render each consecutive pair of soma points as a cylinder
        for begin, finish in zip(soma.points, soma.points[1:]):
            common.plot_cylinder(ax,
                                 start=begin[COLS.XYZ], end=finish[COLS.XYZ],
                                 start_radius=begin[COLS.R], end_radius=finish[COLS.R],
                                 color=soma_color, alpha=alpha)
    else:
        common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,
                           color=soma_color, alpha=alpha)

    # unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually
    _update_3d_datalim(ax, soma)
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all,
                  diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
                  color=None, alpha=_ALPHA):
    """Draw a neuron in 3d: its soma plus every neurite passing the filter.

    Args:
        ax(matplotlib axes): on what to plot
        nrn(neuron): neuron to be plotted
        neurite_type(NeuriteType): an optional filter on the neurite type
        diameter_scale(float): Scale factor multiplied with segment diameters before plotting
        linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
        color(str or None): Color of plotted values, None corresponds to default choice
        alpha(float): Transparency of plotted values
    """
    plot_soma3d(ax, nrn.soma, color=color, alpha=alpha)

    type_filter = tree_type_checker(neurite_type)
    for tree in iter_neurites(nrn, filt=type_filter):
        plot_tree3d(ax, tree,
                    diameter_scale=diameter_scale, linewidth=linewidth,
                    color=color, alpha=alpha)

    ax.set_title(nrn.name)
def _get_dendrogram_legend(dendrogram):
    """Build legend handles labelling the neurite types of ``dendrogram``.

    Because the dendrogram is rendered as patches, labels must be attached
    manually via legend handles.

    Args:
        dendrogram (Dendrogram): dendrogram

    Returns:
        List of legend handles.
    """
    def _handle(tree_type):
        return Line2D([0], [0], color=TREE_COLOR[tree_type], lw=2, label=tree_type.name)

    if dendrogram.neurite_type != NeuriteType.soma:
        return [_handle(dendrogram.neurite_type)]

    # soma root: one handle per distinct neurite type among root + children
    unique_handles = {d.neurite_type: _handle(d.neurite_type)
                      for d in [dendrogram] + dendrogram.children}
    return unique_handles.values()
def _as_dendrogram_polygon(coords, color):
    """Return a filled ``Polygon`` patch over ``coords`` in ``color``."""
    patch = Polygon(coords, color=color, fill=True)
    return patch
def _as_dendrogram_line(start, end, color):
    """Return a plain (arrowless) line patch from ``start`` to ``end``."""
    return FancyArrowPatch(start, end, arrowstyle='-', color=color,
                           lw=2, shrinkA=0, shrinkB=0)
def _get_dendrogram_shapes(dendrogram, positions, show_diameters):
    """Recursively build the drawable patches for ``dendrogram``.

    Args:
        dendrogram (Dendrogram): dendrogram
        positions (dict of Dendrogram: np.array): positions xy coordinates of dendrograms
        show_diameters (bool): whether to draw shapes with diameter or as plain lines

    Returns:
        List of matplotlib.patches.
    """
    tree_color = TREE_COLOR[dendrogram.neurite_type]
    origin = positions[dendrogram]
    tip = origin + [0, dendrogram.height]

    if show_diameters:
        patches = [_as_dendrogram_polygon(dendrogram.coords + origin, tree_color)]
    else:
        patches = [_as_dendrogram_line(origin, tip, tree_color)]

    for child in dendrogram.children:
        # connect this node's tip to the child, then recurse into the child
        patches.append(_as_dendrogram_line(tip, positions[child], tree_color))
        patches.extend(_get_dendrogram_shapes(child, positions, show_diameters))

    return patches
def plot_dendrogram(ax, obj, show_diameters=True):
    """Draw the Dendrogram of `obj`.

    Args:
        ax: matplotlib axes
        obj (neurom.Neuron, neurom.Tree): neuron or tree
        show_diameters (bool): whether to show node diameters or not
    """
    dendrogram = Dendrogram(obj)
    positions = layout_dendrogram(dendrogram, np.array([0, 0]))
    width, height = get_size(positions)
    # center the layout horizontally
    positions = move_positions(positions, np.array([.5 * width, 0]))

    # leave a 5% margin on each side
    ax.set_xlim([-.05 * width, 1.05 * width])
    ax.set_ylim([-.05 * height, 1.05 * height])
    ax.set_title('Morphology Dendrogram')
    ax.set_xlabel('micrometers (um)')
    ax.set_ylabel('micrometers (um)')

    patches = _get_dendrogram_shapes(dendrogram, positions, show_diameters)
    ax.add_collection(PatchCollection(patches, match_original=True))
    ax.set_aspect('auto')
    ax.legend(handles=_get_dendrogram_legend(dendrogram))
| |
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DXWorkflow Handler
++++++++++++++++++
Workflows are data objects which contain metadata for a set of jobs to
be run together. They can be run by calling the
:func:`DXWorkflow.run` method.
"""
from __future__ import print_function, unicode_literals, division, absolute_import
import re
import dxpy
from ..system_requirements import SystemRequirementsDict
from ..bindings import DXDataObject, DXExecutable, DXAnalysis
from ..exceptions import DXError
from ..compat import basestring
##############
# DXWorkflow #
##############
def new_dxworkflow(title=None, summary=None, description=None, output_folder=None, init_from=None, **kwargs):
    '''
    :param title: Workflow title (optional)
    :type title: string
    :param summary: Workflow summary (optional)
    :type summary: string
    :param description: Workflow description (optional)
    :type description: string
    :param output_folder: Default output folder of the workflow (optional)
    :type output_folder: string
    :param init_from: Another workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
    :type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)
    :rtype: :class:`DXWorkflow`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`, except `details`.

    Creates a new remote workflow object with project set to *project*
    and returns the appropriate handler.

    Example:

        r = dxpy.new_dxworkflow(title="My Workflow", description="This workflow contains...")

    Note that this function is shorthand for::

        dxworkflow = DXWorkflow()
        dxworkflow.new(**kwargs)
    '''
    workflow = DXWorkflow()
    workflow.new(title=title, summary=summary, description=description,
                 output_folder=output_folder, init_from=init_from, **kwargs)
    return workflow
class DXWorkflow(DXDataObject, DXExecutable):
    '''
    Remote workflow object handler. This class is used for the
    workflow class data objects which produce an analysis when run.
    '''

    _class = "workflow"

    _describe = staticmethod(dxpy.api.workflow_describe)
    _add_types = staticmethod(dxpy.api.workflow_add_types)
    _remove_types = staticmethod(dxpy.api.workflow_remove_types)
    _get_details = staticmethod(dxpy.api.workflow_get_details)
    _set_details = staticmethod(dxpy.api.workflow_set_details)
    _set_visibility = staticmethod(dxpy.api.workflow_set_visibility)
    _rename = staticmethod(dxpy.api.workflow_rename)
    _set_properties = staticmethod(dxpy.api.workflow_set_properties)
    _add_tags = staticmethod(dxpy.api.workflow_add_tags)
    _remove_tags = staticmethod(dxpy.api.workflow_remove_tags)
    _close = staticmethod(dxpy.api.workflow_close)
    _list_projects = staticmethod(dxpy.api.workflow_list_projects)

    def _new(self, dx_hash, **kwargs):
        """
        :param dx_hash: Standard hash populated in :func:`dxpy.bindings.DXDataObject.new()` containing attributes common to all data object classes.
        :type dx_hash: dict
        :param title: Workflow title (optional)
        :type title: string
        :param summary: Workflow summary (optional)
        :type summary: string
        :param description: Workflow description (optional)
        :type description: string
        :param output_folder: Default output folder of the workflow (optional)
        :type output_folder: string
        :param stages: Stages of the workflow (optional)
        :type stages: array of dictionaries
        :param workflow_inputs: Workflow-level input specification (optional)
        :type workflow_inputs: array of dictionaries
        :param workflow_outputs: Workflow-level output specification (optional)
        :type workflow_outputs: array of dictionaries
        :param init_from: Another workflow object handler or an analysis (string or handler) from which to initialize the metadata (optional)
        :type init_from: :class:`~dxpy.bindings.dxworkflow.DXWorkflow`, :class:`~dxpy.bindings.dxanalysis.DXAnalysis`, or string (for analysis IDs only)

        Create a new remote workflow object.
        """
        def _set_dx_hash(kwargs, dxhash, key, new_key=None):
            # Move kwargs[key] (when present and not None) into dxhash under
            # new_key (default: key itself), consuming the entry from kwargs.
            new_key = key if new_key is None else new_key
            if key in kwargs:
                if kwargs[key] is not None:
                    dxhash[new_key] = kwargs[key]
                del kwargs[key]

        if "init_from" in kwargs:
            if kwargs["init_from"] is not None:
                # init_from must be a DXWorkflow/DXAnalysis handler or an analysis ID string
                if not (isinstance(kwargs["init_from"], (DXWorkflow, DXAnalysis)) or
                        (isinstance(kwargs["init_from"], basestring) and
                         re.compile('^analysis-[0-9A-Za-z]{24}$').match(kwargs["init_from"]))):
                    raise DXError("Expected init_from to be an instance of DXWorkflow or DXAnalysis, or to be a string analysis ID.")
                if isinstance(kwargs["init_from"], basestring):
                    dx_hash["initializeFrom"] = {"id": kwargs["init_from"]}
                else:
                    dx_hash["initializeFrom"] = {"id": kwargs["init_from"].get_id()}
                    if isinstance(kwargs["init_from"], DXWorkflow):
                        dx_hash["initializeFrom"]["project"] = kwargs["init_from"].get_proj_id()
            del kwargs["init_from"]

        _set_dx_hash(kwargs, dx_hash, "title")
        _set_dx_hash(kwargs, dx_hash, "summary")
        _set_dx_hash(kwargs, dx_hash, "description")
        _set_dx_hash(kwargs, dx_hash, "output_folder", "outputFolder")
        _set_dx_hash(kwargs, dx_hash, "stages")
        _set_dx_hash(kwargs, dx_hash, "workflow_inputs", "inputs")
        _set_dx_hash(kwargs, dx_hash, "workflow_outputs", "outputs")

        resp = dxpy.api.workflow_new(dx_hash, **kwargs)
        self.set_ids(resp["id"], dx_hash["project"])

    def _add_edit_version_to_request(self, request_hash, edit_version=None):
        """Stamp *request_hash* with the required "editVersion" field.

        Uses *edit_version* when given; otherwise falls back to the edit
        version cached by the most recent describe.
        """
        if edit_version is None:
            request_hash["editVersion"] = self.editVersion
        else:
            request_hash["editVersion"] = edit_version

    def _get_stage_id(self, stage):
        '''
        :param stage: A stage ID, name, or index (stage index is the number n for the nth stage, starting from 0; can be provided as an int or a string)
        :type stage: int or string
        :returns: The stage ID (this is a no-op if it was already a stage ID)
        :raises: :class:`~dxpy.exceptions.DXError` if *stage* could not be parsed, resolved to a stage ID, or it could not be found in the workflow
        '''
        # first, if it is a string, see if it is an integer
        if isinstance(stage, basestring):
            try:
                stage = int(stage)
            except ValueError:
                # we'll try parsing it as a string later
                pass

        if not isinstance(stage, basestring):
            # Try to parse as stage index; ensure that if it's not a
            # string that it is an integer at this point.
            try:
                stage_index = int(stage)
            except (TypeError, ValueError):
                raise DXError('DXWorkflow: the given stage identifier was neither a string stage ID nor an integer index')
            if stage_index < 0 or stage_index >= len(self.stages):
                raise DXError('DXWorkflow: the workflow contains ' + str(len(self.stages)) +
                              ' stage(s), and the numerical value of the given stage identifier is out of range')
            return self.stages[stage_index].get("id")

        if re.compile('^([a-zA-Z_]|stage-)[0-9a-zA-Z_]*$').match(stage) is not None:
            # Check if there exists a stage with this stage id
            if any(stg.get('id') == stage for stg in self.stages):
                return stage

        # A stage with the provided ID can't be found in the workflow, so look for it as a name
        stage_ids_matching_name = [stg['id'] for stg in self.stages if stg.get('name') == stage]
        if len(stage_ids_matching_name) == 0:
            raise DXError('DXWorkflow: the given stage identifier ' + stage + ' could not be found as a stage ID nor as a stage name')
        elif len(stage_ids_matching_name) > 1:
            raise DXError('DXWorkflow: more than one workflow stage was found to have the name "' + stage + '"')
        else:
            return stage_ids_matching_name[0]

    def add_stage(self, executable, stage_id=None, name=None, folder=None, stage_input=None, instance_type=None,
                  edit_version=None, **kwargs):
        '''
        :param executable: string or a handler for an app or applet
        :type executable: string, DXApplet, or DXApp
        :param stage_id: id for the stage (optional)
        :type stage_id: string
        :param name: name for the stage (optional)
        :type name: string
        :param folder: default output folder for the stage; either a relative or absolute path (optional)
        :type folder: string
        :param stage_input: input fields to bind as default inputs for the executable (optional)
        :type stage_input: dict
        :param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
        :type instance_type: string or dict
        :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
        :type edit_version: int
        :returns: ID of the added stage
        :rtype: string
        :raises: :class:`~dxpy.exceptions.DXError` if *executable* is not an expected type :class:`~dxpy.exceptions.DXAPIError` for errors thrown from the API call

        Adds the specified executable as a new stage in the workflow.
        '''
        if isinstance(executable, basestring):
            exec_id = executable
        elif isinstance(executable, DXExecutable):
            exec_id = executable.get_id()
        else:
            raise DXError("dxpy.DXWorkflow.add_stage: executable must be a string or an instance of DXApplet or DXApp")
        add_stage_input = {"executable": exec_id}
        if stage_id is not None:
            add_stage_input["id"] = stage_id
        if name is not None:
            add_stage_input["name"] = name
        if folder is not None:
            add_stage_input["folder"] = folder
        if stage_input is not None:
            add_stage_input["input"] = stage_input
        if instance_type is not None:
            add_stage_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type).as_dict()
        self._add_edit_version_to_request(add_stage_input, edit_version)
        try:
            result = dxpy.api.workflow_add_stage(self._dxid, add_stage_input, **kwargs)
        finally:
            self.describe()  # update cached describe
        return result['stage']

    def get_stage(self, stage, **kwargs):
        '''
        :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
        :type stage: int or string
        :returns: Hash of stage descriptor in workflow
        '''
        stage_id = self._get_stage_id(stage)
        result = next((stage for stage in self.stages if stage['id'] == stage_id), None)
        if result is None:
            raise DXError('The stage ID ' + stage_id + ' could not be found')
        return result

    def remove_stage(self, stage, edit_version=None, **kwargs):
        '''
        :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
        :type stage: int or string
        :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
        :type edit_version: int
        :returns: Stage ID that was removed
        :rtype: string

        Removes the specified stage from the workflow
        '''
        stage_id = self._get_stage_id(stage)
        remove_stage_input = {"stage": stage_id}
        self._add_edit_version_to_request(remove_stage_input, edit_version)
        try:
            dxpy.api.workflow_remove_stage(self._dxid, remove_stage_input, **kwargs)
        finally:
            self.describe()  # update cached describe
        return stage_id

    def move_stage(self, stage, new_index, edit_version=None, **kwargs):
        '''
        :param stage: A number for the stage index (for the nth stage, starting from 0), or a string of the stage index, name, or ID
        :type stage: int or string
        :param new_index: The new position in the order of stages that the specified stage should have (where 0 indicates the first stage)
        :type new_index: int
        :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
        :type edit_version: int

        Moves the specified stage to a new position in the order of stages
        of the workflow
        '''
        stage_id = self._get_stage_id(stage)
        move_stage_input = {"stage": stage_id,
                            "newIndex": new_index}
        self._add_edit_version_to_request(move_stage_input, edit_version)
        try:
            dxpy.api.workflow_move_stage(self._dxid, move_stage_input, **kwargs)
        finally:
            self.describe()  # update cached describe

    def update(self, title=None, unset_title=False, summary=None, description=None,
               output_folder=None, unset_output_folder=False,
               workflow_inputs=None, unset_workflow_inputs=False,
               workflow_outputs=None, unset_workflow_outputs=False,
               stages=None, edit_version=None, **kwargs):
        '''
        :param title: workflow title to set; cannot be provided with *unset_title* set to True
        :type title: string
        :param unset_title: whether to unset the title; cannot be True with string value for *title*
        :type unset_title: boolean
        :param summary: workflow summary to set
        :type summary: string
        :param description: workflow description to set
        :type description: string
        :param output_folder: new default output folder for the workflow
        :type output_folder: string
        :param unset_output_folder: whether to unset the default output folder; cannot be True with string value for *output_folder*
        :type unset_output_folder: boolean
        :param workflow_inputs: updates to the workflow input to make; see API documentation for /workflow-xxxx/update for syntax of this field; cannot be provided with *unset_workflow_inputs* set to True
        :type workflow_inputs: dict
        :param unset_workflow_inputs: whether to unset the workflow input; cannot be True with a value for *workflow_inputs*
        :type unset_workflow_inputs: boolean
        :param workflow_outputs: updates to the workflow output to make; see API documentation for /workflow-xxxx/update for syntax of this field; cannot be provided with *unset_workflow_outputs* set to True
        :type workflow_outputs: dict
        :param unset_workflow_outputs: whether to unset the workflow output; cannot be True with a value for *workflow_outputs*
        :type unset_workflow_outputs: boolean
        :param stages: updates to the stages to make; see API documentation for /workflow-xxxx/update for syntax of this field; use :meth:`update_stage()` to update a single stage
        :type stages: dict
        :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
        :type edit_version: int

        Make general metadata updates to the workflow
        '''
        update_input = {}
        # reject contradictory set/unset combinations up front
        if title is not None and unset_title:
            raise DXError('dxpy.DXWorkflow.update: cannot provide both "title" and set "unset_title"')
        if output_folder is not None and unset_output_folder:
            raise DXError('dxpy.DXWorkflow.update: cannot provide both "output_folder" and set "unset_output_folder"')
        if workflow_inputs is not None and unset_workflow_inputs:
            raise DXError('dxpy.DXWorkflow.update: cannot provide both "workflow_inputs" and set "unset_workflow_inputs"')
        if workflow_outputs is not None and unset_workflow_outputs:
            raise DXError('dxpy.DXWorkflow.update: cannot provide both "workflow_outputs" and set "unset_workflow_outputs"')
        if title is not None:
            update_input["title"] = title
        elif unset_title:
            update_input["title"] = None
        if summary is not None:
            update_input["summary"] = summary
        if description is not None:
            update_input["description"] = description
        if output_folder is not None:
            update_input["outputFolder"] = output_folder
        elif unset_output_folder:
            update_input["outputFolder"] = None
        if stages is not None:
            update_input["stages"] = stages
        if workflow_inputs is not None:
            update_input["inputs"] = workflow_inputs
        elif unset_workflow_inputs:
            update_input["inputs"] = None
        if workflow_outputs is not None:
            update_input["outputs"] = workflow_outputs
        elif unset_workflow_outputs:
            update_input["outputs"] = None

        # only perform update if there are changes to make
        if update_input:
            self._add_edit_version_to_request(update_input, edit_version)
            try:
                dxpy.api.workflow_update(self._dxid, update_input, **kwargs)
            finally:
                self.describe()  # update cached describe

    def update_stage(self, stage, executable=None, force=False,
                     name=None, unset_name=False, folder=None, unset_folder=False, stage_input=None,
                     instance_type=None, edit_version=None, **kwargs):
        '''
        :param stage: A number for the stage index (for the nth stage, starting from 0), or a string stage index, name, or ID
        :type stage: int or string
        :param executable: string or a handler for an app or applet
        :type executable: string, DXApplet, or DXApp
        :param force: whether to use *executable* even if it is incompatible with the previous executable's spec
        :type force: boolean
        :param name: new name for the stage; cannot be provided with *unset_name* set to True
        :type name: string
        :param unset_name: whether to unset the stage name; cannot be True with string value for *name*
        :type unset_name: boolean
        :param folder: new default output folder for the stage; either a relative or absolute path (optional)
        :type folder: string
        :param unset_folder: whether to unset the stage folder; cannot be True with string value for *folder*
        :type unset_folder: boolean
        :param stage_input: input fields to bind as default inputs for the executable (optional)
        :type stage_input: dict
        :param instance_type: Default instance type on which all jobs will be run for this stage, or a dict mapping function names to instance type requests
        :type instance_type: string or dict
        :param edit_version: if provided, the edit version of the workflow that should be modified; if not provided, the current edit version will be used (optional)
        :type edit_version: int

        Updates the specified stage of the workflow: optionally swaps its
        executable, and/or updates its name, folder, default input, and
        instance type
        '''
        stage_id = self._get_stage_id(stage)

        if name is not None and unset_name:
            raise DXError('dxpy.DXWorkflow.update_stage: cannot provide both "name" and set "unset_name"')
        if folder is not None and unset_folder:
            raise DXError('dxpy.DXWorkflow.update_stage: cannot provide both "folder" and set "unset_folder"')

        if executable is not None:
            if isinstance(executable, basestring):
                exec_id = executable
            elif isinstance(executable, DXExecutable):
                exec_id = executable.get_id()
            else:
                raise DXError("dxpy.DXWorkflow.update_stage: executable (if provided) must be a string or an instance of DXApplet or DXApp")
            update_stage_exec_input = {"stage": stage_id,
                                       "executable": exec_id,
                                       "force": force}
            self._add_edit_version_to_request(update_stage_exec_input, edit_version)
            try:
                dxpy.api.workflow_update_stage_executable(self._dxid, update_stage_exec_input, **kwargs)
            finally:
                self.describe()  # update cached describe

        # Construct hash and update the workflow's stage if necessary
        update_stage_input = {}
        if name is not None:
            update_stage_input["name"] = name
        elif unset_name:
            update_stage_input["name"] = None
        if folder:
            update_stage_input["folder"] = folder
        elif unset_folder:
            update_stage_input["folder"] = None
        if stage_input:
            update_stage_input["input"] = stage_input
        if instance_type is not None:
            update_stage_input["systemRequirements"] = SystemRequirementsDict.from_instance_type(instance_type).as_dict()
        if update_stage_input:
            update_input = {"stages": {stage_id: update_stage_input}}
            self._add_edit_version_to_request(update_input, edit_version)
            try:
                dxpy.api.workflow_update(self._dxid, update_input, **kwargs)
            finally:
                self.describe()  # update cached describe

    def is_locked(self):
        """Return whether the workflow is locked, i.e. it defines a
        workflow-level input specification and is in the 'closed' state."""
        return self._desc.get('inputs') is not None and self._desc.get('state') == 'closed'

    def _get_input_name(self, input_str, region=None, describe_output=None):
        '''
        :param input_str: A string of one of the forms: "<exported input field name>", "<explicit workflow input field name>", "<stage ID>.<input field name>", "<stage index>.<input field name>", "<stage name>.<input field name>"
        :type input_str: string
        :param region: unused here; accepted for signature compatibility with other executables
        :param describe_output: unused here; accepted for signature compatibility with other executables
        :returns: If the given form was one of those which uses the stage index or stage name, it is translated to the stage ID for use in the API call (stage name takes precedence)
        '''
        if '.' in input_str:
            stage_identifier, input_name = input_str.split('.', 1)
            # Try to parse as a stage ID or name
            return self._get_stage_id(stage_identifier) + '.' + input_name
        return input_str

    def _get_effective_input(self, workflow_input):
        """Translate every key of *workflow_input* via :meth:`_get_input_name`,
        raising :class:`~dxpy.exceptions.DXError` when two keys resolve to the
        same effective input name."""
        effective_input = {}
        for key in workflow_input:
            input_name = self._get_input_name(key)
            if input_name in effective_input:
                raise DXError('DXWorkflow: the input for ' + input_name + ' was provided more than once')
            effective_input[input_name] = workflow_input[key]
        return effective_input

    def _get_run_input(self, workflow_input, **kwargs):
        """Build the input hash for the /workflow-xxxx/run API call, resolving
        any stage names/indices in the workflow-specific kwargs
        (stage_instance_types, stage_folders, rerun_stages, ignore_reuse,
        ignore_reuse_stages) to stage IDs."""
        effective_workflow_input = self._get_effective_input(workflow_input)
        run_input = DXExecutable._get_run_input_common_fields(effective_workflow_input, **kwargs)

        if kwargs.get('stage_instance_types') is not None:
            run_input['stageSystemRequirements'] = {}
            for stage, value in kwargs['stage_instance_types'].items():
                if stage != '*':
                    stage = self._get_stage_id(stage)
                run_input['stageSystemRequirements'][stage] = SystemRequirementsDict.from_instance_type(value).as_dict()

        if kwargs.get('stage_folders') is not None:
            run_input['stageFolders'] = {}
            for stage, value in kwargs['stage_folders'].items():
                if stage != '*':
                    stage = self._get_stage_id(stage)
                run_input['stageFolders'][stage] = value

        if kwargs.get('rerun_stages') is not None:
            run_input['rerunStages'] = [
                _stage if _stage == '*' else self._get_stage_id(_stage)
                for _stage in kwargs['rerun_stages']
            ]

        if kwargs.get('ignore_reuse', False):
            run_input['ignoreReuse'] = ['*']

        # ignore_reuse_stages takes precedence over the blanket ignore_reuse
        if kwargs.get('ignore_reuse_stages') is not None:
            run_input['ignoreReuse'] = [
                _stage if _stage == '*' else self._get_stage_id(_stage)
                for _stage in kwargs['ignore_reuse_stages']
            ]

        return run_input

    def _run_impl(self, run_input, **kwargs):
        """Invoke /workflow-xxxx/run and wrap the resulting analysis ID in a
        :class:`~dxpy.bindings.dxanalysis.DXAnalysis` handler."""
        return DXAnalysis(dxpy.api.workflow_run(self._dxid, run_input, **kwargs)["id"])

    def run(self, workflow_input, *args, **kwargs):
        '''
        :param workflow_input: Dictionary of the workflow's input arguments; see below for more details
        :type workflow_input: dict
        :param instance_type: Instance type on which all stages' jobs will be run, or a dict mapping function names to instance types. These may be overridden on a per-stage basis if stage_instance_types is specified.
        :type instance_type: string or dict
        :param stage_instance_types: A dict mapping stage IDs, names, or indices to either a string (representing an instance type to be used for all functions in that stage), or a dict mapping function names to instance types.
        :type stage_instance_types: dict
        :param stage_folders: A dict mapping stage IDs, names, indices, and/or the string "*" to folder values to be used for the stages' output folders (use "*" as the default for all unnamed stages)
        :type stage_folders: dict
        :param rerun_stages: A list of stage IDs, names, indices, and/or the string "*" to indicate which stages should be run even if there are cached executions available
        :type rerun_stages: list of strings
        :param ignore_reuse_stages: Stages of a workflow (IDs, names, or indices) or "*" for which job reuse should be disabled
        :type ignore_reuse_stages: list
        :returns: Object handler of the newly created analysis
        :rtype: :class:`~dxpy.bindings.dxanalysis.DXAnalysis`

        Run the associated workflow. See :meth:`dxpy.bindings.dxapplet.DXExecutable.run` for additional args.

        When providing input for the workflow, keys should be of one of the following forms:

        * "N.name" where *N* is the stage number, and *name* is the
          name of the input, e.g. "0.reads" if the first stage takes
          in an input called "reads"

        * "stagename.name" where *stagename* is the stage name, and
          *name* is the name of the input within the stage

        * "stageID.name" where *stageID* is the stage ID, and *name*
          is the name of the input within the stage

        * "name" where *name* is the name of a workflow level input
          (defined in inputs) or the name that has been
          exported for the workflow (this name will appear as a key
          in the "inputSpec" of this workflow's description if it has
          been exported for this purpose)
        '''
        return super(DXWorkflow, self).run(workflow_input, *args, **kwargs)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource info registry."""
from googlecloudsdk.core.resource import resource_exceptions
from googlecloudsdk.core.resource import resource_info
RESOURCE_REGISTRY = {
# appengine
'appengine.instances':
resource_info.ResourceInfo(
list_format="""
table(
service:sort=1,
version:sort=2,
id:sort=3,
instance.vmStatus.yesno(no="N/A"),
instance.vmDebugEnabled.yesno(yes="YES", no=""):label=DEBUG_MODE
)
""",),
'appengine.module_versions':
resource_info.ResourceInfo(
list_format="""
table(
module,
version,
traffic_split.format("{0:.2f}", .)
)
""",),
'appengine.regions':
resource_info.ResourceInfo(
list_format="""
table(
region:sort=1,
standard.yesno(yes="YES", no="NO"):label='SUPPORTS STANDARD',
flexible.yesno(yes="YES", no="NO"):label='SUPPORTS FLEXIBLE'
)
""",),
'appengine.services':
resource_info.ResourceInfo(
list_format="""
table(
id:label=SERVICE:sort=1,
versions.len():label=NUM_VERSIONS
)
""",),
'appengine.versions':
resource_info.ResourceInfo(
list_format="""
table(
service,
id:label=VERSION,
traffic_split.format("{0:.2f}", .),
last_deployed_time.date("%Y-%m-%dT%H:%M:%S%Oz", undefined="-")
:label=LAST_DEPLOYED,
version.servingStatus:label=SERVING_STATUS
)
""",),
# bigtable
'bigtable.clusters.list.alpha':
resource_info.ResourceInfo(
list_format="""
table[box](
displayName:label=NAME,
clusterId:label=ID,
zoneId:label=ZONE,
serveNodes:label=NODES
)
""",),
'bigtable.clusters.list':
resource_info.ResourceInfo(
list_format="""
table(
name.segment(3):sort=1:label=INSTANCE,
name.basename():sort=2:label=NAME,
location.basename():label=ZONE,
serveNodes:label=NODES,
defaultStorageType:label=STORAGE,
state
)
""",),
'bigtable.instances.list':
resource_info.ResourceInfo(
list_format="""
table(
name.basename():sort=1,
displayName,
state
)
""",),
# bio
'bio.projects.operations':
resource_info.ResourceInfo(
list_format="""
table(
name.basename(),
metadata.request.'@type'.split('.').slice(-1:):label=TYPE,
metadata.request.workflowName,
metadata.createTime.date(),
done,
error.code:label=ERROR_CODE,
format('{0:40}', error.message):label=ERROR_MESSAGE
)
""",),
# cloud billing
'cloudbilling.billingAccounts':
resource_info.ResourceInfo(
cache_command='billing accounts list',
# TODO(b/22402915) Delete this when OP resource completion is
# supported.
bypass_cache=True,
list_format="""
table(
name.basename():label=ID,
displayName:label=NAME,
open
)
""",),
'cloudbilling.projectBillingInfo':
resource_info.ResourceInfo(
list_format="""
table(
projectId,
billingAccountName.basename():label=BILLING_ACCOUNT_ID,
billingEnabled
)
""",),
# cloud build
'cloudbuild.projects.builds':
resource_info.ResourceInfo(
cache_command='cloud build list',
bypass_cache=True,
async_collection='cloudbuild.projects.builds',
list_format="""
table(
id,
createTime.date('%Y-%m-%dT%H:%M:%S%Oz', undefined='-'),
duration(start=startTime,end=finishTime,precision=0,calendar=false,undefined=" -").slice(2:).join(""):label=DURATION,
build_source(undefined="-"):label=SOURCE,
build_images(undefined="-"):label=IMAGES,
status
)
""",),
# cloud key management system
'cloudkms.projects.locations':
resource_info.ResourceInfo(
list_format="""
table(
locationId
)
""",),
'cloudkms.projects.locations.keyRings':
resource_info.ResourceInfo(
list_format="""
table(
name
)
""",),
'cloudkms.projects.locations.keyRings.cryptoKeys':
resource_info.ResourceInfo(
list_format="""
table(
name,
purpose,
primary.state:label=PRIMARY_STATE
)
""",),
'cloudkms.projects.locations.keyRings.cryptoKeys.cryptoKeyVersions':
resource_info.ResourceInfo(
list_format="""
table(
name,
state
)
""",),
# cloud resource manager
'cloudresourcemanager.folders':
resource_info.ResourceInfo(
async_collection='cloudresourcemanager.operations',
list_format="""
table(
displayName,
name:sort=101,
parent
)
""",),
'cloudresourcemanager.projects':
resource_info.ResourceInfo(
cache_command='projects list',
list_format="""
table(
projectId:sort=1,
name,
projectNumber
)
""",),
'cloudresourcemanager.operations':
resource_info.ResourceInfo(
list_format="""
table(
name:sort=101,
done,
response,
metadata,
error
)
""",),
'cloudresourcemanager.organizations':
resource_info.ResourceInfo(
cache_command='organizations list',
list_format="""
table(
displayName,
organizationId:sort=1,
owner.directoryCustomerId
)
""",),
'cloudresourcemanager.liens':
resource_info.ResourceInfo(list_format="""
table(
name.segment(),
origin,
reason
)
"""),
# Cloud SDK client side resources
# 'coudsdk.*': ...
# compute
'compute.addresses':
resource_info.ResourceInfo(
cache_command='compute addresses list',
list_format="""
table(
name,
region.basename(),
address,
status
)
""",),
'compute.autoscalers':
resource_info.ResourceInfo(
async_collection='compute.operations',
cache_command='compute autoscaler list',
list_format="""
table(
name,
target.basename(),
autoscalingPolicy.policy():label=POLICY
)
""",),
'compute.backendBuckets':
resource_info.ResourceInfo(
list_format="""
table(
name,
bucketName:label=GCS_BUCKET_NAME,
enableCdn
)
""",),
'compute.backendServiceGroupHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.backendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol
)
""",),
'compute.backendServices.alpha':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.regionBackendServices':
resource_info.ResourceInfo(
cache_command='compute backend-services list',
list_format="""
table(
name,
backends[].group.list():label=BACKENDS,
protocol,
loadBalancingScheme,
healthChecks.map().basename().list()
)
""",),
'compute.disks':
resource_info.ResourceInfo(
cache_command='compute disks list',
list_format="""
table(
name,
zone.basename(),
sizeGb,
type.basename(),
status
)
""",),
'compute.diskTypes':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
zone.basename(),
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.diskTypes.alpha':
resource_info.ResourceInfo(
cache_command='compute disk-types list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
validDiskSize:label=VALID_DISK_SIZES
)
""",),
'compute.firewalls':
resource_info.ResourceInfo(
cache_command='compute firewall-rules list',
list_format="""
table(
name,
network.basename(),
sourceRanges.list():label=SRC_RANGES,
allowed[].map().firewall_rule().list():label=RULES,
sourceTags.list():label=SRC_TAGS,
targetTags.list():label=TARGET_TAGS
)
""",),
'compute.firewalls.alpha':
resource_info.ResourceInfo(
cache_command='compute firewall-rules list',
list_format="""
table(
name,
network.basename(),
direction,
priority,
sourceRanges.list():label=SRC_RANGES,
destinationRanges.list():label=DEST_RANGES,
allowed[].map().firewall_rule().list():label=ALLOW,
denied[].map().firewall_rule().list():label=DENY,
sourceTags.list():label=SRC_TAGS,
targetTags.list():label=TARGET_TAGS
)
""",),
'compute.forwardingRules':
resource_info.ResourceInfo(
cache_command='compute forwarding-rules list',
list_format="""
table(
name,
region.basename(),
IPAddress,
IPProtocol,
firstof(
target,
backendService).scope():label=TARGET
)
""",),
'compute.groups':
resource_info.ResourceInfo(
cache_command='compute groups list',
list_format="""
table(
name,
members.len():label=NUM_MEMBERS,
description
)
""",),
'compute.healthChecks':
resource_info.ResourceInfo(
cache_command='compute health-checks list',
list_format="""
table(
name,
type:label=PROTOCOL
)
""",),
'compute.hosts':
resource_info.ResourceInfo(
cache_command='compute sole-tenancy hosts list',
list_format="""
table(
name,
zone.basename(),
instances.len():label=INSTANCES,
status
)
""",),
'compute.hostTypes':
resource_info.ResourceInfo(
cache_command='compute sole-tenancy host-types list',
list_format="""
table(
name,
zone.basename(),
guestCpus:label=CPUs,
memoryMb,
localSsdGb,
deprecated.state:label=DEPRECATED
)
""",),
'compute.httpHealthChecks':
resource_info.ResourceInfo(
cache_command='compute http-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.httpsHealthChecks':
resource_info.ResourceInfo(
cache_command='compute https-health-checks list',
list_format="""
table(
name,
host,
port,
requestPath
)
""",),
'compute.images':
resource_info.ResourceInfo(
cache_command='compute images list',
list_format="""
table(
name,
selfLink.map().scope(projects).segment(0):label=PROJECT,
family,
deprecated.state:label=DEPRECATED,
status
)
""",),
'compute.instanceGroups':
resource_info.ResourceInfo(
cache_command='compute instance-groups list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
network.basename(),
isManaged:label=MANAGED,
size:label=INSTANCES
)
""",),
'compute.instanceGroupManagers':
resource_info.ResourceInfo(
cache_command='compute instance-groups managed list',
list_format="""
table(
name,
location():label=LOCATION,
location_scope():label=SCOPE,
baseInstanceName,
size,
targetSize,
instanceTemplate.basename(),
autoscaled
)
""",),
'compute.instances':
resource_info.ResourceInfo(
cache_command='compute instances list',
list_format="""
table(
name,
zone.basename(),
machineType.machine_type(),
scheduling.preemptible.yesno(yes=true, no=''),
networkInterfaces[].networkIP.notnull().list():label=INTERNAL_IP,
networkInterfaces[].accessConfigs[0].natIP.notnull().list()\
:label=EXTERNAL_IP,
status
)
""",),
'compute.instanceTemplates':
resource_info.ResourceInfo(
cache_command='compute instance-templates list',
list_format="""
table(
name,
properties.machineType.machine_type(),
properties.scheduling.preemptible.yesno(yes=true, no=''),
creationTimestamp
)
""",),
'compute.invalidations':
resource_info.ResourceInfo(
cache_command='beta compute url-maps list-cdn-cache-invalidations',
list_format="""
table(
description,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.machineTypes':
resource_info.ResourceInfo(
cache_command='compute machine-types list',
list_format="""
table(
name,
zone.basename(),
guestCpus:label=CPUS,
memoryMb.size(units_in=MiB, units_out=GiB, precision=2):label=MEMORY_GB,
deprecated.state:label=DEPRECATED
)
""",),
'compute.networks':
resource_info.ResourceInfo(
cache_command='compute networks list',
list_format="""
table(
name,
x_gcloud_mode:label=MODE,
IPv4Range:label=IPV4_RANGE,
gatewayIPv4
)
""",),
'compute.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
targetLink.scope():label=TARGET,
operation_http_status():label=HTTP_STATUS,
status,
insertTime:label=TIMESTAMP
)
""",),
'compute.peerings':
resource_info.ResourceInfo(
cache_command='alpha compute networks peerings list',
list_format="""
table(
name,
source_network.basename():label=NETWORK,
network.map().scope(projects).segment(0):label=PEER_PROJECT,
network.basename():label=PEER_NETWORK,
autoCreateRoutes,
state,
stateDetails
)
""",),
'compute.projects':
resource_info.ResourceInfo(
list_format="""
value(
format("There is no API support yet.")
)
""",),
'compute.xpnProjects':
resource_info.ResourceInfo(
list_format="""
table(
name,
creationTimestamp,
xpnProjectStatus
)
""",),
'compute.xpnResourceId':
resource_info.ResourceInfo(
list_format="""
table(
id:label=RESOURCE_ID,
type:label=RESOURCE_TYPE)
""",),
'compute.regions':
resource_info.ResourceInfo(
cache_command='compute regions list',
list_format="""
table(
name,
quotas.metric.CPUS.quota():label=CPUS,
quotas.metric.DISKS_TOTAL_GB.quota():label=DISKS_GB,
quotas.metric.IN_USE_ADDRESSES.quota():label=ADDRESSES,
quotas.metric.STATIC_ADDRESSES.quota():label=RESERVED_ADDRESSES,
status():label=STATUS,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
'compute.routers':
resource_info.ResourceInfo(
cache_command='compute routers list',
list_format="""
table(
name,
region.basename(),
network.basename()
)
""",),
'compute.routes':
resource_info.ResourceInfo(
cache_command='compute routes list',
list_format="""
table(
name,
network.basename(),
destRange,
firstof(
nextHopInstance,
nextHopGateway,
nextHopIp,
nextHopVpnTunnel,
nextHopPeering).scope()
:label=NEXT_HOP,
priority
)
""",),
'compute.snapshots':
resource_info.ResourceInfo(
cache_command='compute snapshots list',
list_format="""
table(
name,
diskSizeGb,
sourceDisk.scope():label=SRC_DISK,
status
)
""",),
'compute.sslCertificates':
resource_info.ResourceInfo(
cache_command='compute ssl-certificates list',
list_format="""
table(
name,
creationTimestamp
)
""",),
'compute.subnetworks':
resource_info.ResourceInfo(
cache_command='compute networks subnets list',
list_format="""
table(
name,
region.basename(),
network.basename(),
ipCidrRange:label=RANGE
)
""",),
'compute.targetHttpProxies':
resource_info.ResourceInfo(
cache_command='compute target-http-proxies list',
list_format="""
table(
name,
urlMap.basename()
)
""",),
'compute.targetHttpsProxies':
resource_info.ResourceInfo(
cache_command='compute target-https-proxies list',
list_format="""
table(
name,
sslCertificates.map().basename().list():label=SSL_CERTIFICATES,
urlMap.basename()
)
""",),
'compute.targetInstances':
resource_info.ResourceInfo(
cache_command='compute target-instances list',
list_format="""
table(
name,
zone.basename(),
instance.basename(),
natPolicy
)
""",),
'compute.targetPoolInstanceHealth':
resource_info.ResourceInfo(
list_format="""
default
""",),
'compute.targetPools':
resource_info.ResourceInfo(
cache_command='compute target-pools list',
list_format="""
table(
name,
region.basename(),
sessionAffinity,
backupPool.basename():label=BACKUP,
healthChecks[].map().basename().list():label=HEALTH_CHECKS
)
""",),
'compute.targetSslProxies':
resource_info.ResourceInfo(
cache_command='compute target-ssl-proxies list',),
'compute.targetTcpProxies':
resource_info.ResourceInfo(
cache_command='compute target-tcp-proxies list',),
'compute.targetVpnGateways':
resource_info.ResourceInfo(
cache_command='compute target-vpn-gateways list',
list_format="""
table(
name,
network.basename(),
region.basename()
)
""",),
'compute.urlMaps':
resource_info.ResourceInfo(
cache_command='compute url-maps list',
list_format="""
table(
name,
defaultService
)
""",),
'compute.users':
resource_info.ResourceInfo(
cache_command='compute users list',
list_format="""
table(
name,
owner,
description
)
""",),
'compute.vpnTunnels':
resource_info.ResourceInfo(
cache_command='compute vpn-tunnels list',
list_format="""
table(
name,
region.basename(),
targetVpnGateway.basename():label=GATEWAY,
peerIp:label=PEER_ADDRESS
)
""",),
'compute.zones':
resource_info.ResourceInfo(
cache_command='compute zones list',
list_format="""
table(
name,
region.basename(),
status():label=STATUS,
maintenanceWindows.next_maintenance():label=NEXT_MAINTENANCE,
deprecated.deleted:label=TURNDOWN_DATE
)
""",),
# container
'container.images':
resource_info.ResourceInfo(
list_format="""
table(
name
)
""",),
'container.tags':
resource_info.ResourceInfo(
list_format="""
table(
digest.slice(7:19).join(''),
tags.list(),
timestamp.date(),
BUILD_DETAILS.buildDetails.provenance.sourceProvenance.sourceContext.context.cloudRepo.revisionId.notnull().list().slice(:8).join(''):label=GIT_SHA,
PACKAGE_VULNERABILITY.vulnerabilityDetails.severity.notnull().count().list():label=VULNERABILITIES,
IMAGE_BASIS.derivedImage.sort(distance).map().extract(baseResourceUrl).slice(:1).map().list().list().split('//').slice(1:).list().split('@').slice(:1).list():label=FROM,
BUILD_DETAILS.buildDetails.provenance.id.notnull().list():label=BUILD
)
""",),
'container.projects.zones.clusters':
resource_info.ResourceInfo(
async_collection='container.projects.zones.clusters',
list_format="""
table(
name,
zone,
master_version():label=MASTER_VERSION,
endpoint:label=MASTER_IP,
nodePools[0].config.machineType,
currentNodeVersion:label=NODE_VERSION,
currentNodeCount:label=NUM_NODES,
status
)
""",),
'container.projects.zones.clusters.nodePools':
resource_info.ResourceInfo(
list_format="""
table(
name,
config.machineType,
config.diskSizeGb,
version:label=NODE_VERSION
)
""",),
'container.projects.zones.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
zone,
targetLink.basename():label=TARGET,
statusMessage,
status
)
""",),
# dataflow
'dataflow.jobs':
resource_info.ResourceInfo(
list_format="""
table(
id:label=ID,
name:label=NAME,
type:label=TYPE,
creationTime.yesno(no="-"),
state
)
""",),
'dataflow.logs':
resource_info.ResourceInfo(
list_format="""
table[no-heading,pad=1](
messageImportance.enum(dataflow.JobMessage),
time.date(tz=LOCAL):label=TIME,
id,
messageText:label=TEXT
)
""",),
# dataproc
'dataproc.clusters':
resource_info.ResourceInfo(
list_format="""
table(
clusterName:label=NAME,
config.workerConfig.numInstances:label=WORKER_COUNT,
status.state:label=STATUS,
config.gceClusterConfig.zoneUri.scope(zone):label=ZONE
)
""",),
'dataproc.jobs':
resource_info.ResourceInfo(
async_collection='dataproc.operations',
list_format="""
table(
reference.jobId,
type.yesno(no="-"),
status.state:label=STATUS
)
""",),
'dataproc.operations':
resource_info.ResourceInfo(
list_format="""
table(
name:label=OPERATION_NAME,
done
)
""",),
# debug
'debug.logpoints':
resource_info.ResourceInfo(
list_format="""
table(
userEmail.if(all_users),
location,
condition,
logLevel,
logMessageFormat,
id,
full_status():label=STATUS)
:(isFinalState:sort=1, createTime:sort=2)
""",),
'debug.logpoints.create':
resource_info.ResourceInfo(
list_format="""
list(
format("id: {0}", id),
format("location: {0}", location),
format("logLevel: {0}", logLevel),
format("logMessageFormat: {0}", logMessageFormat),
format("condition: {0}", condition),
format("logViewUrl: {0}", logViewUrl),
format("status: {0}", full_status())
)
""",),
'debug.snapshots':
resource_info.ResourceInfo(list_format="""
table(
short_status():label=STATUS,
userEmail.if(all_users),
location,
condition,
finalTime.if(include_inactive != 0):label=COMPLETED_TIME,
id,
consoleViewUrl:label=VIEW
)
"""),
'debug.snapshots.create':
resource_info.ResourceInfo(list_format="""
list(
format("id: {0}", id),
format("location: {0}", location),
format("status: {0}", full_status()),
format("consoleViewUrl: {0}", consoleViewUrl)
)
"""),
'debug.targets':
resource_info.ResourceInfo(list_format="""
table(
name,
target_id:label=ID,
description
)
"""),
# deployment manager v2
'deploymentmanager.deployments':
resource_info.ResourceInfo(
list_format="""
table(
name,
operation.operationType:label=LAST_OPERATION_TYPE,
operation.status,
description,
manifest.basename(),
operation.error.errors.group(code)
)
""",),
'deploymentmanager.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
status,
targetLink.basename():label=TARGET,
error.errors.group(code)
)
""",),
'deploymentmanager.resources':
resource_info.ResourceInfo(
async_collection='deploymentmanager.operations',
list_format="""
table(
name,
type,
update.state.yesno(no="COMPLETED"),
update.error.errors.group(code),
update.intent
)
""",),
'deploymentmanager.resources_and_outputs':
resource_info.ResourceInfo(
async_collection='deploymentmanager.operations',
list_format="""
table(
resources:format='table(
name,
type,
update.state.yesno(no="COMPLETED"),
update.error.errors.group(code),
update.intent)',
outputs:format='table(
name:label=OUTPUTS,
finalValue:label=VALUE)'
)
""",),
'deploymentmanager.deployments_and_resources_and_outputs':
resource_info.ResourceInfo(
list_format="""
table(
deployment:format='default(name, id, description, fingerprint,
insertTime, manifest.basename(), labels, operation.operationType,
operation.name, operation.progress, operation.status,
operation.user, operation.endTime, operation.startTime,
operation.error, update)',
resources:format='table(
name:label=NAME,
type:label=TYPE,
update.state.yesno(no="COMPLETED"),
update.intent)',
outputs:format='table(
name:label=OUTPUTS,
finalValue:label=VALUE)'
)
""",),
'deploymentmanager.types':
resource_info.ResourceInfo(
list_format="""
table(
name
)
""",),
'deploymentmanager.type_providers':
resource_info.ResourceInfo(
async_collection='deploymentmanager.operations',
list_format="""
table(
name,
insertTime.date(format="%Y-%m-%d"):label=INSERT_DATE
)
""",),
# dns
'dns.changes':
resource_info.ResourceInfo(
list_format="""
table(
id,
startTime,
status
)
""",),
'dns.managedZones':
resource_info.ResourceInfo(
cache_command='dns managed-zones list',
list_format="""
table(
name,
dnsName,
description
)
""",),
'dns.resourceRecordSets':
resource_info.ResourceInfo(
list_format="""
table(
name,
type,
ttl,
rrdatas.list():label=DATA
)
""",),
# functions
'functions.projects.locations':
resource_info.ResourceInfo(
list_format="""
table(
name
)
""",),
'functions.projects.locations.functions':
resource_info.ResourceInfo(
list_format="""
table(
name.basename(),
status,
trigger():label=TRIGGER
)
""",),
# genomics
'genomics.alignments':
resource_info.ResourceInfo(
list_format="""
table(
alignment.position.referenceName,
alignment.position.position,
alignment.position.reverseStrand,
fragmentName,
alignedSequence:label=SEQUENCE
)
""",),
'genomics.callSets':
resource_info.ResourceInfo(
list_format="""
table(
id,
name,
variantSetIds.list()
)
""",),
'genomics.datasets':
resource_info.ResourceInfo(
list_format="""
table(
id,
name
)
""",),
'genomics.readGroupSets':
resource_info.ResourceInfo(
list_format="""
table(
id,
name,
referenceSetId
)
""",),
'genomics.references':
resource_info.ResourceInfo(
list_format="""
table(
id,
name,
length,
sourceUri,
sourceAccessions.list():label=ACCESSIONS
)
""",),
'genomics.referenceSets':
resource_info.ResourceInfo(
list_format="""
table(
id,
assemblyId,
sourceAccessions.list()
)
""",),
'genomics.variants':
resource_info.ResourceInfo(
list_format="""
table(
variantSetId,
referenceName,
start,
end,
referenceBases,
alternateBases
)
""",),
'genomics.variantsets':
resource_info.ResourceInfo(
list_format="""
table(
id,
name,
description
)
""",),
# iam
'iam.service_accounts':
resource_info.ResourceInfo(
list_format="""
table(
displayName:label=NAME,
email
)
""",),
'iam.service_accounts.keys':
resource_info.ResourceInfo(
list_format="""
table(
name.scope(keys):label=KEY_ID,
validAfterTime:label=CREATED_AT,
validBeforeTime:label=EXPIRES_AT
)
""",),
# logging
'logging.logs':
resource_info.ResourceInfo(
list_format="""
table(
name.scope(logs):label=ID
)
""",),
'logging.metrics':
resource_info.ResourceInfo(
list_format="""
table(
name,
description,
filter,
version
)
""",),
'logging.resourceDescriptors':
resource_info.ResourceInfo(
list_format="""
table(
type,
description,
labels[].key.list()
)
""",),
'logging.sinks':
resource_info.ResourceInfo(
list_format="""
table(
name,
destination,
type,
format,
filter
)
""",),
# ml
'ml.operations':
resource_info.ResourceInfo(
list_format="""
table(
name
)
""",),
'ml.beta.jobs':
resource_info.ResourceInfo(
list_format="""
table(
jobId.basename(),
state:label=STATUS,
createTime.date(tz=LOCAL):label=CREATED
)
""",),
'ml.models.versions':
resource_info.ResourceInfo(
async_collection='ml.operations',
list_format="""
table(
name.basename(),
deploymentUri
)
""",),
'ml.models':
resource_info.ResourceInfo(
list_format="""
table(
name.basename(),
defaultVersion.name.basename()
)
""",),
# projects
'developerprojects.projects':
resource_info.ResourceInfo(
list_format="""
table(
projectId,
title,
projectNumber
)
""",),
# pubsub
'pubsub.projects.topics':
resource_info.ResourceInfo(
list_format="""
table[box](
topicId:label=TOPIC,
success:label=SUCCESS,
reason:label=REASON
)
""",),
'pubsub.topics.publish':
resource_info.ResourceInfo(
list_format="""
table[box](
messageIds:label=MESSAGE_ID,
)
""",),
'pubsub.projects.subscriptions':
resource_info.ResourceInfo(
list_format="""
table[box](
subscriptionId:label=SUBSCRIPTION,
topic:label=TOPIC,
type,
pushEndpoint:label=PUSH_ENDPOINT,
ackDeadlineSeconds:label=ACK_DEADLINE,
retainAckedMessages:label=RETAIN_ACKED_MESSAGES,
messageRetentionDuration:label=MESSAGE_RETENTION_DURATION,
success:label=SUCCESS,
reason:label=REASON
)
""",),
'pubsub.subscriptions.ack':
resource_info.ResourceInfo(
list_format="""
table[box](
subscriptionId:label=SUBSCRIPTION,
ackIds:label=ACK_IDS
)
""",),
'pubsub.subscriptions.mod_ack':
resource_info.ResourceInfo(
list_format="""
table[box](
subscriptionId:label=SUBSCRIPTION,
ackId:label=ACK_ID,
ackDeadlineSeconds:label=ACK_DEADLINE
)
""",),
'pubsub.subscriptions.mod_config':
resource_info.ResourceInfo(
list_format="""
table[box](
subscriptionId:label=SUBSCRIPTION,
pushEndpoint:label=PUSH_ENDPOINT
)
""",),
'pubsub.subscriptions.pull':
resource_info.ResourceInfo(
list_format="""
table[box](
message.data.decode(base64),
message.messageId,
message.attributes.list(separator=' '),
ackId.if(NOT auto_ack)
)
""",),
'pubsub.subscriptions.list':
resource_info.ResourceInfo(
list_format="""
table[box](
projectId:label=PROJECT,
subscriptionId:label=SUBSCRIPTION,
topicId:label=TOPIC,
type,
ackDeadlineSeconds:label=ACK_DEADLINE
)
""",),
    # NOTE(review): the 'exipirationTime' column key below looks like a typo
    # of 'expirationTime' (cf. 'pubsub.snapshots.list' further down). Not
    # renamed here because if the producing command emits the misspelled key,
    # fixing only this side would blank the column -- confirm against the
    # command that feeds this table before correcting both ends.
    'pubsub.projects.snapshots':
        resource_info.ResourceInfo(
            list_format="""
          table[box](
            snapshotId:label=SNAPSHOT,
            topicId:label=TOPIC,
            exipirationTime:label=EXPIRATION_TIME,
            success:label=SUCCESS,
            reason:label=REASON
          )
        """,),
'pubsub.snapshots.list':
resource_info.ResourceInfo(
list_format="""
table[box](
projectId:label=PROJECT,
snapshotId:label=SNAPSHOT,
topicId:label=TOPIC,
expirationTime:label=EXPIRATION_TIME
)
""",),
'pubsub.subscriptions.seek':
resource_info.ResourceInfo(
list_format="""
table[box](
time:label=TIME,
snapshotId:label=SNAPSHOT
subscriptionId:label=SUBSCRIPTION,
)
""",),
'replicapoolupdater.rollingUpdates':
resource_info.ResourceInfo(
list_format="""
table(
id,
instanceGroupManager.basename():label=GROUP_NAME,
instanceTemplate.basename():label=TEMPLATE_NAME,
status,
statusMessage
)
""",),
'replicapoolupdater.rollingUpdates.instanceUpdates':
resource_info.ResourceInfo(
list_format="""
table(
instance.basename():label=INSTANCE_NAME,
status
)
""",),
# runtime config
'runtimeconfig.configurations':
resource_info.ResourceInfo(
list_format="""
table(
name,
description
)
""",),
'runtimeconfig.variables':
resource_info.ResourceInfo(
list_format="""
table(
name,
updateTime.date()
)
""",),
'runtimeconfig.waiters':
resource_info.ResourceInfo(
async_collection='runtimeconfig.waiters',
list_format="""
table(
name,
createTime.date(),
waiter_status(),
error.message
)
""",),
# service management (inception)
'servicemanagement-v1.services':
resource_info.ResourceInfo(
bypass_cache=True,
list_format="""
table(
serviceName:label=NAME,
serviceConfig.title
)
""",),
'servicemanagement-v1.serviceConfigs':
resource_info.ResourceInfo(
list_format="""
table(
id:label=CONFIG_ID,
name:label=SERVICE_NAME
)
""",),
# service registry
'service_registry.endpoints':
resource_info.ResourceInfo(
async_collection='service_registry.operations',
list_format="""
table[box](
name,
state,
addresses[].map().endpoint_address().list(separator=' | '):label=ADDRESSES
)
""",),
'service_registry.operations':
resource_info.ResourceInfo(
list_format="""
table(
name,
operationType:label=TYPE,
status,
targetLink.basename():label=TARGET,
insertTime.date(format="%Y-%m-%d"):label=DATE,
error.errors.group(code, message)
)
""",),
# source
'source.captures':
resource_info.ResourceInfo(
list_format="""
table(
project_id,
id:label=CAPTURE_ID
)
""",),
'source.captures.upload':
resource_info.ResourceInfo(
list_format="""
flattened(capture.id, context_file, extended_context_file)
""",),
'source.jobs':
resource_info.ResourceInfo(
list_format="""
table(
name.yesno(no="default"):label=REPO_NAME,
projectId,
vcs,
state,
createTime
)
""",),
# spanner
'spanner.databases':
resource_info.ResourceInfo(
list_format="""
table(
name.basename(),
state
)
""",),
'spanner.instanceConfigs':
resource_info.ResourceInfo(
list_format="""
table(
name.basename(),
displayName
)
""",),
'spanner.instances':
resource_info.ResourceInfo(
list_format="""
table(
name.basename(),
displayName,
config.basename(),
nodeCount,
state
)
""",),
'spanner.operations':
resource_info.ResourceInfo(
list_format="""
table(
name.basename():label=OPERATION_ID,
metadata.statements.join(sep="\n"),
done,
metadata.'@type'.split('.').slice(-1:).join()
)
""",),
# sql
'sql.databases':
resource_info.ResourceInfo(
list_format="""
table(
name,
charset,
collation
)
""",),
'sql.backupRuns':
resource_info.ResourceInfo(
list_format="""
table(
dueTime.iso(),
error.code.yesno(no="-"):label=ERROR,
status
)
""",),
'sql.backupRuns.v1beta4':
resource_info.ResourceInfo(
list_format="""
table(
id,
windowStartTime.iso(),
error.code.yesno(no="-"):label=ERROR,
status
)
""",),
'sql.flags':
resource_info.ResourceInfo(
list_format="""
table(
name,
type,
appliesTo.list():label=DATABASE_VERSION,
allowedStringValues.list():label=ALLOWED_VALUES
)
""",),
'sql.instances':
resource_info.ResourceInfo(
async_collection='sql.operations',
cache_command='sql instances list',
list_format="""
table(
instance:label=NAME,
region,
settings.tier,
ipAddresses[0].ipAddress.yesno(no="-"):label=ADDRESS,
state:label=STATUS
)
""",),
'sql.instances.v1beta4':
resource_info.ResourceInfo(
async_collection='sql.operations.v1beta4',
cache_command='sql instances list',
list_format="""
table(
name,
region,
settings.tier,
ipAddresses[0].ipAddress.yesno(no="-"):label=ADDRESS,
state:label=STATUS
)
""",),
'sql.operations':
resource_info.ResourceInfo(
async_collection='default',
list_format="""
table(
operation,
operationType:label=TYPE,
startTime.iso():label=START,
endTime.iso():label=END,
error[0].code.yesno(no="-"):label=ERROR,
state:label=STATUS
)
""",),
'sql.operations.v1beta4':
resource_info.ResourceInfo(
async_collection='default',
list_format="""
table(
name,
operationType:label=TYPE,
startTime.iso():label=START,
endTime.iso():label=END,
error[0].code.yesno(no="-"):label=ERROR,
status:label=STATUS
)
""",),
'sql.sslCerts':
resource_info.ResourceInfo(
async_collection='sql.operations',
list_format="""
table(
commonName:label=NAME,
sha1Fingerprint,
expirationTime.yesno(no="-"):label=EXPIRATION
)
""",),
'sql.tiers':
resource_info.ResourceInfo(
list_format="""
table(
tier,
region.list():label=AVAILABLE_REGIONS,
RAM.size(),
DiskQuota.size():label=DISK
)
""",),
'sql.users.v1beta4':
resource_info.ResourceInfo(
async_collection='sql.operations.v1beta4',
list_format="""
table(
name.yesno(no='(anonymous)'),
host
)
""",),
# test
'test.android.devices':
resource_info.ResourceInfo( # Deprecated
list_format="""
table[box](
id:label=DEVICE_ID,
manufacturer:label=MAKE,
name:label=MODEL_NAME,
form.color(blue=VIRTUAL,yellow=PHYSICAL):label=FORM,
format("{0:4} x {1}", screenY, screenX):label=RESOLUTION,
supportedVersionIds.list(undefined="none"):label=OS_VERSION_IDS,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",),
'test.android.models':
resource_info.ResourceInfo(
list_format="""
table[box](
id:label=MODEL_ID,
manufacturer:label=MAKE,
name:label=MODEL_NAME,
form.color(blue=VIRTUAL,yellow=PHYSICAL):label=FORM,
format("{0:4} x {1}", screenY, screenX):label=RESOLUTION,
supportedVersionIds.list(undefined="none"):label=OS_VERSION_IDS,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",),
'test.android.versions':
resource_info.ResourceInfo(
list_format="""
table[box](
id:label=OS_VERSION_ID:align=center,
versionString:label=VERSION:align=center,
codeName,
apiLevel:align=center,
releaseDate.date(format='%Y-%m-%d'):align=center,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",),
'test.android.locales':
resource_info.ResourceInfo(
list_format="""
table[box](
id:label=LOCALE,
name,
region,
tags.list().color(green=default,red=deprecated,yellow=preview)
)
""",),
'test.android.run.outcomes':
resource_info.ResourceInfo(
async_collection='test.android.run.url',
list_format="""
table[box](
outcome.color(red=Fail, green=Pass, yellow=Inconclusive),
axis_value:label=TEST_AXIS_VALUE,
test_details:label=TEST_DETAILS
)
""",),
'test.android.run.url':
resource_info.ResourceInfo(
list_format="""
value(format(
'Final test results will be available at [{0}].', [])
)
""",),
# special IAM roles completion case
'iam.roles':
resource_info.ResourceInfo(bypass_cache=True,),
# generic
'default':
resource_info.ResourceInfo(
list_format="""
default
""",),
'uri':
resource_info.ResourceInfo(
list_format="""
table(
uri():sort=1:label=""
)
""",),
}
def Get(collection, must_be_registered=True):
  """Looks up the ResourceInfo registered for a collection.

  Args:
    collection: The resource collection.
    must_be_registered: Raises exception if True, otherwise returns None.

  Raises:
    UnregisteredCollectionError: If collection is not registered and
      must_be_registered is True.

  Returns:
    The ResourceInfo for collection or None if not registered.
  """
  info = RESOURCE_REGISTRY.get(collection)
  if not info:
    if must_be_registered:
      raise resource_exceptions.UnregisteredCollectionError(
          'Collection [{0}] is not registered.'.format(collection))
    return None
  # Record which collection the info was resolved for before handing it out.
  info.collection = collection
  return info
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
import re
import os
import tempfile
import contextlib
import multiprocessing
import six
from .. import environment
from ..console import log
from .. import util
WIN = (os.name == "nt")
# Conda (as of version 4.7.5) is not safe to run in parallel.
# See https://github.com/conda/conda/issues/8870
# Hence, serialize the calls to it.
util.new_multiprocessing_lock("conda_lock")
def _conda_lock():
    """Return the shared multiprocessing lock serializing conda invocations.

    Kept as a function (rather than a cached module attribute) so tests can
    monkeypatch it easily.
    """
    return util.get_multiprocessing_lock("conda_lock")
@contextlib.contextmanager
def _dummy_lock():
    """No-op context manager used in place of _conda_lock when the command
    being run does not need to be serialized."""
    yield
def _find_conda():
    """
    Find the conda executable robustly across conda versions.

    Returns
    -------
    conda : str
        Path to the conda executable.

    Raises
    ------
    IOError
        If the executable cannot be found in either the CONDA_EXE environment
        variable or in the PATH.

    Notes
    -----
    In POSIX platforms in conda >= 4.4, conda can be set up as a bash function
    rather than an executable. (This is to enable the syntax
    ``conda activate env-name``.) In this case, the environment variable
    ``CONDA_EXE`` contains the path to the conda executable. In other cases,
    we use standard search for the appropriate name in the PATH.

    See https://github.com/airspeed-velocity/asv/issues/645 for more details.
    """
    conda_exe = os.environ.get('CONDA_EXE')
    if conda_exe is not None:
        # conda >= 4.4 exposes its real executable via CONDA_EXE.
        return conda_exe
    # Fall back to a regular PATH search (raises IOError when absent).
    return util.which('conda')
class Conda(environment.Environment):
    """
    Manage an environment using conda.
    Dependencies are installed using ``conda``. The benchmarked
    project is installed using ``pip`` (since ``conda`` doesn't have a
    method to install from an arbitrary ``setup.py``).
    """
    tool_name = "conda"
    # Class-level cache for matches(); keyed by the python version string.
    _matches_cache = {}
    def __init__(self, conf, python, requirements, tagged_env_vars):
        """
        Parameters
        ----------
        conf : Config instance
        python : str
            Version of Python. Must be of the form "MAJOR.MINOR".
        requirements : dict
            Dictionary mapping a PyPI package name to a version
            identifier string.
        """
        self._python = python
        self._requirements = requirements
        self._conda_channels = conf.conda_channels
        # Optional user-supplied environment.yml; when set, it drives env
        # creation instead of a generated one (see _setup()).
        self._conda_environment_file = conf.conda_environment_file
        super(Conda, self).__init__(conf,
                                    python,
                                    requirements,
                                    tagged_env_vars)
    @classmethod
    def matches(cls, python):
        """Return whether conda can create an env for `python` (cached)."""
        # Calling conda can take a long time, so remember the result
        if python not in cls._matches_cache:
            cls._matches_cache[python] = cls._matches(python)
        return cls._matches_cache[python]
    @classmethod
    def _matches(cls, python):
        """Uncached worker for matches(): dry-run a conda env creation."""
        if not re.match(r'^[0-9].*$', python):
            # The python name should be a version number
            return False
        try:
            conda = _find_conda()
        except IOError:
            return False
        else:
            # This directory never gets created, since we're just
            # doing a dry run below. All it needs to be is something
            # that doesn't already exist.
            path = os.path.join(tempfile.gettempdir(), 'check')
            # Check that the version number is valid
            try:
                with _conda_lock():
                    util.check_call([
                        conda,
                        'create',
                        '--yes',
                        '-p',
                        path,
                        'python={0}'.format(python),
                        '--dry-run'], display_error=False, dots=False)
            except util.ProcessError:
                return False
            else:
                return True
    def _setup(self):
        """Create the conda environment from a generated (or user-provided)
        environment.yml, installing conda and pip requirements."""
        log.info("Creating conda environment for {0}".format(self.name))
        conda_args, pip_args = self._get_requirements()
        env = dict(os.environ)
        env.update(self.build_env_vars)
        if not self._conda_environment_file:
            # The user-provided env file is assumed to set the python version
            conda_args = ['python={0}'.format(self._python), 'wheel', 'pip'] + conda_args
        # Create a temporary environment.yml file
        # and use that to generate the env for benchmarking.
        # delete=False because conda reads it by name; removed in `finally`.
        env_file = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=".yml")
        try:
            env_file.write('name: {0}\n'
                           'channels:\n'.format(self.name))
            env_file.writelines((' - %s\n' % ch for ch in self._conda_channels))
            env_file.write('dependencies:\n')
            # categorize & write dependencies based on pip vs. conda
            env_file.writelines((' - %s\n' % s for s in conda_args))
            if pip_args:
                # and now specify the packages that are to be installed in
                # the pip subsection
                env_file.write(' - pip:\n')
                env_file.writelines((' - %s\n' % s for s in pip_args))
            env_file.close()
            try:
                # Prefer the user's env file when one was configured.
                env_file_name = self._conda_environment_file or env_file.name
                self._run_conda(['env', 'create', '-f', env_file_name,
                                 '-p', self._path, '--force'],
                                env=env)
                if self._conda_environment_file and (conda_args or pip_args):
                    # Add extra packages
                    env_file_name = env_file.name
                    self._run_conda(['env', 'update', '-f', env_file_name,
                                     '-p', self._path],
                                    env=env)
            except Exception:
                # Log the env file contents (or its name, for a user file)
                # to make failures diagnosable, then re-raise.
                if env_file_name != env_file.name:
                    log.info("conda env create/update failed: in {} with file {}".format(self._path, env_file_name))
                elif os.path.isfile(env_file_name):
                    with open(env_file_name, 'r') as f:
                        text = f.read()
                    log.info("conda env create/update failed: in {} with:\n{}".format(self._path, text))
                raise
        finally:
            os.unlink(env_file.name)
    def _get_requirements(self):
        """Split self._requirements into (conda_args, pip_args) spec lists.

        Keys prefixed with ``pip+`` go to pip (``pkg==ver``); all others go
        to conda (``pkg=ver``). Empty version strings yield bare names.
        """
        if self._requirements:
            # retrieve and return all conda / pip dependencies
            conda_args = []
            pip_args = []
            for key, val in six.iteritems(self._requirements):
                if key.startswith('pip+'):
                    if val:
                        pip_args.append("{0}=={1}".format(key[4:], val))
                    else:
                        pip_args.append(key[4:])
                else:
                    if val:
                        conda_args.append("{0}={1}".format(key, val))
                    else:
                        conda_args.append(key)
            return conda_args, pip_args
        else:
            return [], []
    def _run_conda(self, args, env=None):
        """
        Run conda command outside the environment.
        """
        try:
            conda = _find_conda()
        except IOError as e:
            raise util.UserError(str(e))
        # Conda is not parallel-safe; serialize all invocations.
        with _conda_lock():
            return util.check_output([conda] + args, env=env)
    def run(self, args, **kwargs):
        """Run the environment's python with `args`."""
        log.debug("Running '{0}' in {1}".format(' '.join(args), self.name))
        return self.run_executable('python', args, **kwargs)
    def run_executable(self, executable, args, **kwargs):
        """Run an executable inside the environment; serializes conda calls."""
        # Special-case running conda, for user-provided commands
        if executable == "conda":
            executable = _find_conda()
            lock = _conda_lock
        else:
            lock = _dummy_lock
        # Conda doesn't guarantee that user site directories are excluded
        kwargs["env"] = dict(kwargs.pop("env", os.environ),
                             PYTHONNOUSERSITE=str("True"))
        with lock():
            return super(Conda, self).run_executable(executable, args, **kwargs)
| |
from typing import Optional
import warnings
import numpy as np
from pandas._libs.algos import unique_deltas
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.ccalendar import (
DAYS,
MONTH_ALIASES,
MONTH_NUMBERS,
MONTHS,
int_to_weekday,
)
from pandas._libs.tslibs.fields import build_field_sarray
from pandas._libs.tslibs.offsets import ( # noqa:F401
DateOffset,
Day,
_get_offset,
to_offset,
)
from pandas._libs.tslibs.parsing import get_rule_month
from pandas._libs.tslibs.resolution import month_position_check
from pandas._libs.tslibs.timezones import UTC
from pandas._libs.tslibs.tzconversion import tz_convert
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_period_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.algorithms import unique
# Fixed durations expressed in nanoseconds (1000 ns = 1 microsecond) --
# the unit of the int64 (`.asi8`) timestamp values processed below.
_ONE_MICRO = 1000
_ONE_MILLI = _ONE_MICRO * 1000
_ONE_SECOND = _ONE_MILLI * 1000
_ONE_MINUTE = 60 * _ONE_SECOND
_ONE_HOUR = 60 * _ONE_MINUTE
_ONE_DAY = 24 * _ONE_HOUR
# ---------------------------------------------------------------------
# Offset names ("time rules") and related functions
# Map an offset alias to the closest period alias (e.g. BQ -> Q, BA -> A);
# consumed by get_period_alias() below.
_offset_to_period_map = {
    "WEEKDAY": "D",
    "EOM": "M",
    "BM": "M",
    "BQS": "Q",
    "QS": "Q",
    "BQ": "Q",
    "BA": "A",
    "AS": "A",
    "BAS": "A",
    "MS": "M",
    "D": "D",
    "C": "C",
    "B": "B",
    "T": "T",
    "S": "S",
    "L": "L",
    "U": "U",
    "N": "N",
    "H": "H",
    "Q": "Q",
    "A": "A",
    "W": "W",
    "M": "M",
    "Y": "A",
    "BY": "A",
    "YS": "A",
    "BYS": "A",
}
# Prefixes that also occur with a month-anchor suffix (e.g. "QS-MAR");
# each anchored form maps to the same period alias as its bare prefix.
_need_suffix = ["QS", "BQ", "BQS", "YS", "AS", "BY", "BA", "BYS", "BAS"]
for _prefix in _need_suffix:
    for _m in MONTHS:
        key = f"{_prefix}-{_m}"
        _offset_to_period_map[key] = _offset_to_period_map[_prefix]
# Anchored annual ("A-<MONTH>") and quarterly ("Q-<MONTH>") aliases are
# valid period aliases themselves, so they map to themselves.
for _prefix in ["A", "Q"]:
    for _m in MONTHS:
        _alias = f"{_prefix}-{_m}"
        _offset_to_period_map[_alias] = _alias
# Likewise anchored weekly aliases ("W-<DAY>") map to themselves.
for _d in DAYS:
    _offset_to_period_map[f"W-{_d}"] = f"W-{_d}"
def get_period_alias(offset_str: str) -> Optional[str]:
    """
    Alias to closest period strings BQ->Q etc.

    Returns None when no period alias is registered for *offset_str*.
    """
    return _offset_to_period_map.get(offset_str)
def get_offset(name: str) -> DateOffset:
    """
    Return DateOffset object associated with rule name.

    .. deprecated:: 1.0.0
        Use ``to_offset`` instead.

    Examples
    --------
    get_offset('EOM') --> BMonthEnd(1)
    """
    # Deprecated shim: warn, then delegate to the underlying lookup.
    msg = (
        "get_offset is deprecated and will be removed in a future version, "
        "use to_offset instead"
    )
    warnings.warn(msg, FutureWarning, stacklevel=2)
    return _get_offset(name)
# ---------------------------------------------------------------------
# Period codes
def infer_freq(index, warn: bool = True) -> Optional[str]:
    """
    Infer the most likely frequency given the input index. If the frequency is
    uncertain, a warning will be printed.

    Parameters
    ----------
    index : DatetimeIndex or TimedeltaIndex
        If passed a Series will use the values of the series (NOT THE INDEX).
    warn : bool, default True

    Returns
    -------
    str or None
        None if no discernible frequency.

    Raises
    ------
    TypeError
        If the index is not datetime-like.
    ValueError
        If there are fewer than three values.
    """
    import pandas as pd  # local import; presumably avoids a circular import -- TODO confirm
    if isinstance(index, ABCSeries):
        # Operate on the Series' values, never on its index.
        values = index._values
        if not (
            is_datetime64_dtype(values)
            or is_timedelta64_dtype(values)
            or values.dtype == object
        ):
            raise TypeError(
                "cannot infer freq from a non-convertible dtype "
                f"on a Series of {index.dtype}"
            )
        index = values
    inferer: _FrequencyInferer
    if not hasattr(index, "dtype"):
        pass
    elif is_period_dtype(index.dtype):
        # A PeriodIndex already knows its frequency; point callers at it.
        raise TypeError(
            "PeriodIndex given. Check the `freq` attribute "
            "instead of using infer_freq."
        )
    elif is_timedelta64_dtype(index.dtype):
        # Allow TimedeltaIndex and TimedeltaArray
        inferer = _TimedeltaFrequencyInferer(index, warn=warn)
        return inferer.get_freq()
    if isinstance(index, pd.Index) and not isinstance(index, pd.DatetimeIndex):
        if isinstance(index, (pd.Int64Index, pd.Float64Index)):
            raise TypeError(
                f"cannot infer freq from a non-convertible index type {type(index)}"
            )
        index = index._values
    if not isinstance(index, pd.DatetimeIndex):
        # Coerce remaining array-likes to a DatetimeIndex for inference.
        index = pd.DatetimeIndex(index)
    inferer = _FrequencyInferer(index, warn=warn)
    return inferer.get_freq()
class _FrequencyInferer:
    """
    Infer a frequency string from the int64 nanosecond values of a
    datetime-like index.

    Not sure if I can avoid the state machine here
    """
    def __init__(self, index, warn: bool = True):
        self.index = index
        # int64 nanosecond representation of the index values.
        self.i8values = index.asi8
        # This moves the values, which are implicitly in UTC, to the
        # the timezone so they are in local time
        if hasattr(index, "tz"):
            if index.tz is not None:
                self.i8values = tz_convert(self.i8values, UTC, index.tz)
        self.warn = warn
        # At least two deltas (three points) are required to infer anything.
        if len(index) < 3:
            raise ValueError("Need at least 3 dates to infer frequency")
        self.is_monotonic = (
            self.index._is_monotonic_increasing or self.index._is_monotonic_decreasing
        )
    @cache_readonly
    def deltas(self):
        # Unique consecutive differences of the (possibly tz-localized) values.
        return unique_deltas(self.i8values)
    @cache_readonly
    def deltas_asi8(self):
        # NB: we cannot use self.i8values here because we may have converted
        # the tz in __init__
        return unique_deltas(self.index.asi8)
    @cache_readonly
    def is_unique(self) -> bool:
        # True when all consecutive local-time deltas are equal.
        return len(self.deltas) == 1
    @cache_readonly
    def is_unique_asi8(self) -> bool:
        # Same check on the raw UTC-based values (robust across DST).
        return len(self.deltas_asi8) == 1
    def get_freq(self) -> Optional[str]:
        """
        Find the appropriate frequency string to describe the inferred
        frequency of self.i8values

        Returns
        -------
        str or None
        """
        if not self.is_monotonic or not self.index._is_unique:
            return None
        delta = self.deltas[0]
        if _is_multiple(delta, _ONE_DAY):
            return self._infer_daily_rule()
        # Business hourly, maybe. 17: one day / 65: one weekend
        if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):
            return "BH"
        # Possibly intraday frequency. Here we use the
        # original .asi8 values as the modified values
        # will not work around DST transitions. See #8772
        elif not self.is_unique_asi8:
            return None
        delta = self.deltas_asi8[0]
        if _is_multiple(delta, _ONE_HOUR):
            # Hours
            return _maybe_add_count("H", delta / _ONE_HOUR)
        elif _is_multiple(delta, _ONE_MINUTE):
            # Minutes
            return _maybe_add_count("T", delta / _ONE_MINUTE)
        elif _is_multiple(delta, _ONE_SECOND):
            # Seconds
            return _maybe_add_count("S", delta / _ONE_SECOND)
        elif _is_multiple(delta, _ONE_MILLI):
            # Milliseconds
            return _maybe_add_count("L", delta / _ONE_MILLI)
        elif _is_multiple(delta, _ONE_MICRO):
            # Microseconds
            return _maybe_add_count("U", delta / _ONE_MICRO)
        else:
            # Nanoseconds
            return _maybe_add_count("N", delta)
    @cache_readonly
    def day_deltas(self):
        # Deltas expressed in (possibly fractional) days.
        return [x / _ONE_DAY for x in self.deltas]
    @cache_readonly
    def hour_deltas(self):
        # Deltas expressed in (possibly fractional) hours.
        return [x / _ONE_HOUR for x in self.deltas]
    @cache_readonly
    def fields(self):
        # Structured array of per-value datetime fields (year, month, ...).
        return build_field_sarray(self.i8values)
    @cache_readonly
    def rep_stamp(self):
        # Representative timestamp: the first value of the index.
        return Timestamp(self.i8values[0])
    def month_position_check(self):
        # Returns "cs"/"bs"/"ce"/"be" (calendar/business start/end) or None.
        return month_position_check(self.fields, self.index.dayofweek)
    @cache_readonly
    def mdiffs(self):
        # Unique month-count differences between consecutive values.
        nmonths = self.fields["Y"] * 12 + self.fields["M"]
        return unique_deltas(nmonths.astype("i8"))
    @cache_readonly
    def ydiffs(self):
        # Unique year differences between consecutive values.
        return unique_deltas(self.fields["Y"].astype("i8"))
    def _infer_daily_rule(self) -> Optional[str]:
        # Try coarser rules first: annual, quarterly, monthly; then weekly
        # or plain daily, business daily, and finally week-of-month.
        annual_rule = self._get_annual_rule()
        if annual_rule:
            nyears = self.ydiffs[0]
            month = MONTH_ALIASES[self.rep_stamp.month]
            alias = f"{annual_rule}-{month}"
            return _maybe_add_count(alias, nyears)
        quarterly_rule = self._get_quarterly_rule()
        if quarterly_rule:
            nquarters = self.mdiffs[0] / 3
            # Map month-in-quarter to the quarter's anchoring month alias.
            mod_dict = {0: 12, 2: 11, 1: 10}
            month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]]
            alias = f"{quarterly_rule}-{month}"
            return _maybe_add_count(alias, nquarters)
        monthly_rule = self._get_monthly_rule()
        if monthly_rule:
            return _maybe_add_count(monthly_rule, self.mdiffs[0])
        if self.is_unique:
            days = self.deltas[0] / _ONE_DAY
            if days % 7 == 0:
                # Weekly
                day = int_to_weekday[self.rep_stamp.weekday()]
                return _maybe_add_count(f"W-{day}", days / 7)
            else:
                return _maybe_add_count("D", days)
        if self._is_business_daily():
            return "B"
        wom_rule = self._get_wom_rule()
        if wom_rule:
            return wom_rule
        return None
    def _get_annual_rule(self) -> Optional[str]:
        # Annual requires a single year step and a constant month.
        if len(self.ydiffs) > 1:
            return None
        if len(unique(self.fields["M"])) > 1:
            return None
        pos_check = self.month_position_check()
        return {"cs": "AS", "bs": "BAS", "ce": "A", "be": "BA"}.get(pos_check)
    def _get_quarterly_rule(self) -> Optional[str]:
        # Quarterly requires a single month step that is a multiple of 3.
        if len(self.mdiffs) > 1:
            return None
        if not self.mdiffs[0] % 3 == 0:
            return None
        pos_check = self.month_position_check()
        return {"cs": "QS", "bs": "BQS", "ce": "Q", "be": "BQ"}.get(pos_check)
    def _get_monthly_rule(self) -> Optional[str]:
        if len(self.mdiffs) > 1:
            return None
        pos_check = self.month_position_check()
        return {"cs": "MS", "bs": "BMS", "ce": "M", "be": "BM"}.get(pos_check)
    def _is_business_daily(self) -> bool:
        # quick check: cannot be business daily
        if self.day_deltas != [1, 3]:
            return False
        # probably business daily, but need to confirm
        first_weekday = self.index[0].weekday()
        shifts = np.diff(self.index.asi8)
        shifts = np.floor_divide(shifts, _ONE_DAY)
        weekdays = np.mod(first_weekday + np.cumsum(shifts), 7)
        # Every 3-day gap must land on Monday (weekday 0) and every 1-day
        # gap must stay within Tuesday..Friday.
        return np.all(
            ((weekdays == 0) & (shifts == 3))
            | ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))
        )
    def _get_wom_rule(self) -> Optional[str]:
        # FIXME: dont leave commented-out
        # wdiffs = unique(np.diff(self.index.week))
        # We also need -47, -49, -48 to catch index spanning year boundary
        # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all():
        # return None
        weekdays = unique(self.index.weekday)
        if len(weekdays) > 1:
            return None
        week_of_months = unique((self.index.day - 1) // 7)
        # Only attempt to infer up to WOM-4. See #9425
        week_of_months = week_of_months[week_of_months < 4]
        if len(week_of_months) == 0 or len(week_of_months) > 1:
            return None
        # get which week
        week = week_of_months[0] + 1
        wd = int_to_weekday[weekdays[0]]
        return f"WOM-{week}{wd}"
class _TimedeltaFrequencyInferer(_FrequencyInferer):
    """Frequency inference for timedelta data: no calendar (annual,
    quarterly, monthly, business) rules apply, only weekly/daily."""
    def _infer_daily_rule(self):
        # Without a single repeating delta nothing can be inferred.
        if not self.is_unique:
            return None
        days = self.deltas[0] / _ONE_DAY
        if days % 7 != 0:
            return _maybe_add_count("D", days)
        # Weekly: anchor on the weekday of the representative stamp.
        wd = int_to_weekday[self.rep_stamp.weekday()]
        return _maybe_add_count(f"W-{wd}", days / 7)
def _is_multiple(us, mult: int) -> bool:
    """Return True when *us* is an exact multiple of *mult*."""
    remainder = us % mult
    return remainder == 0
def _maybe_add_count(base: str, count: float) -> str:
    """Prefix *base* with an integer multiplier unless the count is 1."""
    if count == 1:
        return base
    # Counts other than 1 must be whole numbers (e.g. 2.0 -> "2").
    assert count == int(count)
    return f"{int(count)}{base}"
# ----------------------------------------------------------------------
# Frequency comparison
def is_subperiod(source, target) -> bool:
    """
    Returns True if downsampling is possible between source and target
    frequencies

    Parameters
    ----------
    source : str or DateOffset
        Frequency converting from
    target : str or DateOffset
        Frequency converting to

    Returns
    -------
    bool
    """
    if target is None or source is None:
        return False
    source = _maybe_coerce_freq(source)
    target = _maybe_coerce_freq(target)
    # Anchored targets need special handling before the plain lookup below.
    if _is_annual(target):
        if _is_quarterly(source):
            # Quarter anchoring must line up with the annual anchor month.
            return _quarter_months_conform(
                get_rule_month(source), get_rule_month(target)
            )
        return source in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"}
    if _is_quarterly(target):
        return source in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"}
    if _is_monthly(target):
        return source in {"D", "C", "B", "H", "T", "S", "L", "U", "N"}
    if _is_weekly(target):
        return source in {target, "D", "C", "B", "H", "T", "S", "L", "U", "N"}
    # For simple targets, map each to the set of valid finer sources.
    valid_sources = {
        "B": {"B", "H", "T", "S", "L", "U", "N"},
        "C": {"C", "H", "T", "S", "L", "U", "N"},
        "D": {"D", "H", "T", "S", "L", "U", "N"},
        "H": {"H", "T", "S", "L", "U", "N"},
        "T": {"T", "S", "L", "U", "N"},
        "S": {"S", "L", "U", "N"},
        "L": {"L", "U", "N"},
        "U": {"U", "N"},
        "N": {"N"},
    }
    if target in valid_sources:
        return source in valid_sources[target]
    return False
def is_superperiod(source, target) -> bool:
    """
    Returns True if upsampling is possible between source and target
    frequencies

    Parameters
    ----------
    source : str or DateOffset
        Frequency converting from
    target : str or DateOffset
        Frequency converting to

    Returns
    -------
    bool
    """
    if target is None or source is None:
        return False
    source = _maybe_coerce_freq(source)
    target = _maybe_coerce_freq(target)
    # Anchored sources need special handling before the plain lookup below.
    if _is_annual(source):
        if _is_annual(target):
            return get_rule_month(source) == get_rule_month(target)
        if _is_quarterly(target):
            # Quarter anchoring must line up with the annual anchor month.
            return _quarter_months_conform(
                get_rule_month(source), get_rule_month(target)
            )
        return target in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"}
    if _is_quarterly(source):
        return target in {"D", "C", "B", "M", "H", "T", "S", "L", "U", "N"}
    if _is_monthly(source):
        return target in {"D", "C", "B", "H", "T", "S", "L", "U", "N"}
    if _is_weekly(source):
        return target in {source, "D", "C", "B", "H", "T", "S", "L", "U", "N"}
    # For simple sources, map each to the set of valid finer targets.
    daily_and_finer = {"D", "C", "B", "H", "T", "S", "L", "U", "N"}
    valid_targets = {
        "B": daily_and_finer,
        "C": daily_and_finer,
        "D": daily_and_finer,
        "H": {"H", "T", "S", "L", "U", "N"},
        "T": {"T", "S", "L", "U", "N"},
        "S": {"S", "L", "U", "N"},
        "L": {"L", "U", "N"},
        "U": {"U", "N"},
        "N": {"N"},
    }
    if source in valid_targets:
        return target in valid_targets[source]
    return False
def _maybe_coerce_freq(code) -> str:
    """
    Coerce a frequency specification to an upper-case rule-code string.

    Parameters
    ----------
    code : str or DateOffset
        Frequency specification to normalize.

    Returns
    -------
    str
        The upper-cased rule code (e.g. "Q-DEC").
    """
    assert code is not None
    if isinstance(code, DateOffset):
        # DateOffset instances carry their string alias in .rule_code.
        code = code.rule_code
    return code.upper()
def _quarter_months_conform(source: str, target: str) -> bool:
    """Return True when the two anchoring months fall in the same
    quarterly cycle (their month numbers agree modulo 3)."""
    return MONTH_NUMBERS[source] % 3 == MONTH_NUMBERS[target] % 3
def _is_annual(rule: str) -> bool:
    """True for annual rule codes: "A" or an anchored "A-<MONTH>"."""
    upper = rule.upper()
    return upper == "A" or upper.startswith("A-")
def _is_quarterly(rule: str) -> bool:
    """True for quarterly rule codes: "Q", anchored "Q-<MONTH>", or any
    business-quarterly "BQ*" code."""
    upper = rule.upper()
    return upper == "Q" or upper.startswith(("Q-", "BQ"))
def _is_monthly(rule: str) -> bool:
    """True for monthly rule codes "M" and "BM"."""
    return rule.upper() in ("M", "BM")
def _is_weekly(rule: str) -> bool:
    """True for weekly rule codes: "W" or an anchored "W-<DAY>"."""
    upper = rule.upper()
    return upper == "W" or upper.startswith("W-")
| |
# Copyright (c) 2016 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from neutron._i18n import _LE, _LI
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron.plugins.common import constants as p_constants
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import _common_agent as ca
from neutron.plugins.ml2.drivers.macvtap.agent import config # noqa
from neutron.plugins.ml2.drivers.macvtap import macvtap_common
LOG = logging.getLogger(__name__)
MACVTAP_AGENT_BINARY = "neutron-macvtap-agent"
MACVTAP_FS = "/sys/class/net/"
EXTENSION_DRIVER_TYPE = 'macvtap'
class MacvtapRPCCallBack(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                         amb.CommonAgentManagerRpcCallBackBase):
    """RPC callbacks consumed by the macvtap L2 agent."""
    # Set RPC API version to 1.0 by default.
    # history
    # 1.1 Support Security Group RPC
    # 1.3 Added param devices_to_update to security_groups_provider_updated
    # 1.4 Added support for network_update
    target = oslo_messaging.Target(version='1.4')
    def network_delete(self, context, **kwargs):
        """Remove the local vlan device backing a deleted VLAN network."""
        LOG.debug("network_delete received")
        network_id = kwargs.get('network_id')
        if network_id not in self.network_map:
            LOG.error(_LE("Network %s is not available."), network_id)
            return
        segment = self.network_map.get(network_id)
        # Only VLAN networks create a local vlan sub-device to clean up.
        if segment and segment.network_type == p_constants.TYPE_VLAN:
            if_mappings = self.agent.mgr.interface_mappings
            vlan_device_name = macvtap_common.get_vlan_device_name(
                if_mappings[segment.physical_network],
                str(segment.segmentation_id))
            ip_dev = ip_lib.IPDevice(vlan_device_name)
            if ip_dev.exists():
                LOG.debug("Delete %s", ip_dev.name)
                ip_dev.link.delete()
            else:
                LOG.debug("Cannot delete vlan device %s; it does not exist",
                          vlan_device_name)
    def port_update(self, context, **kwargs):
        """Queue the updated port's MAC for reprocessing by the agent loop."""
        port = kwargs['port']
        LOG.debug("port_update received for port %s ", port)
        mac = port['mac_address']
        # Put the device name in the updated_devices set.
        # Do not store port details, as if they're used for processing
        # notifications there is no guarantee the notifications are
        # processed in the same order as the relevant API requests.
        self.updated_devices.add(mac)
class MacvtapManager(amb.CommonAgentManagerBase):
    """Common-agent manager implementing macvtap-specific device handling."""
    def __init__(self, interface_mappings):
        # Mapping: physical network name -> host interface name.
        self.interface_mappings = interface_mappings
        self.validate_interface_mappings()
        # Mapping: MAC address -> macvtap device name; rebuilt on every
        # get_all_devices() call.
        self.mac_device_name_mappings = dict()
    def validate_interface_mappings(self):
        """Terminate the agent if a mapped host interface does not exist."""
        for physnet, interface in self.interface_mappings.items():
            if not ip_lib.device_exists(interface):
                LOG.error(_LE("Interface %(intf)s for physical network "
                              "%(net)s does not exist. Agent terminated!"),
                          {'intf': interface, 'net': physnet})
                sys.exit(1)
    def ensure_port_admin_state(self, device, admin_state_up):
        """Set the link of the device's macvtap up or down."""
        LOG.debug("Setting admin_state_up to %s for device %s",
                  admin_state_up, device)
        dev = ip_lib.IPDevice(self.mac_device_name_mappings[device])
        if admin_state_up:
            dev.link.set_up()
        else:
            dev.link.set_down()
    def get_agent_configurations(self):
        """Return the agent configuration dict reported to the server."""
        return {'interface_mappings': self.interface_mappings}
    def get_agent_id(self):
        """Derive a unique agent id from the first host device's MAC."""
        devices = ip_lib.IPWrapper().get_devices(True)
        if devices:
            mac = utils.get_interface_mac(devices[0].name)
            return 'macvtap%s' % mac.replace(":", "")
        else:
            LOG.error(_LE("Unable to obtain MAC address for unique ID. "
                          "Agent terminated!"))
            sys.exit(1)
    def get_devices_modified_timestamps(self, devices):
        # TODO(kevinbenton): this should be implemented to detect
        # rapid Nova instance rebuilds.
        return {}
    def get_all_devices(self):
        """Return the set of MACs of all macvtap devices on this host."""
        devices = set()
        all_device_names = os.listdir(MACVTAP_FS)
        # Refresh the mac_device_name mapping
        self.mac_device_name_mappings = dict()
        for device_name in all_device_names:
            if device_name.startswith(constants.MACVTAP_DEVICE_PREFIX):
                mac = utils.get_interface_mac(device_name)
                self.mac_device_name_mappings[mac] = device_name
                devices.add(mac)
        return devices
    def get_extension_driver_type(self):
        return EXTENSION_DRIVER_TYPE
    def get_rpc_callbacks(self, context, agent, sg_agent):
        return MacvtapRPCCallBack(context, agent, sg_agent)
    def get_rpc_consumers(self):
        # Topics this agent subscribes to.
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        return consumers
    def plug_interface(self, network_id, network_segment, device,
                       device_owner):
        # Setting ALLMULTICAST Flag on macvtap device to allow the guest
        # receiving traffic for arbitrary multicast addresses.
        # The alternative would be to let libvirt instantiate the macvtap
        # device with the 'trustGuestRxFilters' option. But doing so, the guest
        # would be able to change its mac address and therefore the mac
        # address of the macvtap device.
        dev = ip_lib.IPDevice(self.mac_device_name_mappings[device])
        dev.link.set_allmulticast_on()
        return True
    def setup_arp_spoofing_protection(self, device, device_details):
        # ARP spoofing protection is a no-op for this agent.
        pass
    def delete_arp_spoofing_protection(self, devices):
        pass
    def delete_unreferenced_arp_protection(self, current_devices):
        pass
def parse_interface_mappings():
    """Parse physical_interface_mappings from the config into a dict.

    Terminates the agent (exit code 1) when the option cannot be parsed.
    """
    try:
        mappings = n_utils.parse_mappings(
            cfg.CONF.macvtap.physical_interface_mappings)
        LOG.info(_LI("Interface mappings: %s"), mappings)
        return mappings
    except ValueError as e:
        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
                      "Agent terminated!"), e)
        sys.exit(1)
def validate_firewall_driver():
    """Terminate the agent unless the Noop firewall driver is configured.

    The macvtap agent only supports the noop driver; any other value of
    SECURITYGROUP.firewall_driver is a fatal misconfiguration.
    """
    fw_driver = cfg.CONF.SECURITYGROUP.firewall_driver
    if fw_driver != 'neutron.agent.firewall.NoopFirewallDriver':
        # Fix: add the missing space before the quoted driver name so the
        # message does not render as '...macvtap agent, but"%s"...'.
        LOG.error(_LE('Unsupported configuration option for "SECURITYGROUP.'
                      'firewall_driver"! Only "neutron.agent.firewall.'
                      'NoopFirewallDriver" is supported by macvtap agent, '
                      'but "%s" is configured. Agent terminated!'),
                  fw_driver)
        sys.exit(1)
def main():
    """Agent entry point: load config, build the agent loop and run it."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    validate_firewall_driver()
    mappings = parse_interface_mappings()
    manager = MacvtapManager(mappings)
    polling = cfg.CONF.AGENT.polling_interval
    rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
    agent = ca.CommonAgentLoop(manager, polling,
                               rpc_timeout,
                               constants.AGENT_TYPE_MACVTAP,
                               MACVTAP_AGENT_BINARY)
    LOG.info(_LI("Agent initialized successfully, now running... "))
    launcher = service.launch(cfg.CONF, agent)
    launcher.wait()
| |
from sqlalchemy.orm import joinedload
from clld.db.meta import DBSession
from clld.db.models import common
from clld.web import datatables
from clld.web.datatables.base import Col, LinkCol, DetailsRowLinkCol, IdCol, LinkToMapCol
from clld.web.datatables.value import Values, ValueNameCol, RefsCol
from clld.web.datatables.unitvalue import Unitvalues, UnitValueNameCol
from clld.web.util.helpers import external_link
from sails.models import (
ConstructionFeatureDomain, FeatureDomain, Feature, sailsLanguage, Family, Designer,
sailsUnitParameter, sailsUnitValue, sailsValue,
)
class ConstructionFeatures(datatables.Unitparameters):
    """DataTable listing construction features (unit parameters)."""
    def base_query(self, query):
        # Join the domain so the Domain column can sort/search on it, and
        # eager-load it to avoid a per-row query when rendering.
        return query.join(ConstructionFeatureDomain).options(
            joinedload(sailsUnitParameter.constructionfeaturedomain))
    def col_defs(self):
        return [
            ConstructionFeatureIdCol(self, 'Id', sClass='left', model_col=sailsUnitParameter.id),
            LinkCol(self, 'Feature', model_col=sailsUnitParameter.name),
            ConstructionFeatureDomainCol(self, 'Domain'),
            Col(self, '# Constructions', model_col=sailsUnitParameter.nconstructions),
            Col(self, '# Languages', model_col=sailsUnitParameter.nlanguages),
        ]
class FeatureIdCol(IdCol):
    """Feature id column: case-insensitive substring search, ordered by
    the feature's composite sort key."""
    def search(self, qs):
        # No filter clause when the column has no model_col bound.
        if not self.model_col:
            return None
        return self.model_col.contains(qs.upper())
    def order(self):
        return Feature.sortkey_str, Feature.sortkey_int
class ConstructionFeatureIdCol(IdCol):
    """Construction-feature id column: case-insensitive substring search,
    ordered by the unit parameter's composite sort key."""
    def search(self, qs):
        # No filter clause when the column has no model_col bound.
        if not self.model_col:
            return None
        return self.model_col.contains(qs.upper())
    def order(self):
        return sailsUnitParameter.sortkey_str, sailsUnitParameter.sortkey_int
class LanguageIdCol(Col):
    """ISO-code column which hides artificial 'NOCODE...' identifiers."""
    def format(self, item):
        obj = self.get_obj(item)
        if obj.id.startswith('NOCODE'):
            return ''
        return obj.id
class _FeatureDomainCol(Col):
    """Base column for filtering/sorting by FeatureDomain name, offering
    all domain names (alphabetically) as dropdown choices."""
    def __init__(self, *args, **kw):
        super(_FeatureDomainCol, self).__init__(*args, **kw)
        domains = DBSession.query(FeatureDomain).order_by(FeatureDomain.name)
        self.choices = [domain.name for domain in domains]
    def order(self):
        return FeatureDomain.name
    def search(self, qs):
        # Exact-match filter on the selected domain name.
        return FeatureDomain.name == qs
class _ConstructionFeatureDomainCol(Col):
    """Base column for filtering/sorting by ConstructionFeatureDomain name,
    offering all domain names (alphabetically) as dropdown choices."""
    def __init__(self, *args, **kw):
        super(_ConstructionFeatureDomainCol, self).__init__(*args, **kw)
        domains = DBSession.query(ConstructionFeatureDomain).order_by(
            ConstructionFeatureDomain.name)
        self.choices = [domain.name for domain in domains]
    def order(self):
        return ConstructionFeatureDomain.name
    def search(self, qs):
        # Exact-match filter on the selected domain name.
        return ConstructionFeatureDomain.name == qs
class FeatureDomainCol(_FeatureDomainCol):
    """Render the name of the feature's domain."""
    def format(self, item):
        domain = item.featuredomain
        return domain.name
class ConstructionFeatureDomainCol(_ConstructionFeatureDomainCol):
    """Render the name of the construction feature's domain."""
    def format(self, item):
        domain = item.constructionfeaturedomain
        return domain.name
class Features(datatables.Parameters):
    """DataTable listing SAILS features with domain and designer columns."""
    def base_query(self, query):
        # Join designer and domain so their columns can sort/search, and
        # eager-load both to avoid per-row queries when rendering.
        return query.join(Designer).options(joinedload(Feature.designer))\
            .join(FeatureDomain).options(joinedload(Feature.featuredomain))
    def col_defs(self):
        return [
            FeatureIdCol(self, 'Id', sClass='left', model_col=Feature.id),
            LinkCol(self, 'Feature', model_col=Feature.name),
            FeatureDomainCol(self, 'Domain'),
            Col(self, 'Designer',
                model_col=Designer.contributor,
                get_object=lambda i: i.designer),
            Col(self, 'Languages', model_col=Feature.representation),
            DetailsRowLinkCol(self, 'd', button_text='Values'),
        ]
class FamilyCol(Col):
    """Column showing a language's family, with all families as choices.

    The dropdown submits the family primary key, so search filters on
    ``Family.pk`` while display/sort use ``Family.name``.
    """
    def __init__(self, *args, **kw):
        kw['choices'] = [
            (f.pk, f.name) for f in DBSession.query(Family).order_by(Family.name)]
        kw['model_col'] = Family.name
        # Use super() for consistency with the other Col subclasses here.
        super(FamilyCol, self).__init__(*args, **kw)
    def search(self, qs):
        try:
            return Family.pk == int(qs)
        except ValueError:
            # Non-numeric query (e.g. hand-crafted request): apply no
            # filter instead of raising a server error.
            return None
class Languages(datatables.Languages):
    """DataTable listing SAILS languages with family and feature counts."""
    def base_query(self, query):
        # Join the family for sort/search and eager-load it; distinct()
        # guards against duplicate rows from the join.
        return query.join(Family).options(joinedload(sailsLanguage.family)).distinct()
    def col_defs(self):
        return [
            LinkCol(self, 'Name', model_col=sailsLanguage.name),
            LanguageIdCol(self, 'ISO-639-3', sClass='left', model_col=sailsLanguage.id),
            FamilyCol(self, 'Family', get_object=lambda i: i.family),
            Col(self, 'Features', model_col=sailsLanguage.representation),
            LinkToMapCol(self, 'm'),
        ]
class MoreInfo(Col):
    """Unsortable, unsearchable column; renders the more-information text,
    linked to the PDF when one is available."""
    __kw__ = {'bSortable': False, 'bSearchable': False}
    def format(self, item):
        if not item.pdflink:
            return item.more_information
        return external_link(item.pdflink, label=item.more_information)
class Designers(datatables.Contributions):
    """DataTable of feature designers, with a compact 'short' variant."""
    def __init__(self, req, *args, **kw):
        # 'short' may come from the constructor or, for XHR reloads, from
        # the request parameters (serialized as the string 'True').
        self.short = kw.pop('short', False)
        if 'short' in req.params:
            self.short = req.params['short'] == 'True'
        super(Designers, self).__init__(req, *args, **kw)
    def xhr_query(self):
        # Propagate the short flag through XHR requests.
        return dict(short=self.short)
    def col_defs(self):
        if self.short:
            return [
                Col(self, 'Domain of Design', model_col=Designer.domain),
                Col(self, 'Designer', model_col=Designer.contributor),
                Col(self, 'Features', model_col=Designer.nfeatures),
                Col(self, 'Languages', model_col=Designer.nlanguages),
                Col(self, 'Datapoints', model_col=Designer.ndatapoints),
            ]
        return [
            Col(self, 'Designer', model_col=Designer.contributor),
            Col(self, 'Domain of Design', model_col=Designer.domain),
            Col(self, 'Citation', model_col=Designer.citation),
            Col(self, 'Features', model_col=Designer.nfeatures, input_size='mini'),
            Col(self, 'Languages', model_col=Designer.nlanguages, input_size='mini'),
            Col(self, 'Datapoints', model_col=Designer.ndatapoints, input_size='mini'),
            MoreInfo(self, 'More Information'),
        ]
    def get_options(self):
        # NOTE(review): returns None implicitly when not short -- presumably
        # the base class then falls back to default options; confirm.
        if self.short:
            return {'bLengthChange': False, 'bPaginate': False}
class Datapoints(Values):
    """Values table, typically restricted to one language or one parameter."""

    def base_query(self, query):
        query = Values.base_query(self, query)
        if self.language:
            # Eager-load the relations rendered per row to avoid N+1 queries.
            query = query.options(
                joinedload(common.Value.valueset).joinedload(common.ValueSet.parameter),
                joinedload(common.Value.domainelement),
            )
        elif self.parameter:
            # outerjoin: languages without a family must still show up.
            query = query.outerjoin(Family).options(
                joinedload(common.Value.valueset)
                .joinedload(common.ValueSet.language)
                .joinedload(sailsLanguage.family))
        return query

    def col_defs(self):
        name_col = ValueNameCol(self, 'value')
        if self.parameter and self.parameter.domain:
            # Offer the parameter's domain elements as filter choices.
            name_col.choices = [de.name for de in self.parameter.domain]
        cols = []
        if self.parameter:
            # Restricted to one parameter: show language-identifying columns.
            cols = [
                LinkCol(
                    self, 'Name',
                    model_col=common.Language.name,
                    get_object=lambda i: i.valueset.language),
                LanguageIdCol(
                    self, 'ISO-639-3',
                    model_col=common.Language.id,
                    get_object=lambda i: i.valueset.language),
                FamilyCol(
                    self, 'Family',
                    get_object=lambda i: i.valueset.language.family)]
        elif self.language:
            # Restricted to one language: show feature-identifying columns.
            cols = [
                FeatureIdCol(
                    self, 'Feature Id',
                    sClass='left', model_col=common.Parameter.id,
                    get_object=lambda i: i.valueset.parameter),
                LinkCol(
                    self, 'Feature',
                    model_col=common.Parameter.name,
                    get_object=lambda i: i.valueset.parameter)]
        cols = cols + [
            name_col,
            Col(self, 'description'),
            #RefsCol(self, 'source'),
            RefsCol(self, 'Source',
                    model_col=common.ValueSet.source,
                    get_object=lambda i: i.valueset),
            Col(self, 'Comment', model_col=sailsValue.comment)
        ]
        return cols

    def get_options(self):
        if self.language or self.parameter:
            # if the table is restricted to the values for one language, the number of
            # features is an upper bound for the number of values; thus, we do not
            # paginate.
            return {'bLengthChange': False, 'bPaginate': False}
class Constructions(datatables.Units):
    """Construction (unit) listing."""

    def col_defs(self):
        # NOTE(review): this class passes 'get_obj' while other columns in
        # this module pass 'get_object' -- confirm which keyword the Col
        # implementation in use actually honours.
        return [
            LinkCol(
                self, 'language', model_col=common.Language.name, get_obj=lambda i: i.language),
            LinkCol(self, 'construction name'),
        ]
class ConstructionValues(Unitvalues):
    """Unit-value (construction value) listing."""

    def base_query(self, query):
        # Eager-load the unitparameter relation used by the columns below.
        query = Unitvalues.base_query(self, query).options(
            joinedload(sailsUnitValue.unitparameter))
        return query

    def col_defs(self):
        name_col = UnitValueNameCol(self, 'value')
        if self.unitparameter and self.unitparameter.domain:
            # Offer the parameter's domain elements (sorted) as filter choices.
            name_col.choices = sorted([de.name for de in self.unitparameter.domain])
        return [
            LinkCol(self, 'Construction', get_obj=lambda i: i.unit, model_col=common.Unit.name),
            ConstructionFeatureIdCol(self, 'Feature Id', sClass='left', model_col=sailsUnitParameter.id, get_obj=lambda i: i.unitparameter),
            LinkCol(self, 'Feature', get_obj=lambda i: i.unitparameter, model_col=common.UnitParameter.name),
            name_col,
            Col(self, 'Source', model_col=sailsUnitValue.source),
            Col(self, 'Comment', model_col=sailsUnitValue.comment)
        ]
def includeme(config):
    """Register this module's datatable implementations with the app."""
    config.register_datatable('contributions', Designers)
    config.register_datatable('values', Datapoints)
    config.register_datatable('languages', Languages)
    config.register_datatable('parameters', Features)
    config.register_datatable('unitparameters', ConstructionFeatures)
    # Constructions intentionally serves both 'constructions' and 'units'.
    config.register_datatable('constructions', Constructions)
    config.register_datatable('units', Constructions)
    config.register_datatable('unitvalues', ConstructionValues)
| |
""" Defines the LassoSelection controller class.
"""
# Major library imports
import numpy
from numpy import array, empty, sometrue, transpose, vstack, zeros
# Enthought library imports
from traits.api import Any, Array, Enum, Event, Bool, Instance, \
Property, Trait, List
from kiva.agg import points_in_polygon
# Chaco imports
from chaco.api import AbstractController, AbstractDataSource, \
BaseXYPlot, Base2DPlot
class LassoSelection(AbstractController):
    """ A controller that represents the interaction of "lassoing" a set of
    points.

    "Lassoing" means drawing an arbitrary selection region around the points
    by dragging the mouse along the outline of the region.
    """
    # An Nx2 array of points in data space representing all selected points.
    dataspace_points = Property(Array)

    # A list of all the selection polygons.
    disjoint_selections = Property(List)

    # Fires whenever **dataspace_points** changes, necessitating a redraw of the
    # selection region.
    updated = Event

    # Fires when the selection mask changes.
    selection_changed = Event

    # Fires when the user releases the mouse button and finalizes the selection.
    selection_completed = Event

    # If True, the selection mask is updated as the mouse moves, rather
    # than only at the beginning and end of the selection operation.
    incremental_select = Bool(False)

    # The selection mode of the lasso pointer: "include", "exclude" or
    # "invert" points from the selection. The "include" and "exclude"
    # settings essentially invert the selection mask. The "invert" setting
    # differs from "exclude" in that "invert" inverses the selection of all
    # points in the lasso'ed polygon, while "exclude" operates only on
    # points included in a previous selection.
    selection_mode = Enum("include", "exclude", "invert")

    # The data source that the mask of selected points is attached to. Note
    # that the indices in this data source must match the indices of the data
    # in the plot.
    selection_datasource = Instance(AbstractDataSource)

    # Mapping from screen space to data space. By default, it is just
    # self.component.
    plot = Property

    # The button which this tool responds to
    drag_button = Enum("left", "right")

    # The possible event states of this selection tool (overrides
    # enable.Interactor).
    #
    # normal:
    #     Nothing has been selected, and the user is not dragging the mouse.
    # selecting:
    #     The user is dragging the mouse and is actively changing the
    #     selection region.
    event_state = Enum('normal', 'selecting')

    #----------------------------------------------------------------------
    # Private Traits
    #----------------------------------------------------------------------

    # The PlotComponent associated with this tool.
    _plot = Trait(None, Any)

    # To support multiple selections, a list of cached selections and the
    # active selection are maintained. A single list is not used because the
    # active selection is re-created every time a new point is added via
    # the vstack function.
    _active_selection = Array
    _previous_selections = List(Array)

    #----------------------------------------------------------------------
    # Properties
    #----------------------------------------------------------------------

    def _get_dataspace_points(self):
        """ Returns a complete list of all selected points.

        This property exists for backwards compatibility, as the
        disjoint_selections property is almost always the preferred
        method of accessing selected points.
        """
        composite = empty((0, 2))
        for region in self.disjoint_selections:
            if len(region) > 0:
                composite = vstack((composite, region))
        return composite

    def _get_disjoint_selections(self):
        """ Returns a list of all disjoint selections composed of
        the previous selections and the active selection.
        """
        if len(self._active_selection) == 0:
            return self._previous_selections
        else:
            return self._previous_selections + [self._active_selection]

    #----------------------------------------------------------------------
    # Event Handlers
    #----------------------------------------------------------------------

    def normal_left_down(self, event):
        if self.drag_button == "left":
            return self.normal_mouse_down(event)

    def normal_right_down(self, event):
        if self.drag_button == "right":
            return self.normal_mouse_down(event)

    def normal_mouse_down(self, event):
        """ Handles the left mouse button being pressed while the tool is
        in the 'normal' state.

        Puts the tool into 'selecting' mode, and starts defining the selection.
        """
        # We may want to generalize this for the n-dimensional case...
        # NOTE: the dtype is irrelevant for the empty seed array -- vstack
        # promotes it to float as soon as vertices are appended.  The
        # long-removed numpy.bool alias was replaced with the builtin bool.
        self._active_selection = empty((0, 2), dtype=bool)

        if self.selection_datasource is not None:
            self.selection_datasource.metadata['selection'] = \
                zeros(len(self.selection_datasource.get_data()), dtype=bool)
        self.selection_mode = "include"
        self.event_state = 'selecting'
        self.selecting_mouse_move(event)

        # Shift/control extend an existing selection instead of replacing it.
        if (not event.shift_down) and (not event.control_down):
            self._previous_selections = []
        else:
            if event.control_down:
                self.selection_mode = "exclude"
            else:
                self.selection_mode = "include"
        self.trait_property_changed("disjoint_selections", [], self.disjoint_selections)
        return

    def selecting_left_up(self, event):
        if self.drag_button == "left":
            return self.selecting_mouse_up(event)

    def selecting_right_up(self, event):
        if self.drag_button == "right":
            return self.selecting_mouse_up(event)

    def selecting_mouse_up(self, event):
        """ Handles the mouse button coming up in the 'selecting' state.

        Completes the selection and switches to the 'normal' state.
        """
        self.event_state = 'normal'
        self.selection_completed = True
        self._update_selection()

        # Archive the finished polygon and start a fresh (empty) one.
        self._previous_selections.append(self._active_selection)
        self._active_selection = empty((0, 2), dtype=bool)
        return

    def selecting_mouse_move(self, event):
        """ Handles the mouse moving when the tool is in the 'selecting' state.

        The selection is extended to the current mouse position.
        """
        # Translate the event's location to be relative to this container
        xform = self.component.get_event_transform(event)
        event.push_transform(xform, caller=self)
        new_point = self._map_data(array((event.x, event.y)))
        self._active_selection = vstack((self._active_selection, array((new_point,))))
        self.updated = True
        if self.incremental_select:
            self._update_selection()
        # Report None for the previous selections
        self.trait_property_changed("disjoint_selections", None)
        return

    def selecting_mouse_leave(self, event):
        """ Handles the mouse leaving the plot when the tool is in the
        'selecting' state.

        Ends the selection operation.
        """
        # Finish the selection exactly as if the button had been released.
        # (The previous implementation only executed a leftover debugging
        # `print event`, never ending the selection as documented.)
        self.selecting_mouse_up(event)
        return

    def normal_key_pressed(self, event):
        """ Handles the user pressing a key in the 'normal' state.

        If the user presses the Escape key, the tool is reset.
        """
        if event.character == "Esc":
            self._reset()
        elif event.character == 'a' and event.control_down:
            # Ctrl-A: select everything.
            self._reset()
            self._select_all()
        elif event.character == 'i' and event.control_down:
            # Ctrl-I: invert the current selection.
            self.selecting_mouse_up(None)
            self.selection_mode = 'invert'
            self._select_all()
        return

    #----------------------------------------------------------------------
    # Protected Methods
    #----------------------------------------------------------------------

    def _dataspace_points_default(self):
        return empty((0, 2), dtype=bool)

    def _reset(self):
        """ Resets the selection. """
        self.event_state = 'normal'
        self._active_selection = empty((0, 2), dtype=bool)
        self._previous_selections = []
        self._update_selection()

    def _select_all(self):
        """ Selects all points in the plot. This is done by making a rectangle
        using the corners of the plot, which is simple but effective. A
        much cooler, but more time-intensive solution would be to make
        a selection polygon representing the convex hull.
        """
        points = [self._map_data(array((self.plot.x, self.plot.y2))),
                  self._map_data(array((self.plot.x2, self.plot.y2))),
                  self._map_data(array((self.plot.x2, self.plot.y))),
                  self._map_data(array((self.plot.x, self.plot.y)))]
        self._active_selection = numpy.array(points)
        self._update_selection()

    def _update_selection(self):
        """ Sets the selection datasource's 'selection' metadata element
        to a mask of all the points selected.
        """
        if self.selection_datasource is None:
            return
        selected_mask = zeros(self.selection_datasource._data.shape, dtype=bool)
        data = self._get_data()

        # Compose the selection mask from the cached selections first, then
        # the active selection, taking into account the selection mode only
        # for the active selection
        for selection in self._previous_selections:
            selected_mask |= (points_in_polygon(data, selection, False))
        if self.selection_mode == 'exclude':
            # Points inside the active polygon are removed from the mask.
            selected_mask |= (points_in_polygon(data, self._active_selection, False))
            selected_mask = 1 - selected_mask
        elif self.selection_mode == 'invert':
            # Flip the selection state of points inside the active polygon.
            selected_mask = -1 * (selected_mask - points_in_polygon(data, self._active_selection, False))
        else:
            selected_mask |= (points_in_polygon(data, self._active_selection, False))

        # Only touch the metadata (and fire selection_changed) when the
        # mask actually differs.  ``.any()`` replaces the removed
        # numpy.sometrue alias.
        if (selected_mask != self.selection_datasource.metadata['selection']).any():
            self.selection_datasource.metadata['selection'] = selected_mask
            self.selection_changed = True
        return

    def _map_screen(self, points):
        """ Maps a point in data space to a point in screen space on the plot.

        Normally this method is a pass-through, but it may do more in
        specialized plots.
        """
        return self.plot.map_screen(points)[:, :2]

    def _map_data(self, point):
        """ Maps a point in screen space to data space.

        Normally this method is a pass-through, but for plots that have more
        data than just (x,y), proper transformations need to happen here.
        """
        if isinstance(self.plot, Base2DPlot):
            # Base2DPlot.map_data takes an array of points, for some reason
            return self.plot.map_data([point])[0]
        elif isinstance(self.plot, BaseXYPlot):
            return self.plot.map_data(point, all_values=True)[:2]
        else:
            raise RuntimeError("LassoSelection only supports BaseXY and Base2D plots")

    def _get_data(self):
        """ Returns the datapoints in the plot, as an Nx2 array of (x,y). """
        return transpose(array((self.plot.index.get_data(), self.plot.value.get_data())))

    #------------------------------------------------------------------------
    # Property getter/setters
    #------------------------------------------------------------------------

    def _get_plot(self):
        # Explicitly assigned plot wins over the tool's component.
        if self._plot is not None:
            return self._plot
        else:
            return self.component

    def _set_plot(self, val):
        self._plot = val
        return
| |
from __future__ import absolute_import, division
from collections import defaultdict
from hashlib import md5
from lxml import etree
from sqlalchemy.exc import IntegrityError
from changes.config import db, redis
from changes.models.filecoverage import FileCoverage
from changes.utils.diff_parser import DiffParser
from .base import ArtifactHandler
class CoverageHandler(ArtifactHandler):
    """Ingests coverage artifacts (Cobertura or Jacoco XML) into
    FileCoverage rows, merging with pre-existing rows and attaching
    per-file and per-diff line statistics.
    """

    def process(self, fp):
        """Parse the artifact *fp* and persist one FileCoverage per file."""
        results = self.get_coverage(fp)

        for result in results:
            try:
                with db.session.begin_nested():
                    db.session.add(result)
            except IntegrityError:
                # A row for this (job, file) already exists; merge under a
                # redis lock so concurrent workers do not clobber each other.
                lock_key = 'coverage:{job_id}:{file_hash}'.format(
                    job_id=result.job_id.hex,
                    file_hash=md5(result.filename).hexdigest(),
                )
                with redis.lock(lock_key):
                    result = self.merge_coverage(result)
                    db.session.add(result)
            db.session.commit()

        return results

    def merge_coverage(self, new):
        """Merge *new* coverage into the existing row for the same file.

        Per line, covered ('C') wins over uncovered ('U'), which wins over
        no-data ('N').
        """
        existing = FileCoverage.query.filter(
            FileCoverage.job_id == new.job_id,
            FileCoverage.filename == new.filename,
        ).first()

        cov_data = []
        for lineno in range(max(len(existing.data), len(new.data))):
            # Reset both values each iteration: the previous implementation
            # carried stale values across iterations and raised NameError
            # when one of the strings was empty on the first pass.
            old_cov = None
            new_cov = None

            try:
                old_cov = existing.data[lineno]
            except IndexError:
                pass

            try:
                new_cov = new.data[lineno]
            except IndexError:
                pass

            if old_cov == 'C' or new_cov == 'C':
                cov_data.append('C')
            elif old_cov == 'U' or new_cov == 'U':
                cov_data.append('U')
            else:
                cov_data.append('N')
        existing.data = ''.join(cov_data)
        self.add_file_stats(existing)

        return existing

    def process_diff(self):
        """Return {filename: set(changed line numbers)} for the build's diff."""
        lines_by_file = defaultdict(set)
        try:
            source = self.step.job.build.source
        except AttributeError:
            # No source attached -- treat as an empty diff.
            return lines_by_file

        diff = source.generate_diff()

        if not diff:
            return lines_by_file

        diff_parser = DiffParser(diff)
        parsed_diff = diff_parser.parse()

        for file_diff in parsed_diff:
            for diff_chunk in file_diff['chunks']:
                if not file_diff['new_filename']:
                    # File was deleted; no new lines to track.
                    continue

                # [2:] strips the 'b/' prefix from the diff filename.
                lines_by_file[file_diff['new_filename'][2:]].update(
                    d['new_lineno'] for d in diff_chunk if d['action'] in ('add', 'del')
                )
        return lines_by_file

    def get_processed_diff(self):
        # Parse the diff once per handler instance.
        if not hasattr(self, '_processed_diff'):
            self._processed_diff = self.process_diff()
        return self._processed_diff

    def add_file_stats(self, result):
        """Compute covered/uncovered counts (total and diff-only) for *result*."""
        diff_lines = self.get_processed_diff()[result.filename]

        lines_covered = 0
        lines_uncovered = 0
        diff_lines_covered = 0
        diff_lines_uncovered = 0

        for lineno, code in enumerate(result.data):
            # lineno is 1-based in diff
            line_in_diff = bool((lineno + 1) in diff_lines)
            if code == 'C':
                lines_covered += 1
                if line_in_diff:
                    diff_lines_covered += 1
            elif code == 'U':
                lines_uncovered += 1
                if line_in_diff:
                    diff_lines_uncovered += 1

        result.lines_covered = lines_covered
        result.lines_uncovered = lines_uncovered
        result.diff_lines_covered = diff_lines_covered
        result.diff_lines_uncovered = diff_lines_uncovered

    def get_coverage(self, fp):
        """
        Return a phabricator-capable coverage mapping.

        >>> {
        >>>     'foo.py': 'NNNUUUUUUUUUUUUCCCUUUUUCCCCCCCCCNNCNCNCCCNNNN',
        >>> }

        Line flags consists of a single character coverage indicator for each line in the file.

        - N: no coverage available
        - U: uncovered
        - C: covered
        """
        root = etree.fromstring(fp.read())

        if root.tag == 'coverage':
            return self.get_cobertura_coverage(root)
        elif root.tag == 'report':
            return self.get_jacoco_coverage(root)
        raise NotImplementedError('Unsupported coverage format')

    def get_cobertura_coverage(self, root):
        """Build FileCoverage results from a Cobertura XML tree."""
        step = self.step
        job = self.step.job

        results = []
        for node in root.iter('class'):
            filename = node.get('filename')
            if not filename:
                self.logger.warn('Unable to determine filename for node: %s', node)
                continue

            file_coverage = []
            for lineset in node.iterchildren('lines'):
                lineno = 0
                for line in lineset.iterchildren('line'):
                    number, hits = int(line.get('number')), int(line.get('hits'))
                    # Pad any gap since the last seen line with 'N'.
                    if lineno < number - 1:
                        for lineno in range(lineno, number - 1):
                            file_coverage.append('N')
                    if hits > 0:
                        file_coverage.append('C')
                    else:
                        file_coverage.append('U')
                    lineno = number

            result = FileCoverage(
                step_id=step.id,
                job_id=job.id,
                project_id=job.project_id,
                filename=filename,
                data=''.join(file_coverage),
            )
            self.add_file_stats(result)
            results.append(result)

        return results

    def get_jacoco_coverage(self, root):
        """Build FileCoverage results from a Jacoco XML tree."""
        step = self.step
        job = self.step.job

        results = []
        for package in root.iter('package'):
            package_path = 'src/main/java/{}'.format(package.get('name'))
            for sourcefile in package.iter('sourcefile'):
                # Fixed: the format string previously contained a corrupted
                # literal and ignored the 'filename' argument entirely.
                filename = '{filepath}/{filename}'.format(
                    filepath=package_path,
                    filename=sourcefile.get('name'),
                )

                file_coverage = []
                lineno = 0
                for line in sourcefile.iterchildren('line'):
                    number, hits = int(line.get('nr')), int(line.get('ci'))
                    # Pad any gap since the last seen line with 'N'.
                    if lineno < number - 1:
                        for lineno in range(lineno, number - 1):
                            file_coverage.append('N')
                    if hits > 0:
                        file_coverage.append('C')
                    else:
                        file_coverage.append('U')
                    lineno = number

                result = FileCoverage(
                    step_id=step.id,
                    job_id=job.id,
                    project_id=job.project_id,
                    filename=filename,
                    data=''.join(file_coverage),
                )
                self.add_file_stats(result)
                results.append(result)

        return results
| |
import os
import sys
from threading import Thread
from pyelectro import analysis
import numpy
import math
import pprint
# Shared pretty-printer for debug output.
pp = pprint.PrettyPrinter(indent=4)
def alpha_normalised_cost_function(value, target, base=10):
    r"""Fitness of a value-target pair from 0 to 1.

    .. WARNING:

        I've found that this cost function is producing some odd behaviour.
        It is best avoided until this is investigated.

    For any value/target pair will give a normalised value for
    agreement: 1 is complete value-target match and 0 is no match.
    A mirrored exponential function is used.
    The fitness is given by the expression :math:`fitness = base^{-x}`
    where:

    .. math::

        x = \left(\dfrac{value - target}{target + 0.01}\right)^2

    (The docstring previously omitted the square, and documented a
    parameter ``t`` that does not exist.)

    :param value: value measured
    :param target: target value
    :param base: the value 'base' in the above mathematical expression for x

    :return: fitness - a real number from 0 to 1
    """
    value = float(value)
    target = float(target)

    # The 0.01 offset guards against division by zero when target == 0
    # (acknowledged as a hack in the original implementation).
    x = ((value - target) / (target + 0.01)) ** 2
    fitness = base ** (-x)
    return fitness
def normalised_cost_function(value, target, Q=None):
    """Return fitness of a value-target pair from 0 to 1.

    For any value/target pair will give a normalised value for
    agreement: 0 is complete value-target match and 1 is "no" match.

    If no Q is assigned, it is set such that it satisfies the condition
    fitness=0.7 when (target-value)=10*target. This is essentially
    empirical and seems to work. Mathematical derivation is on Mike Vella's
    Lab Book 1 p.42 (page dated 15/12/11).

    :param value: value measured
    :param target: target value
    :param Q: This is the sharpness of the cost function, higher values correspond
        to a sharper cost function. A high Q-Value may lead an optimizer to a solution
        quickly once it nears the solution.

    :return: fitness value from 0 to 1
    """
    value = float(value)
    target = float(target)

    if Q is None:
        if target != 0:
            # Chosen so that fitness == 0.7 when |target - value| == 10*target.
            Q = 7 / (300 * (target ** 2))
        else:
            # Empirical fallback for target == 0; gives fitness ~= 0.7 when
            # value == 10.
            Q = 0.023333

    try:
        fitness = 1 - 1 / (Q * (target - value) ** 2 + 1)
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit /
        # KeyboardInterrupt); fall back to the worst fitness.
        print("Exeption when calculating the fitness function; target: %s; value %s; Q: %s"%(target,value,Q))
        fitness = 1

    return fitness
class __CandidateData(object):
    """Container for information about a candidate (chromosome)"""
    def __init__(self,chromosome):
        self.chromosome=chromosome
    def set_dbpath(self,dbpath):
        # Path of the sqlite database holding this candidate's results.
        self.dbpath=dbpath
    def set_exp_id(self,exp_id):
        # Experiment identifier assigned to this candidate.
        self.exp_id=exp_id
    def set_job_num(self,job_num):
        # Number of the (grid) job that evaluated this candidate.
        self.job_num=job_num
class __Evaluator(object):
    """Base class for Evaluators

    Stores the optimisation parameters, per-target weights, target values
    and the controller used to run simulations.
    """
    def __init__(self,parameters,weights,targets,controller):
        self.parameters=parameters
        self.weights=weights
        self.targets=targets
        self.controller=controller
'''
PG: Disabling these until they're tested again...
class __CondorContext(object):
"""manager for dealing with a condor-based grid"""
def __split_list(self,alist, wanted_parts=1):
length = len(alist)
return [ alist[i*length // wanted_parts: (i+1)*length // wanted_parts]
for i in range(wanted_parts) ]
def __prepare_candidates(self,candidates,candidates_per_job=1):
#Split candidate list into smaller ones (jobs):
#and make a job list
if optimizer_params.candidates_in_job != None:
candidates_in_job=optimizer_params.candidates_in_job
else:
candidates_in_job=candidates_per_job
num_candidates=len(candidates)
ids=range(num_candidates)
enumerated_candidates=zip(candidates,ids)
num_jobs=num_candidates/candidates_in_job
self.num_jobs=num_jobs
self.job_list=self.__split_list(enumerated_candidates,wanted_parts=self.num_jobs)
def __make_job_file(self,job,job_number):
#write the header:
filepath = os.path.join(self.tmpdir, 'run' + str(job_number) + '.sh')
run_shell = open(filepath, 'w')
run_shell.write('#!/bin/bash\n')
run_shell.write('reldir=`dirname $0`\n')
run_shell.write('cd $reldir\n')
run_shell.write('directory=`pwd`\n')
run_shell.write('pndirectory=$directory\n')
run_shell.write('#Untar the file:\n')
run_shell.write('/bin/tar xzf ./portable-neuron.tar.gz\n')
tarfile_name=optimizer_params.tarred_nrnproj
run_shell.write('/bin/tar xzf ./'+tarfile_name+'\n')
#CandidateData_list=[]
for enumerated_candidate in job:
chromosome = enumerated_candidate[0]
candidate_info = CandidateData(chromosome)
exp_id = enumerated_candidate[1]
candidate_info.set_exp_id(exp_id)
candidate_info.set_job_num(job_number)
self.CandidateData_list.append(candidate_info)
nproj = controllers.NrnProjSimRun(optimizer_params.project_path, chromosome)
run_shell.write('#issue the commands\n')
run_shell.write('$pndirectory/pnpython.sh \
$directory/src/simrunner.py "sim_var[\'exp_id\'] \
= ' + str(exp_id) + '\" ' + '"sim_var[\'''dbname''\'] \
= \'outputdb' + str(job_number) + '.sqlite\'"' +
nproj.sim_var_string + '\n')
run_shell.write('echo \'done\'\n')
run_shell.write('cp $directory/sims/outputdb' + str(job_number) + '.sqlite $directory\n')
#self.CandidateData_list=CandidateData_list
run_shell.close()
def __make_submit_file(self):
#now we write the submit file
filepath = os.path.join(self.tmpdir, 'submitfile.submit')
submit_file=open(filepath,'w')
submit_file.write('universe = vanilla\n')
submit_file.write('log = pneuron.log\n')
submit_file.write('Error = err.$(Process)\n')
submit_file.write('Output = out.$(Process)\n')
submit_file.write('requirements = GLIBC == "2.11"\n')
tarfile_name=optimizer_params.tarred_nrnproj
submit_file.write('transfer_input_files = portable-neuron.tar.gz,'+tarfile_name+'\n')
submit_file.write('should_transfer_files = yes\n')
submit_file.write('when_to_transfer_output = on_exit_or_evict\n')
#this is where you have to do the clever stuff:
for shellno in range(self.num_jobs):
submit_file.write('executable = run'+str(shellno)+'.sh\n')
submit_file.write('queue\n')
#finally close the submit file
submit_file.close()
def __build_condor_files(self,candidates,candidates_per_job=100):
#prepare list of candidates to be farmed on grid:
self.__prepare_candidates(candidates,candidates_per_job=100)
#make the job files (shell scripts to be executed on the execute nodes)
job_number=0 #run shell script number
for job in self.job_list:
self.__make_job_file(job,job_number)
job_number+=1
#now make the submit file
self.__make_submit_file()
def __delete_remote_files(self,host):
import ssh_utils
command='rm -rf ./*'
ssh_utils.issue_command(host, command)
def __put_multiple_files(self,host,filelist,localdir='/',remotedir='/'):
import ssh_utils
for file in filelist:
localpath=os.path.join(localdir,file)
remotepath=os.path.join(remotedir,file)
ssh_utils.put_file(host,localpath,remotepath)
'''
class DumbEvaluator(__Evaluator):
    """
    The simulations themselves report their fitness. The evaluator
    just reads them from a file. Requires the appropriate controller.
    """

    def __init__(self, controller, fitness_filename_prefix, threads_number=1):
        # NOTE: deliberately does not call __Evaluator.__init__ -- this
        # evaluator has no parameters/weights/targets of its own.
        self.controller = controller
        self.fitness_filename_prefix = fitness_filename_prefix
        self.threads_number = threads_number

    def evaluate(self, candidates, args):
        """Run *candidates* split across threads and collect their fitness.

        Each thread's controller run writes one fitness value per candidate
        (one float per line) to ``<prefix><thread_index>``; the files are
        read back and deleted once the threads finish.
        """
        threads_number = int(self.threads_number)
        # Integer division (// -- '/' would yield a float slice bound on
        # Python 3): each thread gets an equal chunk, and the remainder is
        # spread one-per-thread over the first threads.
        candidates_per_thread = len(candidates) // threads_number
        remainder_candidates = len(candidates) % threads_number
        chunk_begin = 0
        chunk_end = candidates_per_thread
        if remainder_candidates != 0:
            chunk_end += 1
        threads = []
        try:
            for i in range(0, threads_number):
                # If a stale fitness file exists it must be removed first:
                file_name = self.fitness_filename_prefix + str(i)
                if os.path.exists(file_name):
                    os.remove(file_name)
                # Run this thread's slice of the candidates:
                candidate_section = candidates[chunk_begin:chunk_end]
                threads.append(Thread(target=self.controller.run,
                                      args=(candidate_section, args, file_name,)))
                threads[i].daemon = True
                threads[i].start()
                chunk_begin = chunk_end
                chunk_end += candidates_per_thread
                if i < (remainder_candidates - 1):
                    chunk_end += 1

            fitness = []
            for i in range(0, threads_number):
                # Join with a timeout in a loop so the main thread can still
                # handle keyboard interrupts while waiting.  is_alive()
                # replaces isAlive(), which was removed in Python 3.9.
                while True:
                    threads[i].join(1)
                    if not threads[i].is_alive():
                        break
                # Read this thread's fitness values back from its file.
                file_name = self.fitness_filename_prefix + str(i)
                with open(file_name) as fitness_file:
                    fitness = fitness + [float(line) for line in fitness_file.readlines()]
                os.remove(file_name)
        except (KeyboardInterrupt, SystemExit):
            sys.exit("Interrupted by ctrl+c\n")

        return fitness
class IClampEvaluator(__Evaluator):
    """
    Locally-evaluates (not using cluster or grid computing) a model.

    The evaluate routine runs the model and returns its fitness value.
    """

    def __init__(self,
                 analysis_start_time,
                 controller,
                 analysis_end_time,
                 target_data_path,
                 parameters,
                 analysis_var,
                 weights,
                 targets=None,
                 automatic=False):

        super(IClampEvaluator, self).__init__(parameters,
                                              weights,
                                              targets,
                                              controller)

        self.analysis_start_time = analysis_start_time
        self.analysis_end_time = analysis_end_time
        self.target_data_path = target_data_path
        self.analysis_var = analysis_var

        print('target data path in evaluator:'+target_data_path)

        if automatic:
            # Derive the targets automatically by analysing the target data.
            t, v_raw = analysis.load_csv_data(target_data_path)
            v = numpy.array(v_raw)
            v_smooth = list(analysis.smooth(v))
            ic_analysis = analysis.IClampAnalysis(v_smooth,
                                                  t,
                                                  analysis_var,
                                                  start_analysis=analysis_start_time,
                                                  end_analysis=analysis_end_time)
            ic_analysis.analyse()
            self.targets = ic_analysis.analysis_results

            print('Obtained targets are:')
            print(self.targets)

    def evaluate(self, candidates, args):
        """Run all *candidates* through the controller and score each result."""
        print("\n>>>>> Evaluating: ")
        for cand in candidates:
            print(">>>>> %s"%cand)

        simulations_data = self.controller.run(candidates,
                                               self.parameters)

        fitness = []

        for data in simulations_data:
            times = data[0]
            samples = data[1]

            data_analysis = analysis.IClampAnalysis(samples,
                                                    times,
                                                    self.analysis_var,
                                                    start_analysis=self.analysis_start_time,
                                                    end_analysis=self.analysis_end_time,
                                                    target_data_path=self.target_data_path)

            try:
                data_analysis.analyse()
            except Exception:
                # Analysis can fail on degenerate traces; such candidates
                # receive the worst possible fitness below.  (Narrowed from
                # a bare except.)
                data_analysis.analysable_data = False

            fitness_value = self.evaluate_fitness(data_analysis,
                                                  self.targets,
                                                  self.weights,
                                                  cost_function=normalised_cost_function)
            fitness.append(fitness_value)

            print('Fitness: %s\n'%fitness_value)

        return fitness

    def evaluate_fitness(self,
                         data_analysis,
                         target_dict=None,
                         target_weights=None,
                         cost_function=normalised_cost_function):
        """
        Return the estimated fitness of the data, based on the cost function being used.

        :param data_analysis: IClampAnalysis instance
        :param target_dict: key-value pairs for targets
        :param target_weights: key-value pairs for target weights
        :param cost_function: cost function (callback) to assign individual targets sub-fitness.
        """
        # None replaces the mutable default argument of the original.
        if target_dict is None:
            target_dict = {}

        # Calculate the worst possible (maximum) fitness value: the sum of
        # all target weights (a missing weight entry defaults to 1.0).
        worst_cumulative_fitness = 0
        for target in target_dict.keys():
            if target_weights is None:
                target_weight = 1
            else:
                target_weight = target_weights.get(target, 1.0)
            worst_cumulative_fitness += target_weight

        # With un-analysable data (e.g. one or zero peaks) return the worst
        # possible fitness.
        if not data_analysis.analysable_data:
            print('Data is non-analysable')
            return worst_cumulative_fitness

        fitness = 0
        for target in target_dict.keys():
            target_value = target_dict[target]
            if target_weights is None:
                target_weight = 1
            else:
                target_weight = target_weights.get(target, 1.0)

            if target_weight > 0:
                value = data_analysis.analysis_results[target]
                # Let the cost function pick Q automatically.
                cost = cost_function(value, target_value)
                inc = target_weight * cost
                fitness += inc
                print('Target %s (weight %s): target val: %s, actual: %s, cost: %s, fitness inc: %s'%(target, target_weight, target_value, value, cost, inc))

        return fitness
class NetworkEvaluator(__Evaluator):
    """
    Locally-evaluates (not using cluster or grid computing) a model.

    The evaluate routine runs the model and returns its fitness value.
    """

    def __init__(self,
                 analysis_start_time,
                 controller,
                 analysis_end_time,
                 parameters,
                 analysis_var,
                 weights,
                 targets=None):

        super(NetworkEvaluator, self).__init__(parameters,
                                               weights,
                                               targets,
                                               controller)

        self.analysis_start_time = analysis_start_time
        self.analysis_end_time = analysis_end_time
        self.analysis_var = analysis_var
        self.targets = targets

    def evaluate(self, candidates, args):
        """Run all *candidates* through the controller and score each result."""
        print("\n>>>>> Evaluating: ")
        for cand in candidates:
            print(">>>>> %s"%cand)

        simulations_data = self.controller.run(candidates,
                                               self.parameters)

        fitness = []

        for i in range(len(simulations_data)):
            data = simulations_data[i]
            candidate = candidates[i]
            times = data[0]
            volts = data[1]

            data_analysis = analysis.NetworkAnalysis(volts,
                                                     times,
                                                     self.analysis_var,
                                                     start_analysis=self.analysis_start_time,
                                                     end_analysis=self.analysis_end_time)

            print('- Evaluating %s from %s -> %s (data %s -> %s)' % \
                  (candidate,
                   self.analysis_start_time,
                   self.analysis_end_time,
                   times[0],
                   times[-1]))

            data_analysis.analyse(self.targets)

            fitness_value = self.evaluate_fitness(data_analysis,
                                                  self.targets,
                                                  self.weights,
                                                  cost_function=normalised_cost_function)
            fitness.append(fitness_value)

            print('Fitness: %s\n'%fitness_value)

        return fitness

    def evaluate_fitness(self,
                         data_analysis,
                         target_dict=None,
                         target_weights=None,
                         cost_function=normalised_cost_function):
        """
        Return the estimated fitness of the data, based on the cost function being used.

        :param data_analysis: NetworkAnalysis instance
        :param target_dict: key-value pairs for targets
        :param target_weights: key-value pairs for target weights
        :param cost_function: cost function (callback) to assign individual targets sub-fitness.
        """
        # None replaces the mutable default argument of the original.
        if target_dict is None:
            target_dict = {}

        fitness = 0

        for target in target_dict.keys():
            target_value = target_dict[target]

            if target_weights is None:
                target_weight = 1
            else:
                # If a target is not mentioned, assume weight = 0!
                target_weight = target_weights.get(target, 0)

            if target_weight > 0:
                inc = target_weight  # default if the value cannot be scored
                cost = '?'
                # 'in' replaces dict.has_key(), which no longer exists on
                # Python 3.
                if target in data_analysis.analysis_results:
                    value = data_analysis.analysis_results[target]
                    if not math.isnan(value):
                        # Let the cost function pick Q automatically.
                        cost = cost_function(value, target_value)
                        inc = target_weight * cost
                    else:
                        value = '<<infinite value!>>'
                        inc = target_weight
                else:
                    value = '<<cannot be calculated! (only: %s; peak_threshold: %s)>>'%(data_analysis.analysis_results.keys(),self.analysis_var['peak_threshold'])

                fitness += inc
                print('Target %s (weight %s): target val: %s, actual: %s, cost: %s, fitness inc: %s'%(target, target_weight, target_value, value, cost, inc))

        return fitness
'''
class IClampCondorEvaluator(IClampEvaluator):
"""
Evaluate simulations and return their fitness on a condor grid.
Tested and known to work on CamGrid
(http://www.escience.cam.ac.uk/projects/camgrid/)
WARNING:
this entire class should now be considered obsolete, the evaluator
is just an IClampEvaluator and everything here that is different
from that class needs to become its own controller
"""
def __init__(self,local_analysis=False):
super(IClampCondorEvaluator,self).__init__()
#other things like the number of nodes to divide the work onto and
#host connection parameters need to go into this constructor
if local_analysis:
self.evaluate=self.__local_evaluate
else:
self.evaluate=self.__remote_evaluate__
def __condor_evaluate(self,candidates,args):
"""
Run simulations on grid and analyse data locally
WARNING: (???I'm quite confused here...there is a mistake somewhere
as the name doesn't match the description - which method is which?)
Once each generation has finished, all data is pulled to local
workstation in form of sqlite databases (1 database per job)
and these are analysed and the fitness estimated sequentially
the fitness array is then returned.
"""
import time
import ssh_utils
self.CandidateData_list=[]
self.__build_condor_files(candidates) #Build submit and runx.sh files, exp_id now corresponds to position in chromosome and fitness arrays
messagehost=ssh_utils.host(optimizer_params.host,optimizer_params.username,optimizer_params.password,optimizer_params.port)
self.__delete_remote_files__(messagehost)#delete everything in thssh_utilse directory you're about to put files in
filelist=os.listdir(self.tmpdir)
self.__put_multiple_files(messagehost,filelist,localdir=self.tmpdir,remotedir=optimizer_params.remotedir)#copy local files over
filelist=os.listdir(self.portableswdir)
self.__put_multiple_files(messagehost,filelist,localdir=self.portableswdir,remotedir=optimizer_params.remotedir)#copy local files over
ssh_utils.issue_command(messagehost,'export PATH=/opt/Condor/release/bin:$PATH\ncondor_submit submitfile.submit')
self.jobdbnames=[]
for job_num in range(self.num_jobs): #make a list of the databases we need:
jobdbname='outputdb'+str(job_num)+'.sqlite'
self.jobdbnames.append(jobdbname)
#wait till you know file exists:
dbs_created=False
pulled_dbs=[] # list of databases which have been extracted from remote server
while (dbs_created==False):
print('waiting..')
time.sleep(20)
print('checking if dbs created:')
command='ls'
remote_filelist=ssh_utils.issue_command(messagehost, command)
for jobdbname in self.jobdbnames:
db_exists=jobdbname+'\n' in remote_filelist
if (db_exists==False):
print(jobdbname +' has not been generated')
dbs_created=False
elif db_exists==True and jobdbname not in pulled_dbs:
print(jobdbname +' has been generated')
remotefile=optimizer_params.remotedir+jobdbname
localpath=os.path.join(self.datadir,str(self.generation)+jobdbname)
ssh_utils.get_file(messagehost,remotefile,localpath)
pulled_dbs.append(jobdbname) #so that it is not extracted more than once
#here pop-in the fitness evaluation
if len(pulled_dbs)==len(self.jobdbnames):
dbs_created=True
fitness=[]
for CandidateData in self.CandidateData_list:
job_num = CandidateData.job_num
dbname=str(self.generation)+'outputdb'+str(job_num)+'.sqlite'
dbpath=os.path.join(self.datadir,dbname)
exp_id=CandidateData.exp_id
connection=sqldbutils.db_connect(dbpath) #establish a database connection
query='SELECT numerical_value\
FROM output_params WHERE experiment_id=\
'+str(exp_id)+' AND parameter="fitness"'
exp_fitness=sqldbutils.execute_query(connection,query)
exp_fitness=exp_fitness.fetchall()
exp_fitness=exp_fitness[0][0]
#print('fitness: %s'%exp_fitness)
fitness.append(exp_fitness)
self.generation+=1
return fitness
def __local_evaluate(self,candidates,args):
import time
self.CandidateData_list=[]
analysis_var=self.analysis_var
#Build submitfile.submit and runx.sh files:
self.__build_condor_files(candidates) #exp_id now corresponds to position in chromosome/fitness array
fitness=[]
#submit the jobs to the grid
os.chdir(self.tmpdir)
os.system('condor_submit submitfile.submit')
#wait till you know file exists:
dbs_created=False
while (dbs_created==False):
print('checking if dbs created:')
for job_num in range(self.num_jobs):
jobdbname='outputdb'+str(job_num)+'.sqlite'
jobdbpath=os.path.join(self.datadir,jobdbname)
print(jobdbpath)
db_exists=os.path.exists(jobdbpath)
if (db_exists==False):
time.sleep(60)
dbs_created=False
break
dbs_created=True
for CandidateData in self.CandidateData_list:
job_num = CandidateData.job_num
dbname='/outputdb'+str(job_num)+'.sqlite'
dbpath=self.datadir+dbname
exp_id=CandidateData.exp_id
exp_data=sqldbutils.sim_data(dbpath,exp_id)
analysis=analysis.IClampAnalysis(exp_data.samples,exp_data.t,analysis_var,5000,10000)
exp_fitness=analysis.evaluate_fitness(optimizer_params.targets,optimizer_params.weights,cost_function=analysis.normalised_cost_function)
fitness.append(exp_fitness)
for job_num in range(self.num_jobs):
jobdbname='outputdb'+str(job_num)+'.sqlite'
jobdbpath=os.path.join(self.datadir,jobdbname)
print(jobdbpath)
os.remove(jobdbpath)
return fitness
'''
class PointBasedAnalysis(object):
    """
    Minimal analysis helper that samples a (v, t) trace at specific times.

    Target names are expected to look like '<label>_<time>' (e.g.
    'value_150.0'); the numeric suffix is the time, in the same units as t,
    at which the trace is sampled.
    """

    def __init__(self, v, t):
        # Stored as numpy arrays so analyse() can use searchsorted.
        self.v = numpy.array(v)
        self.t = numpy.array(t)

    def analyse(self, targets):
        """
        Return {target: trace value at the last sample strictly before the
        target's time}.

        Same result as the original linear scan for in-range times, but
        O(log n) per target, and with the original's two failure modes fixed:
        an UnboundLocalError when the first sample time was already >= the
        requested time (now a ValueError), and an IndexError when the
        requested time was beyond the trace (now clamps to the last sample).

        :param targets: iterable of target names of the form 'label_<time>'
        :raises ValueError: if a requested time precedes the first sample
        """
        analysis_results = {}
        for target in targets:
            target_time = float(target.split('_')[1])
            # Index of the first sample with t >= target_time.
            i = int(numpy.searchsorted(self.t, target_time))
            if i == 0:
                raise ValueError('Target time %s precedes first sample time %s'
                                 % (target_time, self.t[0]))
            analysis_results[target] = self.v[i - 1]
        return analysis_results
class PointValueEvaluator(__Evaluator):
    """
    Locally-evaluates (not using cluster or grid computing) a model.

    The evaluate routine runs the model and returns its fitness value.
    """

    def __init__(self,
                 controller,
                 parameters,
                 weights,
                 targets=None):
        super(PointValueEvaluator, self).__init__(parameters,
                                                  weights,
                                                  targets,
                                                  controller)

    def evaluate(self, candidates, args):
        """
        Run every candidate through the controller and score the resulting
        (time, sample) traces against the point targets.

        :param candidates: list of candidate parameter sets
        :param args: unused, required by the optimizer's evaluator signature
        :return: list of fitness values, one per candidate
        """
        print("\n>>>>> Evaluating: ")
        for cand in candidates:
            print(">>>>> %s" % cand)

        simulations_data = self.controller.run(candidates,
                                               self.parameters)

        fitness = []
        for data in simulations_data:
            times = data[0]
            samples = data[1]
            data_analysis = PointBasedAnalysis(samples,
                                               times)
            fitness_value = self.evaluate_fitness(data_analysis,
                                                  self.targets,
                                                  self.weights)
            fitness.append(fitness_value)
            print('Fitness: %s\n' % fitness_value)
        return fitness

    def evaluate_fitness(self,
                         data_analysis,
                         target_dict=None,
                         target_weights=None,
                         cost_function=normalised_cost_function):
        """
        Return the estimated fitness of the data, based on the cost function being used.

        :param data_analysis: PointBasedAnalysis instance
        :param target_dict: key-value pairs for targets (defaults to empty;
            the original used a shared mutable default ``{}``)
        :param target_weights: key-value pairs for target weights
        :param cost_function: cost function (callback) to assign individual targets sub-fitness.
        """
        if target_dict is None:
            target_dict = {}

        fitness = 0
        analysed = data_analysis.analyse(target_dict)
        for target in target_dict.keys():
            target_value = target_dict[target]

            # `is None` instead of `== None`; unknown targets keep weight 1
            # here (unlike NetworkEvaluator, which defaults them to 0).
            if target_weights is None:
                target_weight = 1
            elif target in target_weights.keys():
                target_weight = target_weights[target]
            else:
                target_weight = 1.0

            if target_weight > 0:
                # let function pick Q automatically
                inc = target_weight * cost_function(analysed[target], target_value)
                fitness += inc
                print('Target %s (weight %s): target val: %s, actual: %s, fitness increment: %s' % (target, target_weight, target_value, analysed[target], inc))
        return fitness
| |
from __future__ import print_function
import six
import socket
import keyring
import requests
from threading import Lock
from suds.client import Client
from suds.transport.http import HttpAuthenticated
from .proxies import *
from .util import async_dispatch, nodesort, to_base64, from_base64
from . import cache
@six.python_2_unicode_compatible
class Node(object):
    '''A single Halon node.

    :ivar str name: The configured name of the node.
    :ivar halon.models.NodeList cluster: The cluster the node belongs to.
    :ivar str scheme: The scheme the node should be accessed over, either http or https
    :ivar str host: The hostname of the node
    :ivar str username: The effective username; the node's, if any, otherwise the cluster's
    :ivar str password: The effective password; the node's or keychain's, if any, otherwise the cluster's
    '''

    # Class-level defaults; overridden per instance by load_data() and the
    # property setters below.
    name = u"noname"
    cluster = None
    scheme = 'http'
    host = None
    no_verify = False
    local_username = None
    local_password = None
    # NOTE(review): defined on the class, so this single Session (connection
    # pool, cookies) is shared by every Node instance — presumably intentional
    # for pooling; confirm before moving it into __init__.
    session = requests.Session()

    @property
    def service(self):
        '''A proxy that can be used to make SOAP calls to the node.

        :rtype: :class:`halon.proxies.NodeSoapProxy`
        '''
        return NodeSoapProxy(self)

    @property
    def url(self):
        '''The base URL for the node.'''
        return "{scheme}://{host}/remote/".format(scheme=self.scheme, host=self.host)

    @property
    def username(self):
        # Effective username: this node's own, falling back to the cluster's.
        return self.local_username or self.cluster.username

    @username.setter
    def username(self, val):
        self.local_username = val

    @property
    def password(self):
        # Effective password: own, then the OS keyring, then the cluster's.
        return self.local_password or self.keyring_password or self.cluster.password

    @password.setter
    def password(self, val):
        self.local_password = val

    @property
    def keyring_password(self):
        # Lazily look up the keyring entry once host and username are known,
        # and cache the result (including None) in _keyring_password.
        if not hasattr(self, '_keyring_password') and self.host and self.username:
            self._keyring_password = keyring.get_password(self.host, self.username)
        return getattr(self, '_keyring_password', None)

    def __init__(self, data=None, name=None, cluster=None, load_wsdl=False):
        '''Initializes a Node with the given configuration data and name.

        :param data: optional configuration string, parsed by load_data()
        :param name: display name for the node
        :param cluster: owning NodeList; a single-node cluster is created
            when omitted
        :param load_wsdl: eagerly load the cached WSDL instead of deferring
            to the first SOAP call
        '''
        self.name = name
        self.cluster = cluster if not cluster is None else NodeList([self])
        if data:
            self.load_data(data)
        if load_wsdl:
            self.load_wsdl()

    def load_data(self, s):
        '''Updates the node's data from the given configuration string,
        overwriting any existing data.

        Accepted forms: ``[scheme://][username[:password]@]host``.
        '''
        remainder = s

        # Split out any scheme
        parts = remainder.split('://', 1)
        if len(parts) == 2:
            self.scheme = parts[0]
            remainder = parts[1]

        # Split the host from the credentials
        parts = remainder.split('@', 1)
        if len(parts) == 2:
            remainder = parts[0]
            self.host = parts[1]

            # Credentials may or may not include the password
            parts = remainder.split(':', 1)
            if len(parts) == 2:
                self.username = parts[0]
                self.password = parts[1]
            else:
                self.username = parts[0]
        else:
            self.host = parts[0]

    def load_wsdl(self):
        '''Loads the cached WSDL file.

        This is called automatically the first time a SOAP call is attempted,
        or you may call it yourself on startup to e.g. create a bunch of
        clients at once over a bunch of threads.'''
        if not hasattr(self, '_client'):
            # faults=False makes suds return (status, result) tuples instead
            # of raising; nosend=True yields request contexts so the caller
            # controls the actual transport.
            self._client = Client("file:{0}".format(cache.get_path('wsdl.xml')), location=self.url, faults=False, nosend=True)
            self._client.set_options(cache=None)

    def make_request(self, name_, *args, **kwargs):
        '''Convenience function that creates a SOAP request context from a
        function name and a set of parameters.

        The first call to this function is blocking, as the node's WSDL file
        will be downloaded synchronously.'''
        self.load_wsdl()
        return getattr(self._client.service, name_)(*args, **kwargs)

    def command(self, command, *args, **kwargs):
        '''Convenience function that executes a command on the node, and returns
        a CommandProxy that can be used to iterate the command's output, or interact
        with the running process.

        Note that ``args`` are the command's arguments (first one is the
        command name), while ``kwargs`` controls how it's executed, specified
        by the following flags:

        * ``size`` - the viewport size as (cols, rows), defaults to (80,24)
        * ``cols``, ``rows`` - individual components of ``size``
        '''
        # Allow calls as command("cmd", "arg1", "arg2") or command("cmd arg1 arg2")
        parts = [command] + list(args) if args else command.split(' ')

        # Allow size to be specified as size=(cols,rows) or cols=,rows=
        size = kwargs.get('size', (80, 24))
        size = (kwargs.get('cols', size[0]), kwargs.get('rows', size[1]))

        # argv items are base64-encoded for transport; only a 200 status
        # yields a usable CommandProxy.
        code, cid = self.service.commandRun(argv={'item': [to_base64(part) for part in parts]}, cols=size[0], rows=size[1])
        return (200, CommandProxy(self, cid)) if code == 200 else (code, None)

    def __str__(self):
        s = u"{name} ({host})".format(name=self.name, host=self.host)
        if self.cluster.name:
            s = u"{cluster}/{s}".format(cluster=self.cluster.name, s=s)
        return s

    def __repr__(self):
        return "Node(name={name}, cluster=<{cluster}>)".format(name=self.name, cluster=self.cluster.name if self.cluster else None)
@six.python_2_unicode_compatible
class NodeList(list):
    '''A list of Halon nodes.

    Behaves like a plain list, but additionally tracks shared credentials and
    can execute SOAP calls either per-node or asynchronously across every
    member at once.
    '''

    name = None
    local_username = None
    local_password = None

    @property
    def username(self):
        # The list's own username wins; otherwise the first member node with
        # an explicitly configured one.
        if self.local_username:
            return self.local_username
        configured = (node.local_username for node in self if node.local_username)
        return next(configured, self.local_username)

    @property
    def password(self):
        # The list's own password wins; otherwise the first member node that
        # has either a local or a keyring password.
        if self.local_password:
            return self.local_password
        candidates = (node.password or node.keyring_password
                      for node in self
                      if node.local_password or node.keyring_password)
        return next(candidates, self.local_password)

    @property
    def service(self):
        '''An asynchronous SOAP proxy.

        This is the recommended way to target multiple nodes with a call, as it
        will only take as long as the slowest node takes to respond, rather
        than taking longer and longer the more nodes you're targeting.

        :rtype: :class:`halon.proxies.NodeListSoapProxy`
        '''
        return NodeListSoapProxy(self)

    def command(self, command, *args):
        '''Executes a command across all contained nodes.'''
        jobs = {node: (node.command, (command,) + args) for node in self}
        return nodesort(async_dispatch(jobs))

    def load_data(self, data):
        '''Updates the nodelist's data from the given configuration dictionary,
        overwriting any existing data.'''
        self.local_username = data.get('username', self.local_username)
        self.local_password = data.get('password', self.local_password)

    def __str__(self):
        members = ', '.join(node.name for node in self)
        return u"{name} -> [{nodes}]".format(name=self.name, nodes=members)
| |
import getpass
import importlib
import json, glob
from sys import stderr
import sys
import semver_adapter
from constants import *
from errors import CleanDirError
import imp
from pygit2 import Signature, GIT_OBJ_COMMIT, GIT_SORT_TOPOLOGICAL, GIT_STATUS_CURRENT
MODULE_EXTENSIONS = ('.py', '.pyc', '.pyo')
def get_commands():
    """
    Get all main commands of the app. This looks into COMMAND_DIR, every folder in it is a command
    :return: list of all commands
    """
    import pkgutil

    import commands

    found = []
    for _importer, mod_name, is_pkg in pkgutil.iter_modules(commands.__path__):
        if is_pkg:
            found.append(mod_name)
    return found
def is_file_extension(filename, extensions=IGNORED_EXTENSIONS):
    """
    Return True if *filename*'s extension (without the leading dot) is one of
    *extensions*.

    :param filename: file name to inspect
    :param extensions: iterable of extensions without dots (e.g. ('pyc',))
    """
    # Return the boolean directly instead of the `if: return True` dance;
    # also fixes the `extesion` typo in the local name.
    _name, extension = path.splitext(filename)
    return extension[1:] in extensions
def commit(repo, message='init package', branch=APP_GIT_BRANCH, init=False):
    """
    Create a commit for all changes in a given repo
    :param repo: The repo to be committed
    :param message: Message for the commit
    :param branch: The branch to commit, if not specified, master branch will be used
    :param init: indicates whether this commit is a initial commit. This is use to initialize git repo since git
    requires a commit to use branch
    :return: None
    """
    # A non-initial commit with a clean working tree is an error.
    if not init and repo.status() == {}:
        raise CleanDirError('No changes detected')

    # Stage every change and write out the tree object.
    idx = repo.index
    idx.add_all()
    idx.write()
    tree_oid = idx.write_tree()

    signature = Signature(USER['name'], USER['email'])
    target_branch = repo.lookup_branch(branch)

    # The very first commit of a fresh repository has no parents.
    parents = [target_branch.target] if repo.listall_branches() else []
    repo.create_commit(
        APP_GIT_REF,
        signature, signature, message,
        tree_oid,
        parents
    )
def create_tag(repo, name, branch=APP_GIT_BRANCH):
    """Create an annotated tag *name* pointing at the tip of *branch*."""
    tagger = Signature(USER['name'], USER['email'])
    tip = repo.lookup_branch(branch).target
    repo.create_tag(name, tip, GIT_OBJ_COMMIT, tagger, name)
def login():
    """
    Ask username and password
    :return: (username, password) tuple
    """
    # getpass keeps the password off the terminal echo.
    return raw_input("Email: "), getpass.getpass()
def ask_package_info():
    """
    Ask user for package information to be stored in .json file

    Prompts until a non-empty name and a valid semantic version are given.

    :return: dict with 'name' and 'version' keys
    """
    # Renamed the local from `json`, which shadowed the imported json module.
    info = {}
    while "name" not in info or info['name'] is None:
        info['name'] = raw_input("Package name: ")
    while "version" not in info or info['version'] is None:
        try:
            info['version'] = raw_input("Version: ")
            # Raises if the string is not a valid semantic version.
            semver_adapter.parse(info['version'])
        except Exception:
            # Reset so the loop asks again. (Also fixes the "Sematic" typo
            # in the user-facing message.)
            info['version'] = None
            print('A Semantic Version number is required.')
    return info
# Find the root path of a project: check for .coolbee
def find_root(current_path=USER_CURRENT_DIR):
    """
    Walk upwards from *current_path* until a directory containing the app's
    git folder is found.

    :param current_path: directory to start searching from
    :return: the project root path
    :raises IOError: if the filesystem root is reached without finding one
        (the original recursed forever here, since dirname('/') == '/')
    """
    for entry in listdir(current_path):
        if entry == APP_GIT_FOLDER_NAME and path.isdir(path.join(current_path, entry)):
            return current_path
    parent = os.path.dirname(current_path)
    # Stop at the filesystem root, mirroring find_root_json's behaviour.
    if parent == current_path or path.split(current_path)[1] == '':
        raise IOError('Folder {0} not found.'.format(APP_GIT_FOLDER_NAME))
    return find_root(parent)
# Find the root path of a project: check for json file
def find_root_json(current_path=USER_CURRENT_DIR, target=APP_JSON):
    """Walk upwards from *current_path* until a directory containing the file
    *target* is found; raise IOError once the filesystem root is reached."""
    # An empty final path component means we have hit the root.
    if not path.split(current_path)[1]:
        raise IOError('File {0} not found.'.format(target))
    files_here = [entry for entry in listdir(current_path)
                  if path.isfile(path.join(current_path, entry))]
    if target in files_here:
        return current_path
    return find_root_json(os.path.dirname(current_path), target=target)
def read_json(filepath):
    """Parse and return the JSON document stored at *filepath*.

    :raises IOError: if the file cannot be opened or read
    :raises ValueError: if the contents are not valid JSON
    """
    try:
        with open(filepath) as handle:
            return json.load(handle)
    except IOError:
        # Re-raise with the offending path in the message.
        raise IOError('Could not read file: ' + filepath)
    except ValueError:
        raise ValueError('Invalid json ' + filepath)
# Read package json in the root project directory
def read_package_json(filename=APP_JSON):
    """Locate the project root (the directory holding *filename*) and parse it."""
    root = find_root_json(target=filename)
    return read_json(path.join(root, filename))
# Verify the package is initialized correctly
def verify_package():
    """Placeholder for package sanity checks; currently a no-op.

    The intended checks (disabled in the original code as well) were:
      * the project root contains the app's git folder, else exit(1)
      * the project root contains the APP_JSON file, else exit(1)
    """
    pass
def get_commits(repo, target=None, order=GIT_SORT_TOPOLOGICAL):
    """
    Map commit message -> commit object for the history reachable from *target*.

    Commit messages are used as version identifiers here, so duplicates are
    rejected.

    :param repo: pygit2 repository
    :param target: oid to start walking from (defaults to HEAD)
    :param order: pygit2 walk order
    :raises KeyError: if two commits share the same message
    """
    if target is None:
        target = repo.head.target
    # Renamed from `dict`, which shadowed the builtin; membership test on the
    # dict itself instead of `.keys()`.
    commits = {}
    for commit in repo.walk(target, order):
        if commit.message in commits:
            raise KeyError('Version must be unique')
        commits[commit.message] = commit
    return commits
def get_commit_list(repo, target=None, order=GIT_SORT_TOPOLOGICAL):
    """
    Return the commit messages (the keys of get_commits) as a list.

    :param repo: pygit2 repository
    :param target: oid to start walking from (defaults to HEAD)
    :param order: pygit2 walk order
    """
    if target is None:
        target = repo.head.target
    # The original shadowed the `list` builtin and appended in a loop;
    # list() over the dict yields its keys directly.
    return list(get_commits(repo=repo, target=target, order=order))
# Get all version of a cached package
def get_versions_cached(repo, target=None, order=GIT_SORT_TOPOLOGICAL):
    """Thin wrapper around get_commits: message -> commit for a cached package."""
    resolved = repo.head.target if target is None else target
    return get_commits(repo=repo, target=resolved, order=order)
# Get all engines
def get_engines():
    """
    List the engine module names available in ENGINE_DIR, skipping
    __init__.py and files with ignored (e.g. bytecode) extensions.

    :return: list of engine names (file names without extension)
    """
    engines = []
    # `entry` renamed from `file`, which shadowed the builtin.
    for entry in listdir(ENGINE_DIR):
        if not path.isfile(path.join(ENGINE_DIR, entry)):
            continue
        if entry == '__init__.py' or is_file_extension(entry, extensions=IGNORED_EXTENSIONS):
            continue
        name, _extension = path.splitext(entry)
        engines.append(name)
    return engines
# Get an engine object
def get_engine(engine='default'):
    """
    Import the engine module and return its class (the capitalized engine name).

    :param engine: engine module name inside ENGINE_DIR_NAME
    """
    # Renamed from `path`, which shadowed the os.path import used elsewhere
    # in this module.
    module_path = '{0}.{1}'.format(ENGINE_DIR_NAME, engine)
    module = importlib.import_module(module_path)
    return getattr(module, engine.capitalize())
| |
#!/usr/bin/env python3
# coding: utf-8
import os
import sys
import subprocess
import shutil
import time
import json
import argparse
import traceback
import requests
from random import randint
from subprocess import PIPE
from bitcoin import rpc
from bitcoin import SelectParams
from shutil import copyfile
# Random high base ports for the test nodes. Three consecutive ports are used
# (base + 0..2), so the upper bound must be 65533 to keep base+2 <= 65535;
# the original randint(1024, 65535) could generate invalid port numbers.
TEST_SWARM_PORT = randint(1024, 65533)
TEST_GATEWAY_PORT = randint(1024, 65533)

# Multiaddrs of the three bootstrap nodes; the peer IDs correspond to the
# fixed mnemonics below.
BOOTSTRAP_NODES = [
    "/ip4/127.0.0.1/tcp/" + str(TEST_SWARM_PORT + 0) + "/ipfs/Qmdo6RpKtSqk73gUwaiaPkq6gWk49y3NCPCQbVsM9XTma3",
    "/ip4/127.0.0.1/tcp/" + str(TEST_SWARM_PORT + 1) + "/ipfs/QmVQzkdi3Fq6LRFG9UNqDZfSry67weCZV6ZL26QVx64UFy",
    "/ip4/127.0.0.1/tcp/" + str(TEST_SWARM_PORT + 2) + "/ipfs/Qmd5qDpcYkHCmkj9pMXU9TKBqEDWgEmtoHD5xjdJgumaHg"
]

# Deterministic wallet mnemonics for the three bootstrap nodes.
BOOTSTAP_MNEMONICS = [
    "today summer matter always angry crumble rib lucky park shoulder police puppy",
    "husband letter control display skin tennis this expand garbage boil pig exchange",
    "resist museum dizzy there pulp suspect dust useless drama grab visa trumpet"
]
class TestFailure(Exception):
    """Raised by run_test() implementations to mark the scenario as failed."""
    pass
class OpenBazaarTestFramework(object):
    """
    Base class for OpenBazaar integration tests.

    Subclasses set ``self.num_nodes`` and implement :meth:`run_test`; calling
    :meth:`main` parses CLI options, spins up the openbazaar-go nodes (plus an
    optional regtest bitcoind), runs the test and tears everything down,
    exiting non-zero on failure.
    """

    def __init__(self):
        # Per-node dicts: data_dir / gateway_url / swarm_port / peerId.
        self.nodes = []
        # rpc.Proxy once bitcoind is reachable.
        self.bitcoin_api = None

    def setup_nodes(self):
        """Initialize and start every test node."""
        for i in range(self.num_nodes):
            self.configure_node(i)
            self.start_node(self.nodes[i])

    def setup_network(self):
        """Start bitcoind (if a binary was given) and then all nodes."""
        if self.bitcoind is not None:
            self.start_bitcoind()
        self.setup_nodes()

    def run_test(self):
        """Subclasses implement the actual test scenario here."""
        raise NotImplementedError

    def send_bitcoin_cmd(self, *args):
        """Issue a bitcoind RPC call, transparently reconnecting on a broken pipe."""
        try:
            return self.bitcoin_api.call(*args)
        except BrokenPipeError:
            self.bitcoin_api = rpc.Proxy(btc_conf_file=self.btc_config)
            return self.send_bitcoin_cmd(*args)

    def configure_node(self, n):
        """Run `openbazaar-go init` for node *n* and rewrite its config for the
        local test network (ports, bootstrap peers, trusted regtest wallet)."""
        dir_path = os.path.join(self.temp_dir, "openbazaar-go", str(n))
        args = [self.binary, "init", "-d", dir_path, "--testnet"]
        # The first three nodes get fixed mnemonics so their peer IDs match
        # the hard-coded BOOTSTRAP_NODES multiaddrs.
        if n < 3:
            args.extend(["-m", BOOTSTAP_MNEMONICS[n]])
        process = subprocess.Popen(args, stdout=PIPE)
        self.wait_for_init_success(process)
        with open(os.path.join(dir_path, "config")) as cfg:
            config = json.load(cfg)
        config["Addresses"]["Gateway"] = "/ip4/127.0.0.1/tcp/" + str(TEST_GATEWAY_PORT + n)
        config["Addresses"]["Swarm"] = ["/ip4/127.0.0.1/tcp/" + str(TEST_SWARM_PORT + n)]
        # Bootstrap against every test node except ourselves.
        to_boostrap = []
        for node in BOOTSTRAP_NODES:
            if config["Addresses"]["Swarm"][0] not in node:
                to_boostrap.append(node)
        config["Bootstrap-testnet"] = to_boostrap
        config["Wallet"]["TrustedPeer"] = "127.0.0.1:18444"
        config["Wallet"]["FeeAPI"] = ""
        config["Crosspost-gateways"] = []
        config["Swarm"]["DisableNatPortMap"] = True
        if self.bitcoincash:
            config["Wallet"]["Type"] = "bitcoincash"
        with open(os.path.join(dir_path, "config"), 'w') as outfile:
            outfile.write(json.dumps(config, indent=4))
        node = {
            "data_dir": dir_path,
            "gateway_url": "http://localhost:" + str(TEST_GATEWAY_PORT + n) + "/",
            "swarm_port": str(TEST_SWARM_PORT + n)
        }
        self.nodes.append(node)

    @staticmethod
    def wait_for_init_success(process):
        """Block until `init` reports success; raise if the process dies first."""
        while True:
            if process.poll() is not None:
                raise Exception("OpenBazaar node initialization failed")
            output = process.stdout
            for o in output:
                if "OpenBazaar repo initialized" in str(o):
                    return

    def start_node(self, node):
        """Start one node and record the peer ID it prints on startup."""
        args = [self.binary, "start", "-v", "-d", node["data_dir"], *self.options]
        process = subprocess.Popen(args, stdout=PIPE)
        peerId = self.wait_for_start_success(process, node)
        node["peerId"] = peerId

    @staticmethod
    def wait_for_start_success(process, node):
        """Scan the node's stdout until the gateway is listening; return the peer ID."""
        peerId = ""
        while True:
            if process.poll() is not None:
                raise Exception("OpenBazaar node failed to start")
            output = process.stdout
            for o in output:
                if "Peer ID:" in str(o):
                    # Slice the ID out of the bytes-repr of the log line:
                    # skip the label and drop the trailing quote/newline.
                    peerId = str(o)[str(o).index("Peer ID:") + 10:len(str(o)) - 3]
                if "Gateway/API server listening" in str(o):
                    return peerId

    def start_bitcoind(self):
        """Launch a regtest bitcoind with the bundled config and prime the chain."""
        SelectParams('regtest')
        dir_path = os.path.join(self.temp_dir, "openbazaar-go", "bitcoin")
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        btc_conf_file = os.path.join(dir_path, "bitcoin.conf")
        copyfile(os.path.join(os.getcwd(), "testdata", "bitcoin.conf"), btc_conf_file)
        self.btc_config = btc_conf_file
        args = [self.bitcoind, "-regtest", "-datadir=" + dir_path, "-debug=net"]
        process = subprocess.Popen(args, stdout=PIPE)
        self.wait_for_bitcoind_start(process, btc_conf_file)
        self.init_blockchain()

    def init_blockchain(self):
        """Mine enough regtest blocks for mature, spendable coinbase outputs."""
        self.send_bitcoin_cmd("generate", 1)
        self.bitcoin_address = self.send_bitcoin_cmd("getnewaddress")
        self.send_bitcoin_cmd("generatetoaddress", 1, self.bitcoin_address)
        self.send_bitcoin_cmd("generate", 435)

    def wait_for_bitcoind_start(self, process, btc_conf_file):
        """Poll until bitcoind's RPC interface answers (or the process exits)."""
        while True:
            if process.poll() is not None:
                raise Exception('bitcoind exited with status %i during initialization' % process.returncode)
            try:
                self.bitcoin_api = rpc.Proxy(btc_conf_file=btc_conf_file)
                # Any successful call proves the RPC server is up; the count
                # itself is not needed (the original bound it to an unused
                # local).
                self.bitcoin_api.getblockcount()
                break
            except Exception:
                time.sleep(0.25)

    def print_logs(self, node, log):
        """Dump one of a node's log files to stdout for post-mortem debugging."""
        log_path = os.path.join(node["data_dir"], "logs", log)
        # Context manager closes the handle even if read() fails; the
        # original leaked the file object.
        with open(log_path, 'r') as f:
            file_contents = f.read()
        print()
        print("~~~~~~~~~~~~~~~~~~~~~~ " + log + " ~~~~~~~~~~~~~~~~~~~~~~")
        print(file_contents)
        print()

    def teardown(self):
        """Ask every node to shut down, then stop bitcoind if it was started."""
        for n in self.nodes:
            requests.post(n["gateway_url"] + "ob/shutdown")
        time.sleep(2)
        if self.bitcoin_api is not None:
            try:
                self.send_bitcoin_cmd("stop")
            except BrokenPipeError:
                pass
            time.sleep(10)

    def main(self, options=["--disablewallet", "--testnet", "--disableexchangerates"]):
        """CLI entry point: parse arguments, run the test, exit 1 on failure.

        :param options: extra flags passed to every `openbazaar-go start`
        """
        parser = argparse.ArgumentParser(
            description="OpenBazaar Test Framework",
            usage="python3 test_framework.py [options]"
        )
        parser.add_argument('-b', '--binary', required=True, help="the openbazaar-go binary")
        parser.add_argument('-d', '--bitcoind', help="the bitcoind binary")
        # BUG FIX: the original declared this with action='store_true', which
        # made -t a boolean flag (tempdir became True when passed), breaking
        # every os.path.join() on it; it must accept a directory argument.
        parser.add_argument('-t', '--tempdir', help="temp directory to store the data folders", default="/tmp/")
        parser.add_argument('-c', '--bitcoincash', help="test with bitcoin cash", action='store_true', default=False)
        args = parser.parse_args(sys.argv[1:])
        self.binary = args.binary
        self.temp_dir = args.tempdir
        self.bitcoind = args.bitcoind
        self.bitcoincash = args.bitcoincash
        self.options = options

        # Start from a clean slate; ignore_errors replaces the original's
        # bare `except: pass` around rmtree (which also swallowed e.g.
        # KeyboardInterrupt).
        shutil.rmtree(os.path.join(self.temp_dir, "openbazaar-go"), ignore_errors=True)

        failure = False
        try:
            self.setup_network()
            self.run_test()
        except TestFailure as e:
            print(repr(e))
            failure = True
        except Exception as e:
            print("Unexpected exception caught during testing: " + repr(e))
            traceback.print_tb(sys.exc_info()[2])
            failure = True
        self.teardown()
        if failure:
            sys.exit(1)
| |
# -*- coding: utf-8 -*-
from django.contrib.admin import site
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
try:
# Not supported after Django 1.8
from django.db.models import get_model
except ImportError:
# Only from Django 1.9 onwards
from django.apps import apps
get_model = apps.get_model
from django.forms.models import modelform_factory
from django.http import HttpResponseRedirect, HttpResponseForbidden
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.template.loader import get_template
from django.template import TemplateDoesNotExist
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
try:
# For old versions of Django supporting py2.6
from django.utils.importlib import import_module
except ImportError:
from importlib import import_module
from django.conf import settings
from django.forms import CharField
from django.contrib import messages
from .forms import DeleteRequestForm, FrontendAdminModelForm
EXCLUDES = getattr(settings, 'FRONTEND_EXCLUDES', {})
FIELDS = getattr(settings, 'FRONTEND_FIELDS', {})
FORMS = getattr(settings, 'FRONTEND_FORMS', {})
def import_function(s):
    """
    Import a function given the string formatted as
    `module_name.function_name` (eg `django.utils.text.capfirst`)
    """
    # Split on the last dot: everything before is the module path, the
    # final component is the attribute to fetch.
    module_path, _, attr_name = s.rpartition('.')
    return getattr(import_module(module_path), attr_name)
def check_permission(request, mode_name, app_label, model_name):
    '''
    Check for proper permissions. mode_name may be either add, change or delete.
    '''
    codename = '%s.%s_%s' % (app_label, mode_name, model_name)
    user = request.user
    return user.is_active and user.has_perm(codename)
def _get_instance(request, mode_name, app_label, model_name, instance_id=None,
                  form=None,
                  form_fields=None,
                  form_exclude=None):
    '''
    Returns the model and an instance_form for the given arguments. If an primary
    key (instance_id) is given, it will return also the instance.

    If the user has no permission to add, change or delete the object, a
    HttpResponse is returned.

    Return shape varies: an HttpResponseForbidden on permission or model
    failure, ``(model, instance_form)`` without instance_id, or
    ``(model, instance_form, instance)`` with it — callers must check with
    isinstance before unpacking.
    '''
    # Check for permission to add/change/delete this object
    if not check_permission(request, mode_name, app_label, model_name):
        return HttpResponseForbidden('You have no permission to do this!')
    try:
        model = get_model(app_label, model_name)
    # Model does not exist
    except AttributeError:
        return HttpResponseForbidden('This model does not exist!')
    label = '%s.%s' % (app_label, model_name)

    # get form for model: a settings-level FRONTEND_FORMS override wins,
    # then the form registered with the admin site for this model, then
    # the generic fallback form.
    if label in FORMS and not form:
        form = import_function(FORMS[label])
    elif model in site._registry and not form:
        form = site._registry[model].form
    elif form is None:
        form = FrontendAdminModelForm

    # settings-level field restrictions override the keyword arguments
    if label in EXCLUDES:
        form_exclude = EXCLUDES[label]
    if label in FIELDS:
        form_fields = FIELDS[label]

    instance_form = modelform_factory(model, form=form,
                                      fields=form_fields, exclude=form_exclude)

    # if instance_id is set, grab this model object
    if instance_id:
        # NOTE(review): model.objects.get may raise DoesNotExist here —
        # apparently left to propagate to the caller; confirm before changing.
        instance = model.objects.get(pk=instance_id)
        return model, instance_form, instance
    return model, instance_form
def _handle_cancel(request, instance=None):
'''
Handles clicks on the 'Cancel' button in forms. Returns a redirect to the
last page, the user came from. If not given, to the detail-view of
the object. Last fallback is a redirect to the common success page.
'''
if request.POST.get('_cancel', False):
if request.GET.get('next', False):
return HttpResponseRedirect(request.GET.get('next'))
if instance and hasattr(instance, 'get_absolute_url'):
return HttpResponseRedirect(instance.get_absolute_url())
return HttpResponseRedirect(reverse('frontendadmin_success'))
return None
def _handle_response(request, instance=None):
    '''
    Handles redirects for completed form actions. Returns a redirect to the
    last page, the user came from. If not given, to the detail-view of
    the object. Last fallback is a redirect to the common success page.
    '''
    # request.REQUEST was deprecated in Django 1.7 and removed in 1.9; this
    # module already supports 1.9 (see the apps.get_model import fallback),
    # so look the parameter up in GET and POST explicitly instead.
    next_url = request.GET.get('next') or request.POST.get('next')
    if next_url:
        return HttpResponseRedirect(next_url)
    if instance and hasattr(instance, 'get_absolute_url'):
        return HttpResponseRedirect(instance.get_absolute_url())
    return HttpResponseRedirect(reverse('frontendadmin_success'))
def _find_template(template_name, app_label=None, model_name=None):
"""
Finds a template_name for the given, optional ``app_label`` . ``model_name``
"""
if app_label is None and model_name is None:
return 'frontendadmin/%s' % template_name
try:
name = 'frontendadmin/%s_%s_%s' % (app_label, model_name, template_name)
get_template(name)
return name
except TemplateDoesNotExist:
return 'frontendadmin/%s' % template_name
def _get_template(request, app_label=None, model_name=None):
    '''
    Returns either the ajax or the normal (full blown) template name.
    '''
    template_name = 'form_ajax.html' if request.is_ajax() else 'form.html'
    return _find_template(template_name, app_label, model_name)
@never_cache
@login_required
def add(request, app_label, model_name, mode_name='add',
        form_fields=None,
        form_exclude=None):
    '''
    Generic frontend "add object" view for the model ``app_label.model_name``.

    Renders a model form on GET; on POST, validates and saves, messages the
    user and redirects back to where they came from (or returns the ajax
    success view).
    '''
    # Get model, instance_form and instance for arguments
    instance_return = _get_instance(request, mode_name, app_label, model_name,
                                    form_fields=form_fields,
                                    form_exclude=form_exclude)
    # Permission/model lookup failed — pass the 403 straight through.
    if isinstance(instance_return, HttpResponseForbidden):
        return instance_return
    model, instance_form = instance_return

    # Handle cancel request
    cancel = _handle_cancel(request)
    if cancel:
        return cancel

    if request.method == 'POST':
        form = instance_form(request.POST, request.FILES)
        if form.is_valid():
            instance = form.save()
            # Give the user a nice message
            msg = _(u'Your %(model_name)s was added successfully') % \
                {'model_name': model._meta.verbose_name}
            try:
                # message_set only exists on very old Django user models ...
                request.user.message_set.create(message=msg)
            except AttributeError:
                # ... newer versions use the messages framework instead.
                messages.success(request, msg)
            # Return to last page
            if request.is_ajax():
                # NOTE(review): `success` is not defined in this chunk —
                # presumably a sibling view in this module; verify.
                return success(request)
            return _handle_response(request, instance)
        # Invalid form falls through and is re-rendered with errors.
    else:
        form = instance_form()

    template_context = {
        'action': 'add',
        'action_url': request.get_full_path(),
        'model_title': model._meta.verbose_name,
        'form': form
    }
    return render_to_response(
        _get_template(request, app_label, model_name),
        template_context,
        RequestContext(request)
    )
@never_cache
@login_required
def change(request, app_label, model_name, instance_id, mode_name='change',
        form_fields=None,
        form_exclude=None):
    '''
    Generic frontend view to edit the existing instance ``instance_id`` of
    the model ``app_label.model_name``.

    ``form_fields``/``form_exclude`` restrict the generated ModelForm.
    '''
    # Get model, instance_form and instance for arguments
    instance_return = _get_instance(request, mode_name, app_label, model_name,
                                    instance_id,
                                    form_fields=form_fields,
                                    form_exclude=form_exclude)
    # _get_instance signals permission problems by returning the response itself
    if isinstance(instance_return, HttpResponseForbidden):
        return instance_return
    model, instance_form, instance = instance_return
    # Handle cancel request
    cancel = _handle_cancel(request)
    if cancel:
        return cancel
    if request.method == 'POST':
        form = instance_form(request.POST, request.FILES, instance=instance)
        if form.is_valid():
            instance = form.save()
            msg=_(u'Your %(model_name)s was changed successfully') % \
                            {'model_name': model._meta.verbose_name}
            # Give the user a nice message
            # ``User.message_set`` was removed in newer Django versions;
            # fall back to the messages framework
            try:
                request.user.message_set.create(message=msg)
            except AttributeError:
                messages.success(request, msg)
            # Return to success page
            if request.is_ajax():
                return success(request)
            return _handle_response(request, instance)
        # invalid POST: fall through and re-render the bound form with errors
    else:
        form = instance_form(instance=instance)
    template_context = {
        'action': 'change',
        'action_url': request.get_full_path(),
        'model_title': model._meta.verbose_name,
        'form': form,
    }
    return render_to_response(
        _get_template(request, app_label, model_name),
        template_context,
        RequestContext(request)
    )
@never_cache
@login_required
def delete(request, app_label, model_name, instance_id,
        delete_form=DeleteRequestForm):
    '''
    Generic frontend view to delete the instance ``instance_id`` of the
    model ``app_label.model_name`` after confirmation via ``delete_form``.
    '''
    # Get model, instance_form and instance for arguments
    instance_return = _get_instance(request, 'delete', app_label, model_name, instance_id)
    # _get_instance signals permission problems by returning the response itself
    if isinstance(instance_return, HttpResponseForbidden):
        return instance_return
    model, instance_form, instance = instance_return
    # Handle cancel request
    cancel = _handle_cancel(request)
    if cancel:
        return cancel
    if request.method == 'POST':
        form = delete_form(request.POST)
        if form.is_valid():
            instance.delete()
            # Give the user a nice message
            msg=_(u'Your %(model_name)s was deleted.') % \
                            {'model_name': model._meta.verbose_name}
            # ``User.message_set`` was removed in newer Django versions;
            # fall back to the messages framework
            try:
                request.user.message_set.create(message=msg)
            except AttributeError:
                messages.success(request, msg)
            # Return to last page
            if request.is_ajax():
                return success_delete(request)
            # NOTE(review): the instance was just deleted, yet
            # _handle_response may redirect to its (now stale)
            # get_absolute_url() — confirm this is intended
            return _handle_response(request, instance)
    else:
        form = delete_form()
    template_context = {
        'action': 'delete',
        'action_url': request.get_full_path(),
        'model_title': model._meta.verbose_name,
        'form': form,
    }
    return render_to_response(
        _get_template(request, None, None),
        template_context,
        RequestContext(request)
    )
def success(request, template_name='success.html', template_ajax='success_ajax.html'):
    '''
    Common fallback success page.

    Views normally redirect back to the page the user came from; when that
    is not available (because somebody fiddled with the url) this page is
    rendered instead. A user should normally never see it.
    '''
    if request.is_ajax():
        chosen = template_ajax
    else:
        chosen = template_name
    return render_to_response(_find_template(chosen), {}, RequestContext(request))
def success_delete(request, template_name='success_delete.html', template_ajax='success_delete_ajax.html'):
    '''
    Success page shown after deleting an object.

    After a delete from a detail view there is no "last page" to redirect
    back to, so this unique, shiny success page is rendered instead.
    '''
    if request.is_ajax():
        chosen = template_ajax
    else:
        chosen = template_name
    return render_to_response(_find_template(chosen), {}, RequestContext(request))
| |
'''Various extensions to distributions
* skew normal and skew t distribution by Azzalini, A. & Capitanio, A.
* Gram-Charlier expansion distribution (using 4 moments),
* distributions based on non-linear transformation
- Transf_gen
- ExpTransf_gen, LogTransf_gen
- TransfTwo_gen
(defines as examples: square, negative square and abs transformations)
- these versions are without __new__
* mvnormcdf, mvstdnormcdf : cdf, rectangular integral for multivariate normal
distribution
TODO:
* Where is Transf_gen for general monotonic transformation ? found and added it
* write some docstrings, some parts I don't remember
* add Box-Cox transformation, parameterized ?
this is only partially cleaned, still includes test examples as functions
main changes
* add transf_gen (2010-05-09)
* added separate example and tests (2010-05-09)
* collect transformation function into classes
Example
-------
>>> logtg = Transf_gen(stats.t, np.exp, np.log,
numargs = 1, a=0, name = 'lnnorm',
longname = 'Exp transformed normal',
extradoc = '\ndistribution of y = exp(x), with x standard normal'
'precision for moment andstats is not very high, 2-3 decimals')
>>> logtg.cdf(5, 6)
0.92067704211191848
>>> stats.t.cdf(np.log(5), 6)
0.92067704211191848
>>> logtg.pdf(5, 6)
0.021798547904239293
>>> stats.t.pdf(np.log(5), 6)
0.10899273954837908
>>> stats.t.pdf(np.log(5), 6)/5. #derivative
0.021798547909675815
Author: josef-pktd
License: BSD
'''
#note copied from distr_skewnorm_0.py
from __future__ import print_function
from statsmodels.compat.python import range, iteritems
from scipy import stats, special, integrate # integrate is for scipy 0.6.0 ???
from scipy.stats import distributions
from statsmodels.stats.moment_helpers import mvsk2mc, mc2mvsk
import numpy as np
class SkewNorm_gen(distributions.rv_continuous):
    '''univariate Skew-Normal distribution of Azzalini

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self):
        #super(SkewNorm_gen,self).__init__(
        # NOTE(review): ``shapes``/``extradoc`` follow an older scipy API;
        # ``extradoc`` is not accepted by modern scipy — confirm the
        # targeted scipy version
        distributions.rv_continuous.__init__(self,
            name = 'Skew Normal distribution', shapes = 'alpha',
            extradoc = ''' ''' )
    def _argcheck(self, alpha):
        # any real alpha is a valid skewness parameter
        return 1 #(alpha >= 0)
    def _rvs(self, alpha):
        # sampling via the conditional representation,
        # see http://azzalini.stat.unipd.it/SN/faq.html
        # relies on scipy's private ``_size`` attribute
        delta = alpha/np.sqrt(1+alpha**2)
        u0 = stats.norm.rvs(size=self._size)
        u1 = delta*u0 + np.sqrt(1-delta**2)*stats.norm.rvs(size=self._size)
        return np.where(u0>0, u1, -u1)
    def _munp(self, n, alpha):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf, which is much slower
        return self._mom0_sc(n, alpha)
    def _pdf(self,x,alpha):
        # 2*normpdf(x)*normcdf(alpha*x)
        return 2.0/np.sqrt(2*np.pi)*np.exp(-x**2/2.0) * special.ndtr(alpha*x)
    def _stats_skip(self,x,alpha,moments='mvsk'):
        #skip for now to force moment integration as check
        pass
skewnorm = SkewNorm_gen()
# generated the same way as distributions in stats.distributions
class SkewNorm2_gen(distributions.rv_continuous):
    '''univariate Skew-Normal distribution of Azzalini

    class follows scipy.stats.distributions pattern
    (instance below is parameterized at construction, no __init__ here)
    '''
    def _argcheck(self, alpha):
        # any real alpha is valid
        return 1 #where(alpha>=0, 1, 0)
    def _pdf(self,x,alpha):
        # 2*normpdf(x)*normcdf(alpha*x)
        return 2.0/np.sqrt(2*np.pi)*np.exp(-x**2/2.0) * special.ndtr(alpha*x)
skewnorm2 = SkewNorm2_gen(name = 'Skew Normal distribution', shapes = 'alpha',
                        extradoc = ''' -inf < alpha < inf''')
class ACSkewT_gen(distributions.rv_continuous):
    '''univariate Skew-T distribution of Azzalini

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self):
        #super(SkewT_gen,self).__init__(
        # NOTE(review): ``shapes``/``extradoc`` follow an older scipy API;
        # ``extradoc`` is not accepted by modern scipy
        distributions.rv_continuous.__init__(self,
            name = 'Skew T distribution', shapes = 'df, alpha',
            extradoc = '''
Skewed T distribution by Azzalini, A. & Capitanio, A. (2003)_
the pdf is given by:
 pdf(x) = 2.0 * t.pdf(x, df) * t.cdf(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))
with alpha >=0
Note: different from skewed t distribution by Hansen 1999
.._
Azzalini, A. & Capitanio, A. (2003), Distributions generated by perturbation of
symmetry with emphasis on a multivariate skew-t distribution,
appears in J.Roy.Statist.Soc, series B, vol.65, pp.367-389
''' )
    def _argcheck(self, df, alpha):
        # df must be positive; ``alpha == alpha`` filters out NaN alpha
        return (alpha == alpha)*(df>0)
##    def _arg_check(self, alpha):
##        return np.where(alpha>=0, 0, 1)
##    def _argcheck(self, alpha):
##        return np.where(alpha>=0, 1, 0)
    def _rvs(self, df, alpha):
        # scale mixture representation: skew-normal over sqrt(chi2/df),
        # see http://azzalini.stat.unipd.it/SN/faq.html
        #delta = alpha/np.sqrt(1+alpha**2)
        V = stats.chi2.rvs(df, size=self._size)
        z = skewnorm.rvs(alpha, size=self._size)
        return z/np.sqrt(V/df)
    def _munp(self, n, df, alpha):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf
        return self._mom0_sc(n, df, alpha)
    def _pdf(self, x, df, alpha):
        # 2 * t.pdf(x, df) * t.cdf(alpha*x*sqrt((1+df)/(x**2+df)), df+1)
        return 2.0*distributions.t._pdf(x, df) * special.stdtr(df+1, alpha*x*np.sqrt((1+df)/(x**2+df)))
##
##def mvsk2cm(*args):
## mu,sig,sk,kur = args
## # Get central moments
## cnt = [None]*4
## cnt[0] = mu
## cnt[1] = sig #*sig
## cnt[2] = sk * sig**1.5
## cnt[3] = (kur+3.0) * sig**2.0
## return cnt
##
##
##def mvsk2m(args):
## mc, mc2, skew, kurt = args#= self._stats(*args,**mdict)
## mnc = mc
## mnc2 = mc2 + mc*mc
## mc3 = skew*(mc2**1.5) # 3rd central moment
## mnc3 = mc3+3*mc*mc2+mc**3 # 3rd non-central moment
## mc4 = (kurt+3.0)*(mc2**2.0) # 4th central moment
## mnc4 = mc4+4*mc*mc3+6*mc*mc*mc2+mc**4
## return (mc, mc2, mc3, mc4), (mnc, mnc2, mnc3, mnc4)
##
##def mc2mvsk(args):
## mc, mc2, mc3, mc4 = args
## skew = mc3 / mc2**1.5
## kurt = mc4 / mc2**2.0 - 3.0
## return (mc, mc2, skew, kurt)
##
##def m2mc(args):
## mnc, mnc2, mnc3, mnc4 = args
## mc = mnc
## mc2 = mnc2 - mnc*mnc
## #mc3 = skew*(mc2**1.5) # 3rd central moment
## mc3 = mnc3 - (3*mc*mc2+mc**3) # 3rd central moment
## #mc4 = (kurt+3.0)*(mc2**2.0) # 4th central moment
## mc4 = mnc4 - (4*mc*mc3+6*mc*mc*mc2+mc**4)
## return (mc, mc2, mc3, mc4)
from numpy import poly1d,sqrt, exp
import scipy
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None]*N
plist[0] = poly1d(1)
for n in range(1,N):
plist[n] = plist[n-1].deriv() - poly1d([1,0])*plist[n-1]
return plist
def pdf_moments_st(cnt):
    """Return the Gaussian expanded pdf function given the list of central
    moments (first one is mean).

    version of scipy.stats, any changes ?
    the scipy.stats version has a bug and returns normal distribution
    """
    N = len(cnt)
    if N < 2:
        raise ValueError("At least two moments must be given to "
                         "approximate the pdf.")
    totp = poly1d(1)
    sig = sqrt(cnt[1])
    mu = cnt[0]
    if N > 2:
        Dvals = _hermnorm(N+1)
        for k in range(3, N+1):
            # Find the coefficient Ck of the k-th Hermite term
            Ck = 0.0
            # integer division: under Python 3, (k-3)/2 is a float and
            # made range() raise TypeError
            for n in range((k-3)//2):
                m = k-2*n
                if m % 2:  # m is odd
                    momdiff = cnt[m-1]
                else:
                    # factorial2 lives in scipy.special (it was removed
                    # from the scipy top-level namespace)
                    momdiff = cnt[m-1] - sig*sig*special.factorial2(m-1)
                Ck += Dvals[k][m] / sig**m * momdiff
            # leftover debug ``raise SystemError`` and prints removed —
            # they made this branch unconditionally crash
            totp = totp + Ck*Dvals[k]

    def thisfunc(x):
        xn = (x-mu)/sig
        return totp(xn)*exp(-xn*xn/2.0)/sqrt(2*np.pi)/sig
    return thisfunc, totp
def pdf_mvsk(mvsk):
    """Return the Gram-Charlier expanded pdf for given mean, variance,
    skew and Fisher (excess) kurtosis.

    Parameters
    ----------
    mvsk : list of mu, mc2, skew, kurt
        distribution is matched to these four moments

    Returns
    -------
    pdffunc : function
        function that evaluates the pdf(x), where x is the non-standardized
        random variable.

    Notes
    -----
    Changed so it works only if four arguments are given. Uses explicit
    formula, not loop.

    This implements a Gram-Charlier expansion of the normal distribution
    where the first 2 moments coincide with those of the normal distribution
    but skew and kurtosis can deviate from it.

    In the Gram-Charlier distribution it is possible that the density
    becomes negative. This is the case when the deviation from the
    normal distribution is too large.

    References
    ----------
    http://en.wikipedia.org/wiki/Edgeworth_series
    Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
    Distributions, Volume 1, 2nd ed., p.30
    """
    if len(mvsk) < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")
    mu, mc2, skew, kurt = mvsk
    sig = sqrt(mc2)
    # Hermite polynomials up to order len(mvsk); the order-3 polynomial
    # in _hermnorm is negated, hence the minus sign on the skew term
    hermite = _hermnorm(len(mvsk) + 1)
    poly = poly1d(1) - (skew / 6.0) * hermite[3] + (kurt / 24.0) * hermite[4]

    def pdffunc(x):
        xn = (x - mu) / sig
        return poly(xn) * np.exp(-xn * xn / 2.0) / np.sqrt(2 * np.pi) / sig

    return pdffunc
def pdf_moments(cnt):
    """Return the Gaussian expanded pdf function given the list of central
    moments (first one is mean).

    Uses the explicit Gram-Charlier formula with exactly four central
    moments.

    Parameters
    ----------
    cnt : list of mc, mc2, mc3, mc4
        the first four central moments (mean, variance, 3rd and 4th
        central moment)

    Returns
    -------
    pdffunc : function
        evaluates pdf(x) for the non-standardized random variable

    Notes
    -----
    This implements a Gram-Charlier expansion of the normal distribution
    where the first 2 moments coincide with those of the normal distribution
    but skew and kurtosis can deviate from it.

    In the Gram-Charlier distribution it is possible that the density
    becomes negative. This is the case when the deviation from the
    normal distribution is too large.

    References
    ----------
    http://en.wikipedia.org/wiki/Edgeworth_series
    Johnson N.L., S. Kotz, N. Balakrishnan: Continuous Univariate
    Distributions, Volume 1, 2nd ed., p.30
    """
    N = len(cnt)
    # the explicit formula needs exactly four central moments; the old
    # check (N < 2) let 2- or 3-element input fail with an obscure
    # unpacking error instead of a clear message
    if N < 4:
        raise ValueError("Four moments must be given to "
                         "approximate the pdf.")
    mc, mc2, mc3, mc4 = cnt
    skew = mc3 / mc2**1.5
    kurt = mc4 / mc2**2.0 - 3.0   # Fisher kurtosis, i.e. excess kurtosis
    totp = poly1d(1)
    sig = sqrt(mc2)
    mu = mc
    if N > 2:
        Dvals = _hermnorm(N+1)
        C3 = skew/6.0
        C4 = kurt/24.0
        # Hermite polynomial of order 3 in _hermnorm is negative instead
        # of positive, hence the minus sign on the C3 term
        totp = totp - C3*Dvals[3] + C4*Dvals[4]

    def thisfunc(x):
        xn = (x-mu)/sig
        return totp(xn)*np.exp(-xn*xn/2.0)/np.sqrt(2*np.pi)/sig
    return thisfunc
class NormExpan_gen(distributions.rv_continuous):
    '''Gram-Charlier Expansion of Normal distribution

    class follows scipy.stats.distributions pattern
    but with __init__
    '''
    def __init__(self,args, **kwds):
        #todo: replace with super call
        # NOTE(review): ``shapes``/``extradoc`` follow an older scipy API;
        # ``extradoc`` is not accepted by modern scipy
        distributions.rv_continuous.__init__(self,
            name = 'Normal Expansion distribution', shapes = ' ',
            extradoc = '''
The distribution is defined as the Gram-Charlier expansion of
the normal distribution using the first four moments. The pdf
is given by
pdf(x) = (1+ skew/6.0 * H(xc,3) + kurt/24.0 * H(xc,4))*normpdf(xc)
where xc = (x-mu)/sig is the standardized value of the random variable
and H(xc,3) and H(xc,4) are Hermite polynomials
Note: This distribution has to be parameterized during
initialization and instantiation, and does not have a shape
parameter after instantiation (similar to frozen distribution
except for location and scale.) Location and scale can be used
as with other distributions, however note, that they are relative
to the initialized distribution.
''' )
        #print args, kwds
        # ``mode`` selects how ``args`` parameterizes the distribution:
        #   'sample'  : args is a data sample, moments are estimated from it
        #   'mvsk'    : args = (mean, variance, skew, excess kurtosis)
        #   'centmom' : args = first four central moments
        mode = kwds.get('mode', 'sample')
        if mode == 'sample':
            # stats.describe(...)[2:] -> (mean, var, skew, kurt); note the
            # name ``sig`` actually holds the variance here
            mu,sig,sk,kur = stats.describe(args)[2:]
            self.mvsk = (mu,sig,sk,kur)
            cnt = mvsk2mc((mu,sig,sk,kur))
        elif mode == 'mvsk':
            cnt = mvsk2mc(args)
            self.mvsk = args
        elif mode == 'centmom':
            cnt = args
            self.mvsk = mc2mvsk(cnt)
        else:
            # NOTE(review): the message omits the valid 'sample' option
            raise ValueError("mode must be 'mvsk' or centmom")
        self.cnt = cnt
        #self.mvsk = (mu,sig,sk,kur)
        #self._pdf = pdf_moments(cnt)
        # bind the expanded pdf as the private scipy hook
        self._pdf = pdf_mvsk(self.mvsk)
    def _munp(self,n):
        # use pdf integration with _mom0_sc if only _pdf is defined.
        # default stats calculation uses ppf
        return self._mom0_sc(n)
    def _stats_skip(self):
        # skip for now to force numerical integration of pdf for testing
        return self.mvsk
## copied from nonlinear_transform_gen.py
''' A class for the distribution of a non-linear monotonic transformation of a continuous random variable
simplest usage:
example: create log-gamma distribution, i.e. y = log(x),
where x is gamma distributed (also available in scipy.stats)
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp)
example: what is the distribution of the discount factor y=1/(1+x)
where interest rate x is normally distributed with N(mux,stdx**2)')?
(just to come up with a story that implies a nice transformation)
invnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, a=-np.inf)
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
Note: I'm working from my version of scipy.stats.distribution.
But this script runs under scipy 0.6.0 (checked with numpy: 1.2.0rc2 and python 2.4)
This is not yet thoroughly tested, polished or optimized
TODO:
* numargs handling is not yet working properly, numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution is untested and incomplete
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
Created on Tuesday, October 28, 2008, 12:40:37 PM
Author: josef-pktd
License: BSD
'''
from scipy import integrate # for scipy 0.6.0
from scipy import stats, info
from scipy.stats import distributions
def get_u_argskwargs(**kwargs):
    '''Extract the arguments for the underlying distribution.

    Keyword arguments prefixed with ``u_`` are collected into ``u_kwargs``
    with the prefix stripped; ``u_args`` (if given) is returned separately.
    Used by Transf_gen and TransfTwo_gen.
    '''
    u_kwargs = dict((k.replace('u_','',1),v) for k,v in iteritems(kwargs)
                    if k.startswith('u_'))
    # bug fix: after stripping the prefix the key is 'args', not 'u_args';
    # the old code popped 'u_args' and therefore always returned None
    u_args = u_kwargs.pop('args', None)
    return u_args, u_kwargs
class Transf_gen(distributions.rv_continuous):
    '''a class for non-linear monotonic transformation of a continuous random variable

    The transformed variable is ``y = func(x)`` with inverse ``funcinv``;
    ``decr=True`` selects a decreasing monotonic transformation.
    '''
    def __init__(self, kls, func, funcinv, *args, **kwargs):
        #print args
        #print kwargs
        self.func = func
        self.funcinv = funcinv
        #explicit for self.__dict__.update(kwargs)
        #need to set numargs because inspection does not work
        self.numargs = kwargs.pop('numargs', 0)
        #print self.numargs
        name = kwargs.pop('name','transfdist')
        longname = kwargs.pop('longname','Non-linear transformed distribution')
        extradoc = kwargs.pop('extradoc',None)
        a = kwargs.pop('a', -np.inf)
        b = kwargs.pop('b', np.inf)
        self.decr = kwargs.pop('decr', False)
        #defines whether it is a decreasing (True)
        # or increasing (False) monotonic transformation
        # remaining ``u_``-prefixed kwargs are for the underlying distribution
        self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
        self.kls = kls   #(self.u_args, self.u_kwargs)
        # possible to freeze the underlying distribution
        super(Transf_gen,self).__init__(a=a, b=b, name = name,
            longname = longname, extradoc = extradoc)
    def _rvs(self, *args, **kwargs):
        # relies on scipy's private ``_size`` attribute
        self.kls._size = self._size
        # NOTE(review): sampling applies ``funcinv`` while _ppf applies
        # ``func``; for y = func(x) these look inconsistent — confirm
        return self.funcinv(self.kls._rvs(*args))
    def _cdf(self,x,*args, **kwargs):
        #print args
        if not self.decr:
            return self.kls._cdf(self.funcinv(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            # decreasing transform flips the cdf
            return 1.0 - self.kls._cdf(self.funcinv(x),*args, **kwargs)
    def _ppf(self, q, *args, **kwargs):
        if not self.decr:
            return self.func(self.kls._ppf(q,*args, **kwargs))
        else:
            return self.func(self.kls._ppf(1-q,*args, **kwargs))
def inverse(x):
    """Return the elementwise reciprocal 1/x."""
    return np.divide(1.0, x)
mux, stdx = 0.05, 0.1
# NOTE(review): the line below overwrites the parameters just set above —
# looks like an example leftover; confirm which parameter set is intended
mux, stdx = 9.0, 1.0
def inversew(x):
    # discount-factor transform y = 1/(1 + mux + stdx*x);
    # uses the module-level mux, stdx
    return 1.0/(1+mux+x*stdx)
def inversew_inv(x):
    # inverse of inversew
    return (1.0/x - 1.0 - mux)/stdx #.np.divide(1.0,x)-10
def identit(x):
    # identity transformation
    return x
# distribution of the discount factor y = 1/(1+mux+stdx*x) with x standard normal
invdnormalg = Transf_gen(stats.norm, inversew, inversew_inv, decr=True, #a=-np.inf,
                numargs = 0, name = 'discf', longname = 'normal-based discount factor',
                extradoc = '\ndistribution of discount factor y=1/(1+x)) with x N(0.05,0.1**2)')
# lognormal via exp-transform of the standard normal
# NOTE(review): numargs=2 looks odd for the shape-free normal — confirm
lognormalg = Transf_gen(stats.norm, np.exp, np.log,
                numargs = 2, a=0, name = 'lnnorm',
                longname = 'Exp transformed normal',
                extradoc = '\ndistribution of y = exp(x), with x standard normal'
                'precision for moment andstats is not very high, 2-3 decimals')
# log-gamma distribution: y = log(x) with x gamma distributed
loggammaexpg = Transf_gen(stats.gamma, np.log, np.exp, numargs=1)
## copied form nonlinear_transform_short.py
'''univariate distribution of a non-linear monotonic transformation of a
random variable
'''
from scipy import stats
from scipy.stats import distributions
import numpy as np
class ExpTransf_gen(distributions.rv_continuous):
    '''Distribution of exp(X) based on a given continuous distribution of X.

    The constructor is called with a distribution class and generates the
    distribution of the transformed random variable ``y = exp(x)``.
    '''
    def __init__(self, kls, *args, **kwargs):
        # inspection does not work for generated classes, so numargs has
        # to be given explicitly (default 1)
        self.numargs = kwargs.get('numargs', 1)
        name = kwargs.get('name', 'Log transformed distribution')
        # bug fix: the lower support bound used to be hard-coded as
        # ``a=0`` in the super() call, silently ignoring a user supplied
        # ``a`` (LogTransf_gen already passed it through correctly)
        a = kwargs.get('a', 0)
        super(ExpTransf_gen, self).__init__(a=a, name=name)
        self.kls = kls

    def _cdf(self, x, *args):
        # P(exp(X) <= x) = P(X <= log(x))
        return self.kls.cdf(np.log(x), *args)

    def _ppf(self, q, *args):
        return np.exp(self.kls.ppf(q, *args))
class LogTransf_gen(distributions.rv_continuous):
    '''Distribution of log(X) based on a given continuous distribution of X.

    The constructor is called with a distribution class and generates the
    distribution of the transformed random variable ``y = log(x)``.
    '''
    def __init__(self, kls, *args, **kwargs):
        # inspection does not work for generated classes, so numargs has
        # to be given explicitly (default 1)
        self.numargs = kwargs.get('numargs', 1)
        name = kwargs.get('name', 'Log transformed distribution')
        a = kwargs.get('a', 0)
        super(LogTransf_gen, self).__init__(a=a, name=name)
        self.kls = kls

    def _cdf(self, x, *args):
        # P(log(X) <= x) = P(X <= exp(x))
        return self.kls._cdf(np.exp(x), *args)

    def _ppf(self, q, *args):
        return np.log(self.kls._ppf(q, *args))
## copied from transformtwo.py
'''
Created on Apr 28, 2009
@author: Josef Perktold
'''
''' A class for the distribution of a non-linear u-shaped or hump shaped transformation of a
continuous random variable
This is a companion to the distributions of non-linear monotonic transformation to the case
when the inverse mapping is a 2-valued correspondence, for example for absolute value or square
simplest usage:
example: create squared distribution, i.e. y = x**2,
where x is normal or t distributed
This class does not work well for distributions with difficult shapes,
e.g. 1/x where x is standard normal, because of the singularity and jump at zero.
This verifies for normal - chi2, normal - halfnorm, foldnorm, and t - F
TODO:
* numargs handling is not yet working properly,
numargs needs to be specified (default = 0 or 1)
* feeding args and kwargs to underlying distribution works in t distribution example
* distinguish args and kwargs for the transformed and the underlying distribution
- currently all args and no kwargs are transmitted to underlying distribution
- loc and scale only work for transformed, but not for underlying distribution
- possible to separate args for transformation and underlying distribution parameters
* add _rvs as method, will be faster in many cases
'''
class TransfTwo_gen(distributions.rv_continuous):
    '''Distribution based on a non-monotonic (u- or hump-shaped transformation)

    the constructor can be called with a distribution class, and functions
    that define the non-linear transformation.
    and generates the distribution of the transformed random variable

    Note: the transformation, it's inverse and derivatives need to be fully
    specified: func, funcinvplus, funcinvminus, derivplus, derivminus.
    Currently no numerical derivatives or inverse are calculated

    This can be used to generate distribution instances similar to the
    distributions in scipy.stats.
    '''
    #a class for non-linear non-monotonic transformation of a continuous random variable
    def __init__(self, kls, func, funcinvplus, funcinvminus, derivplus,
                 derivminus, *args, **kwargs):
        #print args
        #print kwargs
        # forward transform and its two inverse branches with derivatives
        self.func = func
        self.funcinvplus = funcinvplus
        self.funcinvminus = funcinvminus
        self.derivplus = derivplus
        self.derivminus = derivminus
        #explicit for self.__dict__.update(kwargs)
        #need to set numargs because inspection does not work
        self.numargs = kwargs.pop('numargs', 0)
        #print self.numargs
        name = kwargs.pop('name','transfdist')
        longname = kwargs.pop('longname','Non-linear transformed distribution')
        extradoc = kwargs.pop('extradoc',None)
        a = kwargs.pop('a', -np.inf) # attached to self in super
        b = kwargs.pop('b', np.inf)  # self.a, self.b would be overwritten
        # ``shape`` is 'u' or 'hump' (not a distribution shape parameter)
        self.shape = kwargs.pop('shape', False)
        #defines whether it is a `u` shaped or `hump' shaped
        # transformation
        self.u_args, self.u_kwargs = get_u_argskwargs(**kwargs)
        self.kls = kls   #(self.u_args, self.u_kwargs)
        # possible to freeze the underlying distribution
        super(TransfTwo_gen,self).__init__(a=a, b=b, name = name,
                        shapes = kls.shapes,
                        longname = longname, extradoc = extradoc)
        # add enough info for self.freeze() to be able to reconstruct the instance
        try:
            self._ctor_param.update(dict(kls=kls, func=func,
                funcinvplus=funcinvplus, funcinvminus=funcinvminus,
                derivplus=derivplus, derivminus=derivminus,
                shape=self.shape))
        except AttributeError:
            # scipy < 0.14 does not have this, ignore and do nothing
            pass
    def _rvs(self, *args):
        # relies on scipy's private ``_size``; sample x, then transform
        self.kls._size = self._size #size attached to self, not function argument
        return self.func(self.kls._rvs(*args))
    def _pdf(self,x,*args, **kwargs):
        #print args
        # density combines both inverse branches; for 'hump' the branch
        # derivatives change sign relative to 'u', hence the sign flip
        if self.shape == 'u':
            signpdf = 1
        elif self.shape == 'hump':
            signpdf = -1
        else:
            raise ValueError('shape can only be `u` or `hump`')
        return signpdf * (self.derivplus(x)*self.kls._pdf(self.funcinvplus(x),*args, **kwargs) -
                self.derivminus(x)*self.kls._pdf(self.funcinvminus(x),*args, **kwargs))
        #note scipy _cdf only take *args not *kwargs
    def _cdf(self,x,*args, **kwargs):
        #print args
        if self.shape == 'u':
            return self.kls._cdf(self.funcinvplus(x),*args, **kwargs) - \
                   self.kls._cdf(self.funcinvminus(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            # 'hump' case: complement of the survival function
            return 1.0 - self._sf(x,*args, **kwargs)
    def _sf(self,x,*args, **kwargs):
        #print args
        if self.shape == 'hump':
            return self.kls._cdf(self.funcinvplus(x),*args, **kwargs) - \
                   self.kls._cdf(self.funcinvminus(x),*args, **kwargs)
            #note scipy _cdf only take *args not *kwargs
        else:
            return 1.0 - self._cdf(x, *args, **kwargs)
    def _munp(self, n,*args, **kwargs):
        # moments by pdf integration (scipy private helper)
        return self._mom0_sc(n,*args)
    # ppf might not be possible in general case?
    # should be possible in symmetric case
    #    def _ppf(self, q, *args, **kwargs):
    #        if self.shape == 'u':
    #            return self.func(self.kls._ppf(q,*args, **kwargs))
    #        elif self.shape == 'hump':
    #            return self.func(self.kls._ppf(1-q,*args, **kwargs))
#TODO: rename these functions to have unique names
class SquareFunc(object):
    '''Quadratic transform y = x**2 together with its two inverse branches
    and their derivatives.

    Instance methods (rather than class methods) are used so that an
    extension to parameterized functions stays possible.
    '''

    def inverseplus(self, x):
        # positive branch of the inverse of y = x**2
        return np.sqrt(x)

    def inverseminus(self, x):
        # negative branch of the inverse of y = x**2
        return 0.0 - np.sqrt(x)

    def derivplus(self, x):
        # derivative of the positive inverse branch
        return 0.5 / np.sqrt(x)

    def derivminus(self, x):
        # derivative of the negative inverse branch
        return 0.0 - 0.5 / np.sqrt(x)

    def squarefunc(self, x):
        # the forward transformation itself
        return np.power(x, 2)
sqfunc = SquareFunc()
# distribution of X**2 for X ~ N(0, 1) (chi-square with 1 df, cf. the
# module notes above: "This verifies for normal - chi2")
squarenormalg = TransfTwo_gen(stats.norm, sqfunc.squarefunc, sqfunc.inverseplus,
                sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
                shape='u', a=0.0, b=np.inf,
                numargs = 0, name = 'squarenorm', longname = 'squared normal distribution',
                extradoc = '\ndistribution of the square of a normal random variable' +\
                ' y=x**2 with x N(0.0,1)')
                #u_loc=l, u_scale=s)
# distribution of X**2 for X ~ t(df) (cf. module notes: "t - F")
# bug fix: ``name`` used to be the copy-pasted 'squarenorm', colliding with
# squarenormalg above; the longname already said "squared t distribution"
squaretg = TransfTwo_gen(stats.t, sqfunc.squarefunc, sqfunc.inverseplus,
                sqfunc.inverseminus, sqfunc.derivplus, sqfunc.derivminus,
                shape='u', a=0.0, b=np.inf,
                numargs = 1, name = 'squaret', longname = 'squared t distribution',
                extradoc = '\ndistribution of the square of a t random variable' +\
                ' y=x**2 with x t(dof,0.0,1)')
def inverseplus(x):
    # positive branch of the inverse of y = -x**2
    return np.sqrt(-x)

def inverseminus(x):
    # negative branch of the inverse of y = -x**2
    return 0.0 - np.sqrt(-x)

def derivplus(x):
    # derivative of the positive inverse branch
    return 0.0 - 0.5 / np.sqrt(-x)

def derivminus(x):
    # derivative of the negative inverse branch
    return 0.5 / np.sqrt(-x)

def negsquarefunc(x):
    # forward transform y = -x**2
    return -np.power(x, 2)
# distribution of -X**2 for X ~ N(0, 1): hump-shaped transform with
# support (-inf, 0]
negsquarenormalg = TransfTwo_gen(stats.norm, negsquarefunc, inverseplus, inverseminus,
                derivplus, derivminus, shape='hump', a=-np.inf, b=0.0,
                numargs = 0, name = 'negsquarenorm', longname = 'negative squared normal distribution',
                extradoc = '\ndistribution of the negative square of a normal random variable' +\
                ' y=-x**2 with x N(0.0,1)')
                #u_loc=l, u_scale=s)
def inverseplus(x):
    # positive branch of the inverse of y = |x|
    return x

def inverseminus(x):
    # negative branch of the inverse of y = |x|
    return 0.0 - x

def derivplus(x):
    # derivative of the positive inverse branch
    return 1.0

def derivminus(x):
    # derivative of the negative inverse branch
    return 0.0 - 1.0

def absfunc(x):
    # forward transform y = |x|
    return np.abs(x)
# distribution of |X| for X ~ N(0, 1) (half-normal, cf. module notes:
# "normal - halfnorm")
absnormalg = TransfTwo_gen(stats.norm, np.abs, inverseplus, inverseminus,
                derivplus, derivminus, shape='u', a=0.0, b=np.inf,
                numargs = 0, name = 'absnorm', longname = 'absolute of normal distribution',
                extradoc = '\ndistribution of the absolute value of a normal random variable' +\
                ' y=abs(x) with x N(0,1)')
#copied from mvncdf.py
'''multivariate normal probabilities and cumulative distribution function
a wrapper for scipy.stats.kde.mvndst
SUBROUTINE MVNDST( N, LOWER, UPPER, INFIN, CORREL, MAXPTS,
& ABSEPS, RELEPS, ERROR, VALUE, INFORM )
*
* A subroutine for computing multivariate normal probabilities.
* This subroutine uses an algorithm given in the paper
* "Numerical Computation of Multivariate Normal Probabilities", in
* J. of Computational and Graphical Stat., 1(1992), pp. 141-149, by
* Alan Genz
* Department of Mathematics
* Washington State University
* Pullman, WA 99164-3113
* Email : AlanGenz@wsu.edu
*
* Parameters
*
* N INTEGER, the number of variables.
* LOWER REAL, array of lower integration limits.
* UPPER REAL, array of upper integration limits.
* INFIN INTEGER, array of integration limits flags:
* if INFIN(I) < 0, Ith limits are (-infinity, infinity);
* if INFIN(I) = 0, Ith limits are (-infinity, UPPER(I)];
* if INFIN(I) = 1, Ith limits are [LOWER(I), infinity);
* if INFIN(I) = 2, Ith limits are [LOWER(I), UPPER(I)].
* CORREL REAL, array of correlation coefficients; the correlation
* coefficient in row I column J of the correlation matrix
* should be stored in CORREL( J + ((I-2)*(I-1))/2 ), for J < I.
*              The correlation matrix must be positive semidefinite.
* MAXPTS INTEGER, maximum number of function values allowed. This
* parameter can be used to limit the time. A sensible
* strategy is to start with MAXPTS = 1000*N, and then
* increase MAXPTS if ERROR is too large.
* ABSEPS REAL absolute error tolerance.
* RELEPS REAL relative error tolerance.
* ERROR REAL estimated absolute error, with 99% confidence level.
* VALUE REAL estimated value for the integral
* INFORM INTEGER, termination status parameter:
* if INFORM = 0, normal completion with ERROR < EPS;
* if INFORM = 1, completion with ERROR > EPS and MAXPTS
*                     function values used; increase MAXPTS to
* decrease ERROR;
* if INFORM = 2, N > 500 or N < 1.
*
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[10.0,10.0],[0,0],[0.5])
(2e-016, 1.0, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[100.0,100.0],[0,0],[0.0])
(2e-016, 1.0, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[1.0,1.0],[0,0],[0.0])
(2e-016, 0.70786098173714096, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.001,1.0],[0,0],[0.0])
(2e-016, 0.42100802096993045, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.001,10.0],[0,0],[0.0])
(2e-016, 0.50039894221391101, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.001,100.0],[0,0],[0.0])
(2e-016, 0.50039894221391101, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.01,100.0],[0,0],[0.0])
(2e-016, 0.5039893563146316, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.1,100.0],[0,0],[0.0])
(2e-016, 0.53982783727702899, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.1,100.0],[2,2],[0.0])
(2e-016, 0.019913918638514494, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.0])
(2e-016, 0.25, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.0,0.0],[-1,0],[0.0])
(2e-016, 0.5, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.0,0.0],[-1,0],[0.5])
(2e-016, 0.5, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.5])
(2e-016, 0.33333333333333337, 0)
>>> scipy.stats.kde.mvn.mvndst([0.0,0.0],[0.0,0.0],[0,0],[0.99])
(2e-016, 0.47747329317779391, 0)
'''
#from scipy.stats import kde
informcode = {0: 'normal completion with ERROR < EPS',
1: '''completion with ERROR > EPS and MAXPTS function values used;
increase MAXPTS to decrease ERROR;''',
2: 'N > 500 or N < 1'}
def mvstdnormcdf(lower, upper, corrcoef, **kwds):
    '''standardized multivariate normal cumulative distribution function

    This is a wrapper for scipy.stats.kde.mvn.mvndst which calculates
    a rectangular integral over a standardized multivariate normal
    distribution.

    This function assumes standardized scale, that is the variance in
    each dimension is one, but correlation can be arbitrary, so the
    covariance matrix equals the correlation matrix.

    Parameters
    ----------
    lower, upper : array_like, 1d
        lower and upper integration limits with length equal to the number
        of dimensions of the multivariate normal distribution. It can
        contain -np.inf or np.inf for open integration intervals.
    corrcoef : float or array_like
        specifies the correlation matrix in one of three ways, see Notes.
    **kwds : optional keyword parameters passed through to mvndst
        * maxpts : int, maximum number of function values allowed. This
          parameter can be used to limit the time. A sensible strategy is
          to start with `maxpts` = 1000*N, and then increase `maxpts` if
          ERROR is too large (defaults to 10000*N here for N > 2).
        * abseps : float, absolute error tolerance.
        * releps : float, relative error tolerance.

    Returns
    -------
    cdfvalue : float
        value of the integral.

    Raises
    ------
    ValueError
        if the bounds are not one-dimensional, have different lengths, or
        `corrcoef` has a shape/size compatible with none of the three
        accepted forms.

    Notes
    -----
    The correlation matrix `corrcoef` can be given in 3 different ways:
    for a two-dimensional normal only the scalar correlation coefficient;
    for general dimension either a one-dimensional array of the
    off-diagonal correlation coefficients stacked by rows, or the full
    square correlation matrix (from which the strict lower triangle is
    extracted).

    See Also
    --------
    mvnormcdf : cdf of multivariate normal distribution without
        standardization
    '''
    n = len(lower)
    # Converting to arrays makes the ndim/shape validation below possible.
    lower = np.array(lower)
    upper = np.array(upper)
    corrcoef = np.array(corrcoef)

    if (lower.ndim != 1) or (upper.ndim != 1):
        raise ValueError('can handle only 1D bounds')
    if len(upper) != n:
        raise ValueError('bounds have different lengths')

    # Normalize the three accepted corrcoef forms to the flat vector of
    # off-diagonal coefficients that mvndst expects.  Note: the size must
    # be computed with integer division; n*(n-1)/2.0 is a float, which
    # np.zeros/len comparisons reject on Python 3.
    if n == 2 and corrcoef.size == 1:
        correl = corrcoef
    elif corrcoef.ndim == 1 and len(corrcoef) == n * (n - 1) // 2:
        correl = corrcoef
    elif corrcoef.shape == (n, n):
        correl = corrcoef[np.tril_indices(n, -1)]
    else:
        raise ValueError('corrcoef has incorrect dimension')

    if 'maxpts' not in kwds:
        if n > 2:
            kwds['maxpts'] = 10000 * n

    # Encode the integration-limit kinds for mvndst:
    #   0: only lower bound finite, 1: only upper, 2: both, -1: neither.
    lowinf = np.isneginf(lower)
    uppinf = np.isposinf(upper)
    infin = 2.0 * np.ones(n)
    np.putmask(infin, lowinf, 0)
    np.putmask(infin, uppinf, 1)
    # this has to be last: both bounds infinite overrides the other two
    np.putmask(infin, lowinf * uppinf, -1)

    error, cdfvalue, inform = scipy.stats.kde.mvn.mvndst(
        lower, upper, infin, correl, **kwds)
    if inform:
        print('something wrong', informcode[inform], error)
    return cdfvalue
def mvnormcdf(upper, mu, cov, lower=None, **kwds):
    '''multivariate normal cumulative distribution function

    This is a wrapper for scipy.stats.kde.mvn.mvndst which calculates
    a rectangular integral over a multivariate normal distribution.

    Parameters
    ----------
    lower, upper : array_like, 1d
        lower and upper integration limits with length equal to the number
        of dimensions of the multivariate normal distribution. It can
        contain -np.inf or np.inf for open integration intervals.
    mu : array_like, 1d
        list or array of means
    cov : array_like, 2d
        specifies covariance matrix
    **kwds : optional keyword parameters forwarded to the integrator
        * maxpts : int, maximum number of function values allowed. This
          parameter can be used to limit the time. A sensible strategy is
          to start with `maxpts` = 1000*N, and then increase `maxpts` if
          ERROR is too large.
        * abseps : float, absolute error tolerance.
        * releps : float, relative error tolerance.

    Returns
    -------
    cdfvalue : float
        value of the integral

    Notes
    -----
    This function normalizes the location and scale of the multivariate
    normal distribution and then uses `mvstdnormcdf` to do the integration.

    See Also
    --------
    mvstdnormcdf : location and scale standardized multivariate normal cdf
    '''
    upper = np.array(upper)
    if lower is None:
        # No lower limit given: integrate from -inf in every dimension.
        lower = -np.ones(upper.shape) * np.inf
    else:
        lower = np.array(lower)
    cov = np.array(cov)

    # Standardize the limits: shift by the mean, scale by the standard
    # deviations taken from the diagonal of the covariance matrix.
    sd = np.sqrt(np.diag(cov))
    low_std = (lower - mu) / sd
    upp_std = (upper - mu) / sd

    # Convert the covariance matrix into a correlation matrix by dividing
    # each row and each column by the standard deviations.
    sd_row = np.atleast_2d(sd)
    corr = cov / sd_row / sd_row.T

    return mvstdnormcdf(low_std, upp_std, corr, **kwds)
if __name__ == '__main__':
    # Run the module's demo routine (defined elsewhere in this file).
    examples_transf()
| |
# vi: ts=8 sts=4 sw=4 et
#
# loader.py: module loader
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import imp
import logging
import os
import os.path
import stat
import sys
import types

from draco2.core.exception import *
from draco2.util.misc import get_backtrace
from draco2.util.loader import module_from_path
class FileLoader(object):
    """Python 2.3 style module loader that loads a regular file."""

    def __init__(self, fname, loader=None):
        self.m_fname = fname    # path of the source file to load
        self.m_loader = loader  # owning Loader, used for change tracking

    def load_module(self, fullname):
        """Load the module `fullname' from self.m_fname.

        The source is compiled and executed in a fresh module namespace;
        the module is registered in sys.modules and returned.
        """
        # open() replaces the py2-only file(); types.ModuleType replaces
        # the deprecated imp.new_module(); exec(code, d) replaces the
        # py2-only 'exec code in d' statement.  The file is closed before
        # executing the code instead of in a wrapping finally block.
        fin = open(self.m_fname)
        try:
            source = fin.read()
        finally:
            fin.close()
        mod = types.ModuleType(fullname)
        mod.__file__ = self.m_fname
        mod.__loader__ = self
        code = compile(source, self.m_fname, 'exec')
        exec(code, mod.__dict__)
        sys.modules[fullname] = mod
        if self.m_loader:
            self.m_loader._watch_module(mod)
        return mod
class PackageLoader(object):
    """Python 2.3 style module loader for an empty package directory."""

    def __init__(self, dname, loader=None):
        self.m_dname = dname    # directory the package maps to
        self.m_loader = loader  # owning Loader (currently unused here)

    def load_module(self, fullname):
        """Return an empty module for `fullname'."""
        # types.ModuleType is the portable replacement for the deprecated
        # imp.new_module() (the imp module was removed in Python 3.12).
        mod = types.ModuleType(fullname)
        sys.modules[fullname] = mod
        mod.__file__ = self.m_dname
        mod.__loader__ = self
        # An empty __path__ marks this module as a (childless) package.
        mod.__path__ = []
        return mod
class DracoImporter(object):
    """Python 2.3 style module importer.

    This importer is registered to sys.meta_path when Draco starts up.
    It is used to make the document root look like a single Python
    package.
    """

    def __init__(self, package, root, loader=None):
        self.m_package = package  # package prefix this importer serves
        self.m_root = root        # filesystem root backing the package
        self.m_loader = loader    # owning Loader, passed to sub-loaders

    def find_module(self, fullname, path=None):
        """Return an importer object for the module `fullname'."""
        if not fullname.startswith(self.m_package):
            return

        # Map the dotted name (minus our package prefix) onto a path
        # below the filesystem root.
        relative = fullname[len(self.m_package)+1:].replace('.', '/')
        fname = os.path.normpath(self.m_root + '/' + relative)

        def _stat(p):
            # Return the stat result, or None when the path is missing.
            try:
                return os.stat(p)
            except OSError:
                return None

        # A directory becomes an empty package; a .py file becomes a
        # regular module.  Anything else is not ours (implicit None).
        st = _stat(fname)
        if st and stat.S_ISDIR(st.st_mode):
            return PackageLoader(fname, self.m_loader)
        st = _stat(fname + '.py')
        if st and stat.S_ISREG(st.st_mode):
            return FileLoader(fname + '.py', self.m_loader)
class Loader(object):
    """Draco Module loader.

    This loader is used by Draco to load files that define customization
    classes.
    """

    def __init__(self):
        """Constructor."""
        self.m_scopes = {}       # scope name -> base directory
        self.m_modules = set()   # names of modules loaded through us
        self.m_changectx = None  # change-manager context, if installed

    @classmethod
    def _create(cls, api):
        """Factory function to create a Loader object."""
        loader = cls()
        if hasattr(api, 'changes'):
            loader._set_change_manager(api.changes)
        docroot = api.options['documentroot']
        loader.add_scope('__docroot__', docroot)
        return loader

    def _set_change_manager(self, changes):
        """Use change manager `changes'."""
        context = changes.get_context('draco2.core.loader')
        context.add_callback(self._change_callback)
        self.m_changectx = context

    def _change_callback(self, api):
        """Callback that is run by the change manager whenever a file
        we loaded changed. This will clear all references to loaded
        modules.
        """
        logger = logging.getLogger('draco2.core.loader')
        logger.info('Releasing %d modules.' % len(self.m_modules))
        for mod in self.m_modules:
            del sys.modules[mod]
        self.m_modules.clear()

    def _watch_module(self, module):
        """Watch module `module' for changes."""
        self.m_modules.add(module.__name__)
        if self.m_changectx:
            self.m_changectx.add_file(module.__file__)

    def add_scope(self, scope, dirbase):
        """Add a scope to the loader.

        A scope is a directory below which Python sources can be loaded.
        """
        self.m_scopes[scope] = dirbase
        importer = DracoImporter(scope, dirbase, self)
        sys.meta_path.insert(0, importer)

    def _import(self, modname):
        """Import a module `modname', wrapping failures in DracoSiteError."""
        try:
            mod = __import__(modname, globals(), locals())
        # 'except E as err' / 'raise E(msg)' replace the py2-only
        # 'except E, err' / 'raise E, msg' forms used previously.
        except SyntaxError as err:
            # Keep the line number so the error can point at the source.
            error = DracoSiteError('Syntax error in module.')
            error.filename = modname
            error.lineno = err.lineno
            error.backtrace = get_backtrace()
            raise error
        except Exception:
            # Covers ImportError and any error raised by module top-level
            # code (the old '(ImportError, Exception)' tuple was redundant).
            error = DracoSiteError('Could not import module.')
            error.filename = modname
            error.backtrace = get_backtrace()
            raise error
        # __import__ returns the top-level package; walk down to the
        # requested submodule.
        parts = modname.split('.')
        for part in parts[1:]:
            mod = getattr(mod, part)
        return mod

    def load_class(self, fname, typ, scope, default=None):
        """Load Python source `fname' in scope `scope' and look for
        subclasses of `typ'. Return the most derived subclass of `typ'
        which is not `typ' itself, or `default' if none is found.
        """
        clslist = self.load_classes(fname, typ, scope)
        if not clslist:
            return default
        # Sort by MRO length (key= replaces the py2-only cmp function);
        # the most derived class ends up last.
        clslist.sort(key=lambda cls: len(cls.__mro__))
        return clslist[-1]

    def load_classes(self, fname, typ, scope):
        """Load Python source `fname' in scope `scope' and look for
        subclasses of `typ'. Return a list of all strict subclasses,
        i.e. subclasses which are not the class itself.
        """
        if scope not in self.m_scopes:
            raise DracoSiteError('Unknown scope %s' % scope)
        path = self.m_scopes[scope] + os.sep + fname
        try:
            # Existence check only; the stat result itself is unused.
            os.stat(path)
        except OSError:
            # It is not an error to load from a non-existing module.
            return []
        modname = module_from_path(scope, fname)
        try:
            mod = sys.modules[modname]
        except KeyError:
            mod = self._import(modname)
        clslist = []
        for symbol in dir(mod):
            try:
                attr = getattr(mod, symbol)
                # Only classes defined in the module itself qualify, not
                # ones it merely imported.
                if isinstance(attr, type) and issubclass(attr, typ) and \
                        attr.__module__ == mod.__name__:
                    clslist.append(attr)
            except (AttributeError, TypeError):
                pass
        return clslist
| |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all types of tests from one unified interface."""
import argparse
import collections
import itertools
import logging
import os
import signal
import sys
import threading
import unittest
import devil_chromium
from devil import base_error
from devil import devil_env
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
from devil.android import forwarder
from devil.android import ports
from devil.utils import reraiser_thread
from devil.utils import run_tests_helper
from pylib import constants
from pylib.constants import host_paths
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.linker import setup as linker_setup
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
# Static devil configuration checked into the tree, resolved relative to the
# source root.  NOTE(review): not referenced in this chunk — presumably
# consumed by devil initialization elsewhere; confirm before removing.
_DEVIL_STATIC_CONFIG_FILE = os.path.abspath(os.path.join(
    host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'devil_config.json'))
def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  # The BUILDTYPE environment variable provides the default build type;
  # --debug/--release override it and are mutually exclusive.
  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))
  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', '--num-retries', dest='num_retries',
                     type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', '--test-launcher-summary-output',
                     dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')

  # Logcat can go to a per-device directory or one merged file, not both.
  logcat_output_group = group.add_mutually_exclusive_group()
  logcat_output_group.add_argument(
      '--logcat-output-dir',
      help='If set, will dump logcats recorded during test run to directory. '
           'File names will be the device ids with timestamps.')
  logcat_output_group.add_argument(
      '--logcat-output-file',
      help='If set, will merge logcats recorded during test run and dump them '
           'to the specified file.')

  class FastLocalDevAction(argparse.Action):
    # Expands --fast-local-dev into the individual flags that make local
    # iteration fastest (see the help text below).
    def __call__(self, parser, namespace, values, option_string=None):
      namespace.verbose_count = max(namespace.verbose_count, 1)
      namespace.num_retries = 0
      namespace.enable_device_cache = True
      namespace.enable_concurrent_adb = True
      namespace.skip_clear_data = True
      namespace.extract_test_list_from_filter = True

  # NOTE(review): nargs=0 makes this a zero-argument flag, so the action
  # always receives an empty list and type=bool is effectively unused.
  group.add_argument('--fast-local-dev', type=bool, nargs=0,
                     action=FastLocalDevAction,
                     help='Alias for: --verbose --num-retries=0 '
                          '--enable-device-cache --enable-concurrent-adb '
                          '--skip-clear-data --extract-test-list-from-filter')
def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)

  # When an explicit adb binary was given, hand it to devil as a custom
  # dependency for the current platform.
  custom_deps = (
      {'adb': {devil_env.GetPlatform(): [args.adb_path]}}
      if args.adb_path else None)
  devil_chromium.Initialize(
      output_directory=constants.GetOutDirectory(),
      custom_deps=custom_deps)

  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  path_entries = os.environ['PATH'].split(os.pathsep)
  if adb_dir and adb_dir not in path_entries:
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
def AddRemoteDeviceOptions(parser):
  """Adds remote-device service options to |parser|."""
  group = parser.add_argument_group('Remote Device Options')

  # Trigger/collect allow splitting a run into an async submit phase and a
  # later result-collection phase, sharing the test_run_id via a file.
  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in given file path. '))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use. (http or https)')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help=('Type of device to run on. iOS or android'))
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Times to retry finding remote device')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')
  group.add_argument('--test-timeout', type=int,
                     help='Test run timeout in seconds.')

  # A minimum OS and an exact OS list are mutually exclusive selections.
  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  # Secrets and keys may each come inline or from a file, never both.
  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')
def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))
  group.add_argument('--blacklist-file', help='Device blacklist file.')
  group.add_argument('--enable-device-cache', action='store_true',
                     help='Cache device state to disk between runs')
  group.add_argument('--enable-concurrent-adb', action='store_true',
                     help='Run multiple adb commands at the same time, even '
                          'for the same device.')
  group.add_argument('--skip-clear-data', action='store_true',
                     help='Do not wipe app data between tests. Use this to '
                          'speed up local development and never on bots '
                          '(increases flakiness)')
def AddGTestOptions(parser):
  """Adds gtest options to |parser|.

  Also pulls in the device, common and remote-device option groups that
  gtest runs depend on.
  """
  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help='Executable name of the test suite to run.')
  group.add_argument('--executable-dist-dir',
                     help="Path to executable's dist directory for native"
                          " (non-apk) tests.")
  group.add_argument('--test-apk-incremental-install-script',
                     help='Path to install script for the test apk.')
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', '--shard-timeout',
                     dest='shard_timeout', type=int, default=120,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')
  group.add_argument('--repeat', '--gtest_repeat', '--gtest-repeat',
                     dest='repeat', type=int, default=0,
                     help='Number of times to repeat the specified set of '
                          'tests.')
  group.add_argument('--break-on-failure', '--break_on_failure',
                     dest='break_on_failure', action='store_true',
                     help='Whether to break on failure.')
  # Fix: the help text previously ended 'bots (http://crbug.com/549214'
  # with no closing parenthesis.
  group.add_argument('--extract-test-list-from-filter',
                     action='store_true',
                     help='When a test filter is specified, and the list of '
                          'tests can be determined from it, skip querying the '
                          'device for the list of all tests. Speeds up local '
                          'development, but is not safe to use on bots ('
                          'http://crbug.com/549214)')

  # A filter string and a filter file are mutually exclusive.
  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddLinkerTestOptions(parser):
  """Adds linker test options to |parser|."""
  linker_group = parser.add_argument_group('Linker Test Options')
  linker_group.add_argument('-f', '--gtest-filter', dest='test_filter',
                            help='googletest-style filter string.')
  # Linker tests also need the common and device option groups.
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""
  add = argument_group.add_argument
  add('-f', '--test-filter', '--gtest_filter', '--gtest-filter',
      dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  add('--repeat', dest='repeat', type=int, default=0,
      help='Number of times to repeat the specified set of tests.')
  add('--break-on-failure', '--break_on_failure',
      dest='break_on_failure', action='store_true',
      help='Whether to break on failure.')
  add('-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  add('-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  add('--screenshot-directory', dest='screenshot_dir',
      help='Capture screenshots of test failures')
  add('--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  add('--official-build', action='store_true',
      help='Run official build tests.')
  add('--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
  add('--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')
def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |options| with defaults."""
  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  else:
    # No explicit annotations: an explicit filter means "no annotation
    # restriction", otherwise fall back to the default annotation set.
    args.annotations = [] if args.test_filter else [
        'Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
        'EnormousTest', 'IntegrationTest']

  exclude_str = args.exclude_annotation_str
  args.exclude_annotations = exclude_str.split(',') if exclude_str else []
def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  # Instrumentation runs share the generic Java test options.
  AddJavaTestOptions(group)

  # Running only Java or only Python host-driven tests are exclusive modes.
  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='DEPRECATED')

  group.add_argument('--host-driven-root',
                     help='DEPRECATED')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test',
                     help='Path or name of the apk under test.')
  group.add_argument('--apk-under-test-incremental-install-script',
                     help='Path to install script for the --apk-under-test.')
  group.add_argument('--test-apk', required=True,
                     help='Path or name of the apk containing the tests '
                          '(name is without the .apk extension; '
                          'e.g. "ContentShellTest").')
  group.add_argument('--test-apk-incremental-install-script',
                     help='Path to install script for the --test-apk.')
  group.add_argument('--additional-apk', action='append',
                     dest='additional_apks', default=[],
                     help='Additional apk that must be installed on '
                          'the device when the tests are run')
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')
  group.add_argument('--timeout-scale', type=float,
                     help='Factor by which timeouts should be scaled.')
  group.add_argument('--strict-mode', dest='strict_mode', default='testing',
                     help='StrictMode command-line flag set on the device, '
                          'death/testing to kill the process, off to stop '
                          'checking, flash to flash only. Default testing.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""
  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  group.add_argument(
      '--coverage-dir', dest='coverage_dir',
      help='Directory to store coverage info.')
  # Only the common options are added; no device/remote option groups.
  AddCommonOptions(parser)
def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""
  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  # Parsed as a comma-separated list by ProcessMonkeyTestOptions.
  group.add_argument(
      '--category', default='',
      help='A list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s). ')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  # An empty --category string is passed through unchanged.
  categories = args.category.split(',') if args.category else args.category

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      categories,
      args.throttle,
      args.seed,
      args.extra_args)
def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')
  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--repeat', dest='repeat', type=int, default=0,
      help='Number of times to repeat the uirobot test.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)
def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    """Validates that a bare step command and --single-step agree.

    A positional command may only be supplied together with --single-step,
    and --single-step requires such a command.
    """
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  # Exactly one of --single-step / --steps / --print-step must be given.
  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')
  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--get-output-dir-archive', metavar='FILENAME',
      help='Write the chached output directory archived by a step into the'
           ' given ZIP file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have its exit code ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C)')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  group.add_argument('--min-battery-level', type=int,
                     help='Only starts tests when the battery is charged above '
                          'given level.')
  group.add_argument('--known-devices-file', help='Path to known device list.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)
def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  # Collapse the positional step tokens back into one shell command string.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.get_output_dir_archive,
      args.max_battery_temp, args.min_battery_level,
      args.known_devices_file)
def AddPythonTestOptions(parser):
  """Adds python unit test options to |parser|."""
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)
def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests.

  Args:
    args: argparse.Namespace object.
    devices: List of attached devices to run on.

  Returns:
    Integer exit code from the test dispatcher.
  """
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code
def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests.

  Args:
    args: argparse.Namespace object.

  Returns:
    Integer exit code from the junit dispatcher.
  """
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code
def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests.

  Args:
    args: argparse.Namespace object.
    devices: List of attached devices to run on.

  Returns:
    Integer exit code from the test dispatcher.
  """
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  # Monkey tests are not sharded: each device runs the full set.
  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code
def _RunPerfTests(args, active_devices):
  """Subcommand of RunTestsCommands which runs perf tests.

  Args:
    args: argparse.Namespace object.
    active_devices: List of attached devices available for the run.

  Returns:
    Integer exit code (always 0 for a sharded run; see note at the end).
  """
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data,
        perf_options.get_output_dir_archive)

  runner_factory, tests, devices = perf_setup.Setup(
      perf_options, active_devices)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide their own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but have no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  if perf_options.single_step:
    # NOTE(review): passes only the step name, relying on PrintTestOutput's
    # defaults for the remaining parameters -- confirm against its signature.
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests exit_code
  # will be returned on the print_step stage.
  return 0
def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests.

  Args:
    args: argparse.Namespace object.

  Returns:
    0 if the suite passed, 1 otherwise.
  """
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  # Temporarily put the suite's directory first on sys.path so its test
  # modules are importable by bare name; restored in the finally block.
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]
def _GetAttachedDevices(blacklist_file, test_device, enable_cache, num_retries):
  """Get all attached devices.

  Args:
    blacklist_file: Path to device blacklist.
    test_device: Name of a specific device to use.
    enable_cache: Whether to enable checksum caching.
    num_retries: Default number of retries for device operations.

  Returns:
    A list of attached devices.

  Raises:
    device_errors.DeviceUnreachableError: |test_device| was given but is
      not among the attached, healthy devices.
    device_errors.NoDevicesError: no healthy devices are attached.
  """
  blacklist = (device_blacklist.Blacklist(blacklist_file)
               if blacklist_file
               else None)

  attached_devices = device_utils.DeviceUtils.HealthyDevices(
      blacklist, enable_device_files_cache=enable_cache,
      default_retries=num_retries)
  if test_device:
    test_device = [d for d in attached_devices if d == test_device]
    if not test_device:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached device. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return test_device
  else:
    if not attached_devices:
      raise device_errors.NoDevicesError()
    return sorted(attached_devices)
def RunTestsCommand(args): # pylint: disable=too-many-return-statements
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.

  Returns:
    Integer indicated exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)
  logging.info('command: %s', ' '.join(sys.argv))

  # gtest and instrumentation always run in platform mode.
  if args.enable_platform_mode or command in ('gtest', 'instrumentation'):
    return RunTestsInPlatformMode(args)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  # Deferred so that commands that don't need devices (junit, python)
  # never touch adb.
  def get_devices():
    return _GetAttachedDevices(args.blacklist_file, args.test_device,
                               args.enable_device_cache, args.num_retries)

  if command == 'linker':
    return _RunLinkerTests(args, get_devices())
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, get_devices())
  elif command == 'perf':
    return _RunPerfTests(args, get_devices())
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')
# Commands that RunTestsInPlatformMode can handle; everything else is
# rejected with an infra error there.
_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'instrumentation',
  'uirobot',
]
def RunTestsInPlatformMode(args):
  """Runs a supported test type through the platform-mode machinery.

  Builds an environment, test instance, and test run from |args|, executes
  the run args.repeat + 1 times (or forever when args.repeat < 0), logs
  per-iteration and summary results, and optionally writes JSON results.

  Args:
    args: argparse.Namespace object.

  Returns:
    0 if every iteration passed, constants.ERROR_EXIT_CODE otherwise.
  """

  def infra_error(message):
    # Fatal infrastructure problem: log and exit with the infra code.
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, infra_error) as env:
    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, infra_error) as test_run:
        results = []
        # args.repeat < 0 means "repeat forever" (xrange is Python 2).
        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                       else itertools.count())
        # result_counts[test_name][result_type] -> occurrences across runs.
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
          iteration_results = test_run.RunTests()
          if iteration_results is not None:
            iteration_count += 1
            results.append(iteration_results)
            for r in iteration_results.GetAll():
              result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(
                results=iteration_results,
                test_type=test.TestType(),
                test_package=test_run.TestPackage(),
                annotation=getattr(args, 'annotations', None),
                flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                         None))
            if args.break_on_failure and not iteration_results.DidRunPass():
              break

        if iteration_count > 1:
          # display summary results
          # only display results for a test if at least one test did not pass
          all_pass = 0
          tot_tests = 0
          for test_name in result_counts:
            tot_tests += 1
            if any(result_counts[test_name][x] for x in (
                base_test_result.ResultType.FAIL,
                base_test_result.ResultType.CRASH,
                base_test_result.ResultType.TIMEOUT,
                base_test_result.ResultType.UNKNOWN)):
              logging.critical(
                  '%s: %s',
                  test_name,
                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
            else:
              all_pass += 1

          logging.critical('%s of %s tests passed in all %s runs',
                           str(all_pass),
                           str(tot_tests),
                           str(iteration_count))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return (0 if all(r.DidRunPass() for r in results)
          else constants.ERROR_EXIT_CODE)
# (option-adder function, help text) pair describing one subcommand.
CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])

# Maps subcommand name -> its argparse setup function and help string;
# consumed by main() when building the subparsers.
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}
def DumpThreadStacks(_signal, _frame):
  """Signal handler: logs the stack of every live thread (debugging hangs)."""
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)
def main():
  """Entry point: builds the argparse tree and dispatches the chosen command.

  Returns:
    Integer exit code for sys.exit().
  """
  # SIGUSR1 dumps all thread stacks -- useful when the runner hangs.
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  # iteritems() is Python 2 only, consistent with the rest of this file.
  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except: # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())
| |
import re, sys, ast, json, time, threading
try:
from urllib.parse import quote, unquote, urlencode # py3
from urllib.request import urlopen, Request
except:
from urllib import quote, unquote, urlencode # py2.7
from urllib2 import urlopen, Request
from base64 import b64encode, b64decode
from cantools import config
from six import string_types
# When True, fail() includes the traceback in the response body.
DEBUG = True

# memcache stuff -- overwrite with setters
# Default implementations are no-ops that just log; a real backend is
# installed at runtime via set_getmem()/set_setmem()/set_delmem()/set_clearmem().
def getmem(key, tojson=True):
    """No-op cache read; returns None until a backend is installed."""
    log("memcache getting: %s"%(key,))

def setmem(key, val, fromjson=True):
    """No-op cache write."""
    log("memcache setting: %s -> %s"%(key, val))

def delmem(key):
    """No-op cache delete."""
    log("memcache deleting: %s"%(key,))

def clearmem():
    """No-op cache flush."""
    log("memcache clearing")

def set_getmem(f):
    global getmem
    getmem = f

def set_setmem(f):
    global setmem
    setmem = f

def set_delmem(f):
    global delmem
    delmem = f

def set_clearmem(f):
    global clearmem
    clearmem = f
# logging -- overwrite with setlog if ya want
def log(*args, **kwargs):
    """Default logger: dumps args/kwargs to stdout; replace via setlog()."""
    print(args, kwargs)
# encoding, decoding -- may overwrite with setenc/setdec, but not _that_ necessary
# The scrambler is a fixed substitution alphabet from config; flip() maps each
# character to the one half the alphabet away (a self-inverse rotation).
_c = config.scrambler
_cl = len(_c)
_chl = int(_cl / 2)

def flip(c):
    """Substitute c with its rotated counterpart; unknown chars pass through."""
    i = _c.find(c)
    if i == -1:
        return c
    return _c[(i + _chl) % _cl]

def scramble(s):
    """Apply flip() to every character of s (its own inverse)."""
    return "".join([flip(c) for c in s])

def enc(data):
    """base64-encode data (str or bytes), then scramble the result."""
    return scramble(b64encode(hasattr(data, "encode") and data.encode() or data).decode())

def dec(data):
    """Reverse enc(); data already starting with "{" is assumed plain JSON."""
    return data.startswith("{") and data or b64decode(scramble(data)).decode()
# setters (see above) -- swap in custom log/enc/dec implementations
def setlog(f):
    global log
    log = f

def setenc(f):
    global enc
    enc = f

def setdec(f):
    global dec
    dec = f
# threading
# Per-thread storage for request state (request, send, close, header hooks).
localvars = threading.local()

def local(key, fallback=None):
    """Read a value from this thread's storage, or fallback when unset."""
    return getattr(localvars, key, fallback)
# request functions
def deUnicodeDict(d):
    """Recursively rebuild a dict with every key coerced to str.

    Non-dict inputs are returned unchanged; nested dicts are converted
    recursively (lists and other containers are left as-is).
    """
    if not isinstance(d, dict):
        return d
    return dict((str(key), deUnicodeDict(value)) for key, value in d.items())
def cgi_dump():
    """Return the raw request string captured by cgi_load()."""
    return local("request_string")

def cgi_read():
    """Read the request body via the thread's reader (default: stdin)."""
    return local("read", sys.stdin.read)()

def set_read(f):
    """Install a custom request-body reader for this thread."""
    localvars.read = f
def rdec(data):
    """Reverse renc(): base64-decode, then percent-decode back to text."""
    raw = b64decode(data.encode())
    try:
        # py2 path: unquote the byte string, then decode to unicode.
        return unquote(raw).decode()
    except:
        # py3 path: decode the bytes first, then unquote the str.
        return unquote(raw.decode())
def renc(data):
    """Percent-encode text, then base64 the result (inverse of rdec())."""
    try:
        # py2 path: quote() already yields a byte-compatible string.
        quoted = quote(data).encode()
        return b64encode(quoted).decode()
    except:
        # py3 fallback: encode to bytes before quoting.
        quoted = quote(data.encode())
        return b64encode(quoted).decode()
def rb64(data, de=False): # depped
    """Deprecated alias for rec_conv(); logs a warning and delegates."""
    log("[DEPRECATION WARNING] Something just called rb64(), which is depped -- use rec_conv()")
    return rec_conv(data, de)
def rec_conv(data, de=False):
    """Recursively encode (renc) or decode (rdec) every string in a structure.

    de=True decodes, de=False encodes. Dicts are converted in place and
    returned; lists produce new lists; bytes are decoded to str first when
    possible; anything else passes through untouched.
    """
    if isinstance(data, bytes):
        try:
            data = data.decode()
        except:
            pass
    if isinstance(data, string_types):
        converter = rdec if de else renc
        return converter(data)
    if isinstance(data, dict):
        for key in list(data):
            data[key] = rec_conv(data[key], de)
        return data
    if isinstance(data, list):
        return [rec_conv(item, de) for item in data]
    return data
def qs_get(x, y):
    """FieldStorage-backed getter: fetch x (default y), URL-unquoting it."""
    val = localvars.request.getvalue(x, y)
    if val:
        val = unquote(val)
    return val
def cgi_load(force=False):
    """Parse the request body into localvars.request.

    Tries, in order: (scramble-)decode when config.encode is set; JSON;
    Python-literal; falling back to cgi.FieldStorage for form posts.
    With no data, uses an empty dict (force=True or dez server) or fails.
    """
    localvars.request_string = cgi_read()
    data = config.encode and dec(localvars.request_string) or localvars.request_string
    try:
        try:
            jdata = json.loads(data)
        except:
            # Not JSON -- maybe a repr()'d Python literal.
            jdata = ast.literal_eval(data)
        try:
            # Best effort: run strings through the rdec transport decoding.
            localvars.request = rec_conv(jdata, True)
        except:
            localvars.request = jdata
    except:
        # Not structured data at all -- treat as a classic CGI form post.
        import cgi
        localvars.request = cgi.FieldStorage()
        setattr(localvars.request, "get", qs_get)
    if not localvars.request:
        if force or config.web.server == "dez":
            localvars.request = {}
        else:
            fail('no request data!')
def cgi_get(key, choices=None, required=True, default=None):
    """Fetch a request parameter, enforcing required-ness and allowed choices.

    Calls fail() (which ends the response) when a required value is missing
    or an explicit choices list doesn't contain the value.
    """
    request = local("request")
    val = request.get(key, default)
    if val is None and required:
        fail('no value submitted for required field: "%s" [%s]'%(key, request))
    if choices and val not in choices:
        fail('invalid value for "%s": "%s"'%(key, val))
    return val
# response functions
def _send(data):
    """Emit response data via the thread's send hook, or stdout by default."""
    send = local("send")
    if send:
        send(data)
    else:
        print(data)

def set_send(f):
    """Install a custom response writer for this thread."""
    localvars.send = f
def _close():
    """Finish the response via the thread's close hook (default: sys.exit)."""
    local("close", sys.exit)()

def set_close(f):
    """Install a custom response-close hook for this thread."""
    localvars.close = f

def _pre_close():
    # Hook run just before _close(); a no-op until set_pre_close() swaps it.
    pass

def set_pre_close(f):
    global _pre_close
    _pre_close = f
def _header(hkey, hval):
    """Emit one response header via the thread's header hook, or _send()."""
    header = local("header")
    if header:
        header(hkey, hval)
    else:
        # Fallback: write the raw "Key: value" line into the body stream.
        _send("%s: %s"%(hkey, hval))

def set_header(f):
    """Install a custom header writer for this thread."""
    localvars.header = f
def _write(data, exit=True, savename=None):
    """Send data, optionally caching it under savename, then end the response.

    exit=True runs the pre-close hook and closes the response afterwards.
    """
    if savename:
        # Cache the full response body for trysavedresponse().
        setmem(savename, data, False)
    _send(data)
    if exit:
        _pre_close()
        _close()
def trysavedresponse(key=None):
    """Replay a cached response (keyed by raw request string) if one exists.

    When a cached body is found, _write() ends the response immediately.
    """
    key = key or local("request_string")
    response = getmem(key, False)
    response and _write(response, exit=True)
def dez_wrap(resp, failure):
    """Wrap resp for the dez server: route errors to failure(e).

    AbortBranch is re-raised for rel after cleaning up the db session;
    SystemExit (normal end-of-response) is swallowed.
    """
    from cantools.db import session
    from rel.errors import AbortBranch
    def f():
        try:
            resp()
        except AbortBranch as e:
            session.generator.remove()
            raise AbortBranch() # handled in rel
        except SystemExit:
            pass
        except Exception as e:
            failure(e)
    return f
def gae_wrap(resp, failure):
    """Wrap resp for GAE: swallow SystemExit, route other errors to failure(e)."""
    def wrapped():
        try:
            resp()
        except SystemExit:
            # Normal end-of-response exit; nothing to do.
            pass
        except Exception as err:
            failure(err)
    return wrapped
resp_wrap = { "dez": dez_wrap, "gae": gae_wrap }
def do_respond(responseFunc, failMsg="failed", failHtml=False, failNoEnc=False, noLoad=False, threaded=False, response=None, autowin=True):
    """Run a request handler with loading, error handling, and auto-success.

    Unless noLoad, the request is parsed first (cgi_load); any exception is
    turned into fail(failMsg); autowin sends succeed() after the handler.
    threaded=True runs the wrapped handler on a rel thread (dez only).
    """
    def resp():
        response and response.set_cbs()
        noLoad or cgi_load()
        responseFunc()
        autowin and succeed()
    def failure(e):
        fail(data=failMsg, html=failHtml, err=e, noenc=failNoEnc)
    # Pick the dez/gae wrapper matching the configured server.
    wrapped_response = resp_wrap[config.web.server](resp, failure)
    if threaded: # dez only!!!
        from rel import thread
        thread(wrapped_response)
    else:
        wrapped_response()
def redirect(addr, msg="", noscript=False, exit=True):
    """Send an HTML/JS redirect to addr, optionally alerting msg first.

    noscript=True appends a <noscript> notice for JS-disabled browsers.
    """
    a = "<script>"
    if msg:
        a += 'alert("%s"); '%(msg,)
    a += "document.location = '%s';</script>"%(addr,)
    if noscript:
        a += '<noscript>This site requires Javascript to function properly. To enable Javascript in your browser, please follow <a href="http://www.google.com/support/bin/answer.py?answer=23852">these instructions</a>. Thank you, and have a nice day.</noscript>'
    _header("Content-Type", "text/html")
    _write(_env(True)%(a,), exit)
def setcachedefault(shouldCache=True):
    # deprecated -- should set via config.memcache.update("request", [bool])
    config.memcache.update("request", shouldCache)
def _env(html):
    """Template wrapping the response body; default is a bare passthrough."""
    return "%s"

def set_env(f):
    """Install a custom body template (must contain one %s slot)."""
    global _env
    _env = f
def processResponse(data, code):
    """Prefix data with a response code, JSON-encoding as needed.

    code "1" (success): JSON-encode the payload; if plain json.dumps fails,
    run it through rec_conv() first and switch the code to "3".
    code "0" (failure): leave the payload alone unless it isn't
    JSON-serializable, in which case rec_conv() it and switch to "2".
    """
    if code == "1":
        try:
            data = json.dumps(data)
        except:
            code = "3"
            data = json.dumps(rec_conv(data))
    elif code == "0":
        try:
            json.dumps(data)
        except:
            code = "2"
            data = rec_conv(data)
    return "{}{}".format(code, data)
def succeed_sync(func, cb):
    """Bridge an async-style func to a synchronous success response.

    func is called with a handle callback that it should eventually invoke
    with the result; we poll every 10ms until that happens, then pass the
    result through cb and send it via succeed() (which ends the response).

    Note: matching the original behavior, this only fires once handle() has
    received at least one truthy positional or keyword argument.
    """
    d = {}
    def handle(*a, **k):
        d["a"] = a
        d["k"] = k
    func(handle)
    while True:
        time.sleep(0.01)
        # d.get() rather than d[...]: if func is genuinely asynchronous,
        # handle() may not have fired yet and indexing raised KeyError here.
        if d.get("a") or d.get("k"):
            succeed(cb(*d["a"], **d["k"]))
def succeed(data="", html=False, noenc=False, savename=None, cache=False):
    """Send a success ("1"/"3") response and end the request.

    When caching is on (cache=True or config.memcache.request), the body is
    stored under the raw request string so trysavedresponse() can replay it.
    """
    if cache or config.memcache.request:
        savename = local("request_string")
    _header("Content-Type", "text/%s"%(html and "html" or "plain"))
    draw = processResponse(data, "1")
    # Scramble-encode unless encoding is off globally or for this call.
    dstring = (config.encode and not noenc) and enc(draw) or draw
    _write(_env(html)%(dstring,), savename=savename)
def fail(data="failed", html=False, err=None, noenc=False, exit=True):
    """Send a failure ("0"/"2") response, logging err with traceback if given.

    With DEBUG on, the logged error text (including traceback) replaces the
    client-facing message -- do not enable DEBUG in production.
    """
    if err:
        # log it
        import traceback
        logdata = "%s --- %s --> %s"%(data, repr(err), traceback.format_exc())
        log(logdata, "error")
        if DEBUG:
            # write it
            data = logdata
    _header("Content-Type", "text/%s"%(html and "html" or "plain"))
    draw = processResponse(data, "0")
    dstring = (config.encode and not noenc) and enc(draw) or draw
    _write(_env(html)%(dstring,), exit)
def _headers(headers):
    """Emit every header in the dict; on GAE, terminate the header block."""
    for k, v in list(headers.items()):
        _header(k, v)
    if config.web.server == "gae":
        # Blank line separates headers from the body in the CGI stream.
        _send("")
def send_pdf(data, title=None):
    """Send data as a PDF; with title, as a named attachment download."""
    if title:
        _headers({
            "Content-Type": 'application/pdf; name="%s.pdf"'%(title,),
            "Content-Disposition": 'attachment; filename="%s.pdf"'%(title,)
        })
    else:
        _headers({"Content-Type": "application/pdf"})
    _send(data)
    _close()
def send_image(data):
    """Send data as a PNG image and end the response."""
    _headers({"Content-Type": "image/png"})
    _send(data)
    _close()
# Shorthand file-type keys -> MIME types for send_file().
FILETYPES = {"pdf": "application/pdf", "img": "image/png", "ico": "image/ico"}

def send_file(data, file_type=None, detect=False):
    """Send raw file data, with an optional or sniffed Content-Type.

    file_type may be a FILETYPES key or a full MIME type; detect=True uses
    python-magic to sniff the type from the buffer instead.
    """
    if detect:
        import magic
        file_type = data and magic.from_buffer(data, True)
    if file_type:
        _headers({"Content-Type": FILETYPES.get(file_type, file_type)})
    _send(data)
    _close()
def send_text(data, dtype="html", fname=None, exit=True, headers=None):
    """Send data as text/<dtype>, optionally as a named attachment.

    Args:
        data: response body.
        dtype: text subtype ("html", "xml", "plain", ...).
        fname: when given, download as an attachment named fname.dtype.
        exit: end the response after writing.
        headers: optional dict of extra headers to include.
    """
    # Build a fresh dict per call: the old `headers={}` default was a single
    # shared object, so a Content-Disposition set for one response leaked
    # into every later call that relied on the default.
    headers = dict(headers or {})
    headers["Content-Type"] = "text/%s"%(dtype,)
    if fname:
        headers['Content-Disposition'] = 'attachment; filename="%s.%s"'%(fname, dtype)
    _headers(headers)
    _write(data, exit)
def send_xml(data):
    """Send data as text/xml and end the response."""
    send_text(data, "xml")
# misc
def verify_recaptcha(cresponse, pkey):
    """Check a reCAPTCHA client response against Google's siteverify API.

    cresponse: token from the client widget; pkey: our secret key.
    Calls fail() (ending the response) when verification is unsuccessful.
    """
    import os
    verification_result = urlopen(Request(
        url = "https://www.google.com/recaptcha/api/siteverify",
        data = urlencode({
            'secret': pkey,
            'remoteip': os.environ.get('REMOTE_ADDR', os.environ.get('REMOTE_HOST')),
            'response': cresponse
        }).encode(),
        headers = {
            "Content-type": "application/x-www-form-urlencoded"
        }))
    vdata = verification_result.read().decode()
    verification_result.close()
    # Cheap check on the JSON reply: {"success": true, ...}
    if "true" not in vdata:
        fail(vdata)
def strip_punctuation(s):
    """Drop every character that is neither alphanumeric nor whitespace."""
    wanted = lambda ch: ch.isalnum() or ch.isspace()
    return "".join(filter(wanted, s))
def strip_html(s):
    """Remove all <...> tags (non-greedy), leaving tag contents in place."""
    return re.sub(r'<.*?>', "", s)
def strip_html_old(s, keep_breaks=False):
    """Scanner-based tag stripper (legacy; prefer strip_html()).

    Removes <...> tags one at a time; keep_breaks=True leaves <br> tags in
    place. Afterwards, &nbsp; entities become spaces and runs of spaces are
    collapsed to one.
    """
    i = s.find('<')
    while i != -1:
        j = s.find('>', i)
        if j == -1:
            # Unterminated tag: nothing sensible to strip; stop scanning.
            break
        if keep_breaks and 'br' in s[i:j]:
            i = s.find('<', i+1)
        else:
            s = s[:i] + s[j+1:]
            i = s.find('<')
    # The original replaced " " with " " (a no-op) and then looped
    # `while " " in s`, which never terminated for any string containing a
    # space. Restore the evident intent: nbsp -> space, collapse doubles.
    s = s.replace("&nbsp;", " ")
    while "  " in s:
        s = s.replace("  ", " ")
    return s
| |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Coverage data for coverage.py."""
import glob
import json
import optparse
import os
import os.path
import random
import re
import socket
from coverage import env
from coverage.backward import iitems, string_class
from coverage.debug import _TEST_NAME_FILE
from coverage.files import PathAliases
from coverage.misc import CoverageException, file_be_gone
class CoverageData(object):
"""Manages collected coverage data, including file storage.
This class is the public supported API to the data coverage.py collects
during program execution. It includes information about what code was
executed. It does not include information from the analysis phase, to
determine what lines could have been executed, or what lines were not
executed.
.. note::
The file format is not documented or guaranteed. It will change in
the future, in possibly complicated ways. Do not read coverage.py
data files directly. Use this API to avoid disruption.
There are a number of kinds of data that can be collected:
* **lines**: the line numbers of source lines that were executed.
These are always available.
* **arcs**: pairs of source and destination line numbers for transitions
between source lines. These are only available if branch coverage was
used.
* **file tracer names**: the module names of the file tracer plugins that
handled each file in the data.
* **run information**: information about the program execution. This is
written during "coverage run", and then accumulated during "coverage
combine".
Lines, arcs, and file tracer names are stored for each source file. File
names in this API are case-sensitive, even on platforms with
case-insensitive file systems.
To read a coverage.py data file, use :meth:`read_file`, or
:meth:`read_fileobj` if you have an already-opened file. You can then
access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
or :meth:`file_tracer`. Run information is available with
:meth:`run_infos`.
The :meth:`has_arcs` method indicates whether arc data is available. You
can get a list of the files in the data with :meth:`measured_files`.
A summary of the line data is available from :meth:`line_counts`. As with
most Python containers, you can determine if there is any data at all by
using this object as a boolean value.
Most data files will be created by coverage.py itself, but you can use
methods here to create data files if you like. The :meth:`add_lines`,
:meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
that are convenient for coverage.py. The :meth:`add_run_info` method adds
key-value pairs to the run information.
To add a file without any measured data, use :meth:`touch_file`.
You write to a named file with :meth:`write_file`, or to an already opened
file with :meth:`write_fileobj`.
You can clear the data in memory with :meth:`erase`. Two data collections
can be combined by using :meth:`update` on one :class:`CoverageData`,
passing it the other.
"""
# The data file format is JSON, with these keys:
#
# * lines: a dict mapping file names to lists of line numbers
# executed::
#
# { "file1": [17,23,45], "file2": [1,2,3], ... }
#
# * arcs: a dict mapping file names to lists of line number pairs::
#
# { "file1": [[17,23], [17,25], [25,26]], ... }
#
# * file_tracers: a dict mapping file names to plugin names::
#
# { "file1": "django.coverage", ... }
#
# * runs: a list of dicts of information about the coverage.py runs
# contributing to the data::
#
# [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
#
# Only one of `lines` or `arcs` will be present: with branch coverage, data
# is stored as arcs. Without branch coverage, it is stored as lines. The
# line data is easily recovered from the arcs: it is all the first elements
# of the pairs that are greater than zero.
    def __init__(self, debug=None):
        """Create a CoverageData.

        `debug` is a `DebugControl` object for writing debug messages.

        """
        self._debug = debug

        # A map from canonical Python source file name to a dictionary in
        # which there's an entry for each line number that has been
        # executed:
        #
        #   { 'filename1.py': [12, 47, 1001], ... }
        #
        # None means "no line data collected yet" (distinct from empty).
        self._lines = None

        # A map from canonical Python source file name to a dictionary with an
        # entry for each pair of line numbers forming an arc:
        #
        #   { 'filename1.py': [(12,14), (47,48), ... ], ... }
        #
        # Only one of _lines/_arcs is ever populated (see add_lines/add_arcs).
        self._arcs = None

        # A map from canonical source file name to a plugin module name:
        #
        #   { 'filename1.py': 'django.coverage', ... }
        #
        self._file_tracers = {}

        # A list of dicts of information about the coverage.py runs.
        self._runs = []
    def __repr__(self):
        # Shows element counts, not contents; "{{{0}}}" renders as "{N}".
        return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
            klass=self.__class__.__name__,
            lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
            arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
            tracers="{{{0}}}".format(len(self._file_tracers)),
            runs="[{0}]".format(len(self._runs)),
        )
    ##
    ## Reading data
    ##

    def has_arcs(self):
        """Does this data have arcs?

        Arc data is only available if branch coverage was used during
        collection.

        Returns a boolean.

        """
        return self._has_arcs()
def lines(self, filename):
"""Get the list of lines executed for a file.
If the file was not measured, returns None. A file might be measured,
and have no lines executed, in which case an empty list is returned.
If the file was executed, returns a list of integers, the line numbers
executed in the file. The list is in no particular order.
"""
if self._arcs is not None:
if filename in self._arcs:
return [s for s, __ in self._arcs[filename] if s > 0]
elif self._lines is not None:
if filename in self._lines:
return self._lines[filename]
return None
    def arcs(self, filename):
        """Get the list of arcs executed for a file.

        If the file was not measured, returns None. A file might be measured,
        and have no arcs executed, in which case an empty list is returned.

        If the file was executed, returns a list of 2-tuples of integers. Each
        pair is a starting line number and an ending line number for a
        transition from one line to another. The list is in no particular
        order.

        Negative numbers have special meaning. If the starting line number is
        -N, it represents an entry to the code object that starts at line N.
        If the ending ling number is -N, it's an exit from the code object that
        starts at line N.

        """
        if self._arcs is not None:
            if filename in self._arcs:
                return self._arcs[filename]
        return None
    def file_tracer(self, filename):
        """Get the plugin name of the file tracer for a file.

        Returns the name of the plugin that handles this file. If the file was
        measured, but didn't use a plugin, then "" is returned. If the file
        was not measured, then None is returned.

        """
        # Because the vast majority of files involve no plugin, we don't store
        # them explicitly in self._file_tracers. Check the measured data
        # instead to see if it was a known file with no plugin.
        if filename in (self._arcs or self._lines or {}):
            return self._file_tracers.get(filename, "")
        return None
    def run_infos(self):
        """Return the list of dicts of run information.

        For data collected during a single run, this will be a one-element
        list. If data has been combined, there will be one element for each
        original data file.

        """
        return self._runs
    def measured_files(self):
        """A list of all files that had been measured."""
        # Whichever of _arcs/_lines is populated holds the measured files.
        return list(self._arcs or self._lines or {})
def line_counts(self, fullpath=False):
"""Return a dict summarizing the line coverage data.
Keys are based on the file names, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
Returns a dict mapping file names to counts of lines.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename in self.measured_files():
summ[filename_fn(filename)] = len(self.lines(filename))
return summ
    def __nonzero__(self):
        # Truthy iff any line or arc data has been collected.
        return bool(self._lines or self._arcs)

    # Python 3 spelling of the same protocol method.
    __bool__ = __nonzero__
    def read_fileobj(self, file_obj):
        """Read the coverage data from the given file object.

        Should only be used on an empty CoverageData object.

        """
        data = self._read_raw_data(file_obj)

        # Reset both stores; only one will be populated below.
        self._lines = self._arcs = None

        if 'lines' in data:
            self._lines = data['lines']

        if 'arcs' in data:
            # JSON stores arcs as 2-element lists; convert back to tuples.
            self._arcs = dict(
                (fname, [tuple(pair) for pair in arcs])
                for fname, arcs in iitems(data['arcs'])
            )
        self._file_tracers = data.get('file_tracers', {})
        self._runs = data.get('runs', [])

        self._validate()
    def read_file(self, filename):
        """Read the coverage data from `filename` into this object.

        Raises CoverageException (wrapping the original error) if the file
        cannot be opened or parsed.

        """
        if self._debug and self._debug.should('dataio'):
            self._debug.write("Reading data from %r" % (filename,))
        try:
            with self._open_for_reading(filename) as f:
                self.read_fileobj(f)
        except Exception as exc:
            raise CoverageException(
                "Couldn't read data from '%s': %s: %s" % (
                    filename, exc.__class__.__name__, exc,
                )
            )
    # Sentinel prefix written at the start of data files to discourage
    # direct parsing; checked by _read_raw_data().
    _GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"

    @classmethod
    def _open_for_reading(cls, filename):
        """Open a file appropriately for reading data."""
        return open(filename, "r")
    @classmethod
    def _read_raw_data(cls, file_obj):
        """Read the raw data from a file object.

        Verifies and consumes the _GO_AWAY sentinel, then parses the rest of
        the stream as JSON.

        """
        go_away = file_obj.read(len(cls._GO_AWAY))
        if go_away != cls._GO_AWAY:
            raise CoverageException("Doesn't seem to be a coverage.py data file")
        return json.load(file_obj)
    @classmethod
    def _read_raw_data_file(cls, filename):
        """Read the raw data from a file, for debugging."""
        with cls._open_for_reading(filename) as f:
            return cls._read_raw_data(f)
    ##
    ## Writing data
    ##

    def add_lines(self, line_data):
        """Add measured line data.

        `line_data` is a dictionary mapping file names to dictionaries::

            { filename: { lineno: None, ... }, ...}

        Raises CoverageException if arc data has already been collected:
        a data set holds lines or arcs, never both.

        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding lines: %d files, %d lines total" % (
                len(line_data), sum(len(lines) for lines in line_data.values())
            ))
        if self._has_arcs():
            raise CoverageException("Can't add lines to existing arc data")

        if self._lines is None:
            self._lines = {}
        for filename, linenos in iitems(line_data):
            if filename in self._lines:
                # Merge with existing data for this file, de-duplicating.
                new_linenos = set(self._lines[filename])
                new_linenos.update(linenos)
                linenos = new_linenos
            self._lines[filename] = list(linenos)

        self._validate()
    def add_arcs(self, arc_data):
        """Add measured arc data.
        `arc_data` is a dictionary mapping file names to dictionaries::
            { filename: { (l1,l2): None, ... }, ...}
        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding arcs: %d files, %d arcs total" % (
                len(arc_data), sum(len(arcs) for arcs in arc_data.values())
            ))
        # Arc data and line data are mutually exclusive.
        if self._has_lines():
            raise CoverageException("Can't add arcs to existing line data")
        if self._arcs is None:
            self._arcs = {}
        for filename, arcs in iitems(arc_data):
            if filename in self._arcs:
                # Union the new arcs with what we already have for this file.
                new_arcs = set(self._arcs[filename])
                new_arcs.update(arcs)
                arcs = new_arcs
            self._arcs[filename] = list(arcs)
        self._validate()
    def add_file_tracers(self, file_tracers):
        """Add per-file plugin information.
        `file_tracers` is { filename: plugin_name, ... }
        """
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
        # A tracer may only be recorded for a file that has measurement data.
        existing_files = self._arcs or self._lines or {}
        for filename, plugin_name in iitems(file_tracers):
            if filename not in existing_files:
                raise CoverageException(
                    "Can't add file tracer data for unmeasured file '%s'" % (filename,)
                )
            # A file keeps a single tracer name; a different name is an error.
            existing_plugin = self._file_tracers.get(filename)
            if existing_plugin is not None and plugin_name != existing_plugin:
                raise CoverageException(
                    "Conflicting file tracer name for '%s': %r vs %r" % (
                        filename, existing_plugin, plugin_name,
                    )
                )
            self._file_tracers[filename] = plugin_name
        self._validate()
def add_run_info(self, **kwargs):
"""Add information about the run.
Keywords are arbitrary, and are stored in the run dictionary. Values
must be JSON serializable. You may use this function more than once,
but repeated keywords overwrite each other.
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding run info: %r" % (kwargs,))
if not self._runs:
self._runs = [{}]
self._runs[0].update(kwargs)
self._validate()
    def touch_file(self, filename):
        """Ensure that `filename` appears in the data, empty if needed."""
        if self._debug and self._debug.should('dataop'):
            self._debug.write("Touching %r" % (filename,))
        if not self._has_arcs() and not self._has_lines():
            raise CoverageException("Can't touch files in an empty CoverageData")
        # Add the file to whichever store (arcs or lines) is active.
        if self._has_arcs():
            where = self._arcs
        else:
            where = self._lines
        where.setdefault(filename, [])
        self._validate()
def write_fileobj(self, file_obj):
"""Write the coverage data to `file_obj`."""
# Create the file data.
file_data = {}
if self._has_arcs():
file_data['arcs'] = self._arcs
if self._has_lines():
file_data['lines'] = self._lines
if self._file_tracers:
file_data['file_tracers'] = self._file_tracers
if self._runs:
file_data['runs'] = self._runs
# Write the data to the file.
file_obj.write(self._GO_AWAY)
json.dump(file_data, file_obj)
    def write_file(self, filename):
        """Write the coverage data to `filename`."""
        if self._debug and self._debug.should('dataio'):
            self._debug.write("Writing data to %r" % (filename,))
        # Text mode: the payload is the sentinel string plus JSON.
        with open(filename, 'w') as fdata:
            self.write_fileobj(fdata)
def erase(self):
"""Erase the data in this object."""
self._lines = None
self._arcs = None
self._file_tracers = {}
self._runs = []
self._validate()
    def update(self, other_data, aliases=None):
        """Update this data with data from another `CoverageData`.
        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.
        """
        # Line-measured and arc-measured data can never be mixed.
        if self._has_lines() and other_data._has_arcs():
            raise CoverageException("Can't combine arc data with line data")
        if self._has_arcs() and other_data._has_lines():
            raise CoverageException("Can't combine line data with arc data")
        aliases = aliases or PathAliases()
        # _file_tracers: only have a string, so they have to agree.
        # Have to do these first, so that our examination of self._arcs and
        # self._lines won't be confused by data updated from other_data.
        for filename in other_data.measured_files():
            other_plugin = other_data.file_tracer(filename)
            filename = aliases.map(filename)
            this_plugin = self.file_tracer(filename)
            if this_plugin is None:
                if other_plugin:
                    self._file_tracers[filename] = other_plugin
            elif this_plugin != other_plugin:
                raise CoverageException(
                    "Conflicting file tracer name for '%s': %r vs %r" % (
                        filename, this_plugin, other_plugin,
                    )
                )
        # _runs: add the new runs to these runs.
        self._runs.extend(other_data._runs)
        # _lines: merge dicts.
        if other_data._has_lines():
            if self._lines is None:
                self._lines = {}
            for filename, file_lines in iitems(other_data._lines):
                filename = aliases.map(filename)
                if filename in self._lines:
                    # Union with the lines already recorded for this file.
                    lines = set(self._lines[filename])
                    lines.update(file_lines)
                    file_lines = list(lines)
                self._lines[filename] = file_lines
        # _arcs: merge dicts.
        if other_data._has_arcs():
            if self._arcs is None:
                self._arcs = {}
            for filename, file_arcs in iitems(other_data._arcs):
                filename = aliases.map(filename)
                if filename in self._arcs:
                    # Union with the arcs already recorded for this file.
                    arcs = set(self._arcs[filename])
                    arcs.update(file_arcs)
                    file_arcs = list(arcs)
                self._arcs[filename] = file_arcs
        self._validate()
##
## Miscellaneous
##
def _validate(self):
"""If we are in paranoid mode, validate that everything is right."""
if env.TESTING:
self._validate_invariants()
def _validate_invariants(self):
"""Validate internal invariants."""
# Only one of _lines or _arcs should exist.
assert not(self._has_lines() and self._has_arcs()), (
"Shouldn't have both _lines and _arcs"
)
# _lines should be a dict of lists of ints.
if self._has_lines():
for fname, lines in iitems(self._lines):
assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
assert all(isinstance(x, int) for x in lines), (
"_lines[%r] shouldn't be %r" % (fname, lines)
)
# _arcs should be a dict of lists of pairs of ints.
if self._has_arcs():
for fname, arcs in iitems(self._arcs):
assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
"_arcs[%r] shouldn't be %r" % (fname, arcs)
)
# _file_tracers should have only non-empty strings as values.
for fname, plugin in iitems(self._file_tracers):
assert isinstance(fname, string_class), (
"Key in _file_tracers shouldn't be %r" % (fname,)
)
assert plugin and isinstance(plugin, string_class), (
"_file_tracers[%r] shoudn't be %r" % (fname, plugin)
)
# _runs should be a list of dicts.
for val in self._runs:
assert isinstance(val, dict)
for key in val:
assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
    def add_to_hash(self, filename, hasher):
        """Contribute `filename`'s data to the `hasher`.
        `hasher` is a :class:`coverage.misc.Hasher` instance to be updated with
        the file's data. It should only get the results data, not the run
        data.
        """
        # Sorting makes the hash independent of measurement order.
        if self._has_arcs():
            hasher.update(sorted(self.arcs(filename) or []))
        else:
            hasher.update(sorted(self.lines(filename) or []))
        hasher.update(self.file_tracer(filename))
##
## Internal
##
def _has_lines(self):
"""Do we have data in self._lines?"""
return self._lines is not None
def _has_arcs(self):
"""Do we have data in self._arcs?"""
return self._arcs is not None
class CoverageDataFiles(object):
    """Manage the use of coverage data files."""
    def __init__(self, basename=None):
        """Create a CoverageDataFiles to manage data files.
        `basename` is the name of the file to use for storing data.
        """
        # Construct the file name that will be used for data storage.
        self.filename = os.path.abspath(basename or ".coverage")
    def erase(self, parallel=False):
        """Erase the data from the file storage.
        If `parallel` is true, then also deletes data files created from the
        basename by parallel-mode.
        """
        file_be_gone(self.filename)
        if parallel:
            # Parallel-mode files are "<basename>.<something>".
            data_dir, local = os.path.split(self.filename)
            localdot = local + '.*'
            pattern = os.path.join(os.path.abspath(data_dir), localdot)
            for filename in glob.glob(pattern):
                file_be_gone(filename)
    def read(self, data):
        """Read the coverage data."""
        # A missing data file is not an error: nothing has been measured yet.
        if os.path.exists(self.filename):
            data.read_file(self.filename)
    def write(self, data, suffix=None):
        """Write the collected coverage data to a file.
        `suffix` is a suffix to append to the base file name. This can be used
        for multiple or parallel execution, so that many coverage data files
        can exist simultaneously. A dot will be used to join the base name and
        the suffix.
        """
        filename = self.filename
        if suffix is True:
            # If data_suffix was a simple true value, then make a suffix with
            # plenty of distinguishing information. We do this here in
            # `save()` at the last minute so that the pid will be correct even
            # if the process forks.
            extra = ""
            if _TEST_NAME_FILE: # pragma: debugging
                with open(_TEST_NAME_FILE) as f:
                    test_name = f.read()
                extra = "." + test_name
            suffix = "%s%s.%s.%06d" % (
                socket.gethostname(), extra, os.getpid(),
                random.randint(0, 999999)
            )
        if suffix:
            filename += "." + suffix
        data.write_file(filename)
    def combine_parallel_data(self, data, aliases=None, data_paths=None):
        """Combine a number of data files together.
        Treat `self.filename` as a file prefix, and combine the data from all
        of the data files starting with that prefix plus a dot.
        If `aliases` is provided, it's a `PathAliases` object that is used to
        re-map paths to match the local machine's.
        If `data_paths` is provided, it is a list of directories or files to
        combine. Directories are searched for files that start with
        `self.filename` plus dot as a prefix, and those files are combined.
        If `data_paths` is not provided, then the directory portion of
        `self.filename` is used as the directory to search for data files.
        Every data file found and combined is then deleted from disk.
        """
        # Because of the os.path.abspath in the constructor, data_dir will
        # never be an empty string.
        data_dir, local = os.path.split(self.filename)
        localdot = local + '.*'
        data_paths = data_paths or [data_dir]
        files_to_combine = []
        for p in data_paths:
            if os.path.isfile(p):
                files_to_combine.append(os.path.abspath(p))
            elif os.path.isdir(p):
                pattern = os.path.join(os.path.abspath(p), localdot)
                files_to_combine.extend(glob.glob(pattern))
            else:
                raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
        for f in files_to_combine:
            # Merge each file into `data`, then remove it from disk.
            new_data = CoverageData()
            new_data.read_file(f)
            data.update(new_data, aliases=aliases)
            file_be_gone(f)
def canonicalize_json_data(data):
    """Canonicalize our JSON data so it can be compared."""
    # Sorting the per-file lists in place gives a deterministic ordering.
    for section in ('lines', 'arcs'):
        for fname, values in iitems(data.get(section, {})):
            data[section][fname] = sorted(values)
def pretty_data(data):
    """Format data as JSON, but as nicely as possible.
    Returns a string.
    """
    # Basic pretty-printed dump first.
    text = json.dumps(data, indent=4, sort_keys=True)
    # Collapse two-number pairs back onto a single line.
    text = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", text)
    # Trailing whitespace would upset the tests; strip it off.
    text = re.sub(r"(?m)\s+$", "", text)
    return text
def debug_main(args):
    """Dump the raw data from data files.
    Run this as::
        $ python -m coverage.data [FILE]
    """
    parser = optparse.OptionParser()
    parser.add_option(
        "-c", "--canonical", action="store_true",
        help="Sort data into a canonical order",
    )
    opts, remaining = parser.parse_args(args)
    # With no file arguments, dump the default data file.
    for filename in (remaining or [".coverage"]):
        print("--- {0} ------------------------------".format(filename))
        raw = CoverageData._read_raw_data_file(filename)
        if opts.canonical:
            canonicalize_json_data(raw)
        print(pretty_data(raw))
if __name__ == '__main__':
    # Allow "python -m coverage.data [FILE]" style raw-data dumps.
    import sys
    debug_main(sys.argv[1:])
| |
"""Tests for lock.py"""
import re
import trollius as asyncio
from trollius import From, Return
from trollius import test_utils
from trollius.test_utils import mock
from trollius.test_utils import unittest
# Pattern that every primitive's repr() must match in these tests:
#   <ClassName object at 0x...[extras]>
# where "extras" is the state (set/unset/locked/unlocked) optionally
# followed by ",value:N" and/or ",waiters:N".
STR_RGX_REPR = (
    r'^<(?P<class>.*?) object at (?P<address>.*?)'
    r'\[(?P<extras>'
    r'(set|unset|locked|unlocked)(,value:\d)?(,waiters:\d+)?'
    r')\]>\Z'
)
RGX_REPR = re.compile(STR_RGX_REPR)
class LockTests(test_utils.TestCase):
    """Tests for trollius.Lock."""
    def setUp(self):
        self.loop = self.new_test_loop()
    def test_ctor_loop(self):
        # An explicitly passed loop must be stored on the lock.
        loop = mock.Mock()
        lock = asyncio.Lock(loop=loop)
        self.assertIs(lock._loop, loop)
        lock = asyncio.Lock(loop=self.loop)
        self.assertIs(lock._loop, self.loop)
    def test_ctor_noloop(self):
        # Without an explicit loop, the current event loop is used.
        asyncio.set_event_loop(self.loop)
        lock = asyncio.Lock()
        self.assertIs(lock._loop, self.loop)
    def test_repr(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(repr(lock).endswith('[unlocked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))
        @asyncio.coroutine
        def acquire_lock():
            yield From(lock.acquire())
        self.loop.run_until_complete(acquire_lock())
        self.assertTrue(repr(lock).endswith('[locked]>'))
        self.assertTrue(RGX_REPR.match(repr(lock)))
    def test_lock(self):
        lock = asyncio.Lock(loop=self.loop)
        @asyncio.coroutine
        def acquire_lock():
            yield From(lock.acquire())
            raise Return(lock)
        res = self.loop.run_until_complete(acquire_lock())
        self.assertTrue(res)
        self.assertTrue(lock.locked())
        lock.release()
        self.assertFalse(lock.locked())
    def test_acquire(self):
        # Blocked acquirers are woken one at a time, in FIFO order,
        # by successive release() calls.
        lock = asyncio.Lock(loop=self.loop)
        result = []
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))
        @asyncio.coroutine
        def c1(result):
            if (yield From(lock.acquire())):
                result.append(1)
            raise Return(True)
        @asyncio.coroutine
        def c2(result):
            if (yield From(lock.acquire())):
                result.append(2)
            raise Return(True)
        @asyncio.coroutine
        def c3(result):
            if (yield From(lock.acquire())):
                result.append(3)
            raise Return(True)
        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        t3 = asyncio.Task(c3(result), loop=self.loop)
        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2], result)
        lock.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
    def test_acquire_cancel(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertTrue(self.loop.run_until_complete(lock.acquire()))
        task = asyncio.Task(lock.acquire(), loop=self.loop)
        self.loop.call_soon(task.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, task)
        # A cancelled acquire must not leave a stale waiter behind.
        self.assertFalse(lock._waiters)
    def test_cancel_race(self):
        # Several tasks:
        # - A acquires the lock
        # - B is blocked in acquire()
        # - C is blocked in acquire()
        #
        # Now, concurrently:
        # - B is cancelled
        # - A releases the lock
        #
        # If B's waiter is marked cancelled but not yet removed from
        # _waiters, A's release() call will crash when trying to set
        # B's waiter; instead, it should move on to C's waiter.
        # Setup: A has the lock, b and c are waiting.
        lock = asyncio.Lock(loop=self.loop)
        @asyncio.coroutine
        def lockit(name, blocker):
            yield From(lock.acquire())
            try:
                if blocker is not None:
                    yield From(blocker)
            finally:
                lock.release()
        fa = asyncio.Future(loop=self.loop)
        ta = asyncio.Task(lockit('A', fa), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertTrue(lock.locked())
        tb = asyncio.Task(lockit('B', None), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual(len(lock._waiters), 1)
        tc = asyncio.Task(lockit('C', None), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual(len(lock._waiters), 2)
        # Create the race and check.
        # Without the fix this failed at the last assert.
        fa.set_result(None)
        tb.cancel()
        self.assertTrue(lock._waiters[0].cancelled())
        test_utils.run_briefly(self.loop, 2)
        self.assertFalse(lock.locked())
        self.assertTrue(ta.done())
        self.assertTrue(tb.cancelled())
        self.assertTrue(tc.done())
    def test_release_not_acquired(self):
        lock = asyncio.Lock(loop=self.loop)
        self.assertRaises(RuntimeError, lock.release)
    def test_release_no_waiters(self):
        lock = asyncio.Lock(loop=self.loop)
        self.loop.run_until_complete(lock.acquire())
        self.assertTrue(lock.locked())
        lock.release()
        self.assertFalse(lock.locked())
    def test_context_manager(self):
        lock = asyncio.Lock(loop=self.loop)
        @asyncio.coroutine
        def acquire_lock():
            raise Return((yield From(lock)))
        with self.loop.run_until_complete(acquire_lock()):
            self.assertTrue(lock.locked())
        self.assertFalse(lock.locked())
    def test_context_manager_cant_reuse(self):
        lock = asyncio.Lock(loop=self.loop)
        @asyncio.coroutine
        def acquire_lock():
            raise Return((yield From(lock)))
        # This spells "yield From(lock)" outside a generator.
        cm = self.loop.run_until_complete(acquire_lock())
        with cm:
            self.assertTrue(lock.locked())
        self.assertFalse(lock.locked())
        # The context manager object is single-use.
        with self.assertRaises(AttributeError):
            with cm:
                pass
    def test_context_manager_no_yield(self):
        lock = asyncio.Lock(loop=self.loop)
        try:
            with lock:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield From" should be used as context manager expression')
        self.assertFalse(lock.locked())
class EventTests(test_utils.TestCase):
    """Tests for trollius.Event."""
    def setUp(self):
        self.loop = self.new_test_loop()
    def test_ctor_loop(self):
        # An explicitly passed loop must be stored on the event.
        loop = mock.Mock()
        ev = asyncio.Event(loop=loop)
        self.assertIs(ev._loop, loop)
        ev = asyncio.Event(loop=self.loop)
        self.assertIs(ev._loop, self.loop)
    def test_ctor_noloop(self):
        # Without an explicit loop, the current event loop is used.
        asyncio.set_event_loop(self.loop)
        ev = asyncio.Event()
        self.assertIs(ev._loop, self.loop)
    def test_repr(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertTrue(repr(ev).endswith('[unset]>'))
        match = RGX_REPR.match(repr(ev))
        self.assertEqual(match.group('extras'), 'unset')
        ev.set()
        self.assertTrue(repr(ev).endswith('[set]>'))
        self.assertTrue(RGX_REPR.match(repr(ev)))
        ev._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(ev))
        self.assertTrue(RGX_REPR.match(repr(ev)))
    def test_wait(self):
        # All waiters are released together when the event is set.
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())
        result = []
        @asyncio.coroutine
        def c1(result):
            if (yield From(ev.wait())):
                result.append(1)
        @asyncio.coroutine
        def c2(result):
            if (yield From(ev.wait())):
                result.append(2)
        @asyncio.coroutine
        def c3(result):
            if (yield From(ev.wait())):
                result.append(3)
        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([], result)
        t3 = asyncio.Task(c3(result), loop=self.loop)
        ev.set()
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(t1.done())
        self.assertIsNone(t1.result())
        self.assertTrue(t2.done())
        self.assertIsNone(t2.result())
        self.assertTrue(t3.done())
        self.assertIsNone(t3.result())
    def test_wait_on_set(self):
        # Waiting on an already-set event returns immediately.
        ev = asyncio.Event(loop=self.loop)
        ev.set()
        res = self.loop.run_until_complete(ev.wait())
        self.assertTrue(res)
    def test_wait_cancel(self):
        ev = asyncio.Event(loop=self.loop)
        wait = asyncio.Task(ev.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        # A cancelled wait must not leave a stale waiter behind.
        self.assertFalse(ev._waiters)
    def test_clear(self):
        ev = asyncio.Event(loop=self.loop)
        self.assertFalse(ev.is_set())
        ev.set()
        self.assertTrue(ev.is_set())
        ev.clear()
        self.assertFalse(ev.is_set())
    def test_clear_with_waiters(self):
        ev = asyncio.Event(loop=self.loop)
        result = []
        @asyncio.coroutine
        def c1(result):
            if (yield From(ev.wait())):
                result.append(1)
            raise Return(True)
        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        # set() then clear() before the waiter runs: the waiter stays queued.
        ev.set()
        ev.clear()
        self.assertFalse(ev.is_set())
        # A second set() while set is a no-op; the waiter is still woken once.
        ev.set()
        ev.set()
        self.assertEqual(1, len(ev._waiters))
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertEqual(0, len(ev._waiters))
        self.assertTrue(t.done())
        self.assertTrue(t.result())
class ConditionTests(test_utils.TestCase):
    """Tests for trollius.Condition."""
    def setUp(self):
        self.loop = self.new_test_loop()
    def test_ctor_loop(self):
        # An explicitly passed loop must be stored on the condition.
        loop = mock.Mock()
        cond = asyncio.Condition(loop=loop)
        self.assertIs(cond._loop, loop)
        cond = asyncio.Condition(loop=self.loop)
        self.assertIs(cond._loop, self.loop)
    def test_ctor_noloop(self):
        # Without an explicit loop, the current event loop is used.
        asyncio.set_event_loop(self.loop)
        cond = asyncio.Condition()
        self.assertIs(cond._loop, self.loop)
    def test_wait(self):
        # Notified waiters wake one at a time, each re-acquiring the lock.
        cond = asyncio.Condition(loop=self.loop)
        result = []
        @asyncio.coroutine
        def c1(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(1)
            raise Return(True)
        @asyncio.coroutine
        def c2(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(2)
            raise Return(True)
        @asyncio.coroutine
        def c3(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(3)
            raise Return(True)
        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([], result)
        self.assertFalse(cond.locked())
        self.assertTrue(self.loop.run_until_complete(cond.acquire()))
        cond.notify()
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([], result)
        self.assertTrue(cond.locked())
        cond.release()
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())
        cond.notify(2)
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([1], result)
        self.assertTrue(cond.locked())
        cond.release()
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([1, 2], result)
        self.assertTrue(cond.locked())
        cond.release()
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(cond.locked())
        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
    def test_wait_cancel(self):
        cond = asyncio.Condition(loop=self.loop)
        self.loop.run_until_complete(cond.acquire())
        wait = asyncio.Task(cond.wait(), loop=self.loop)
        self.loop.call_soon(wait.cancel)
        self.assertRaises(
            asyncio.CancelledError,
            self.loop.run_until_complete, wait)
        # The cancelled waiter is removed and the lock is re-acquired.
        self.assertFalse(cond._waiters)
        self.assertTrue(cond.locked())
    def test_wait_unacquired(self):
        # wait() without holding the lock is an error.
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete, cond.wait())
    def test_wait_for(self):
        # wait_for() keeps waiting until the predicate becomes true.
        cond = asyncio.Condition(loop=self.loop)
        presult = False
        def predicate():
            return presult
        result = []
        @asyncio.coroutine
        def c1(result):
            yield From(cond.acquire())
            if (yield From(cond.wait_for(predicate))):
                result.append(1)
                cond.release()
            raise Return(True)
        t = asyncio.Task(c1(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        presult = True
        self.loop.run_until_complete(cond.acquire())
        cond.notify()
        cond.release()
        test_utils.run_briefly(self.loop)
        self.assertEqual([1], result)
        self.assertTrue(t.done())
        self.assertTrue(t.result())
    def test_wait_for_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        # predicate can return true immediately
        res = self.loop.run_until_complete(cond.wait_for(lambda: [1, 2, 3]))
        self.assertEqual([1, 2, 3], res)
        self.assertRaises(
            RuntimeError,
            self.loop.run_until_complete,
            cond.wait_for(lambda: False))
    def test_notify(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []
        @asyncio.coroutine
        def c1(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(1)
                cond.release()
            raise Return(True)
        @asyncio.coroutine
        def c2(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(2)
                cond.release()
            raise Return(True)
        @asyncio.coroutine
        def c3(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(3)
                cond.release()
            raise Return(True)
        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        t3 = asyncio.Task(c3(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.loop.run_until_complete(cond.acquire())
        cond.notify(1)
        cond.release()
        # each coroutine requires 2 runs of the event loop
        test_utils.run_briefly(self.loop, 2)
        self.assertEqual([1], result)
        self.loop.run_until_complete(cond.acquire())
        # Notifying more waiters than exist is harmless.
        cond.notify(1)
        cond.notify(2048)
        cond.release()
        # each coroutine requires 2 runs of the event loop
        test_utils.run_briefly(self.loop, 4)
        self.assertEqual([1, 2, 3], result)
        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
        self.assertTrue(t3.done())
        self.assertTrue(t3.result())
    def test_notify_all(self):
        cond = asyncio.Condition(loop=self.loop)
        result = []
        @asyncio.coroutine
        def c1(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(1)
                cond.release()
            raise Return(True)
        @asyncio.coroutine
        def c2(result):
            yield From(cond.acquire())
            if (yield From(cond.wait())):
                result.append(2)
                cond.release()
            raise Return(True)
        t1 = asyncio.Task(c1(result), loop=self.loop)
        t2 = asyncio.Task(c2(result), loop=self.loop)
        test_utils.run_briefly(self.loop)
        self.assertEqual([], result)
        self.loop.run_until_complete(cond.acquire())
        cond.notify_all()
        cond.release()
        # each coroutine requires 2 runs of the event loop
        test_utils.run_briefly(self.loop, 4)
        self.assertEqual([1, 2], result)
        self.assertTrue(t1.done())
        self.assertTrue(t1.result())
        self.assertTrue(t2.done())
        self.assertTrue(t2.result())
    def test_notify_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify)
    def test_notify_all_unacquired(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertRaises(RuntimeError, cond.notify_all)
    def test_repr(self):
        cond = asyncio.Condition(loop=self.loop)
        self.assertTrue('unlocked' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))
        self.loop.run_until_complete(cond.acquire())
        self.assertTrue('locked' in repr(cond))
        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:1' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))
        cond._waiters.append(mock.Mock())
        self.assertTrue('waiters:2' in repr(cond))
        self.assertTrue(RGX_REPR.match(repr(cond)))
    def test_context_manager(self):
        cond = asyncio.Condition(loop=self.loop)
        @asyncio.coroutine
        def acquire_cond():
            raise Return((yield From(cond)))
        with self.loop.run_until_complete(acquire_cond()):
            self.assertTrue(cond.locked())
        self.assertFalse(cond.locked())
    def test_context_manager_no_yield(self):
        cond = asyncio.Condition(loop=self.loop)
        try:
            with cond:
                self.fail('RuntimeError is not raised in with expression')
        except RuntimeError as err:
            self.assertEqual(
                str(err),
                '"yield From" should be used as context manager expression')
        self.assertFalse(cond.locked())
    def test_explicit_lock(self):
        # A condition built around an existing lock shares that lock's loop.
        lock = asyncio.Lock(loop=self.loop)
        cond = asyncio.Condition(lock, loop=self.loop)
        self.assertIs(cond._lock, lock)
        self.assertIs(cond._loop, lock._loop)
    def test_ambiguous_loops(self):
        # A lock from one loop can't back a condition on another loop.
        loop = self.new_test_loop()
        self.addCleanup(loop.close)
        lock = asyncio.Lock(loop=self.loop)
        with self.assertRaises(ValueError):
            asyncio.Condition(lock, loop=loop)
class SemaphoreTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_ctor_loop(self):
loop = mock.Mock()
sem = asyncio.Semaphore(loop=loop)
self.assertIs(sem._loop, loop)
sem = asyncio.Semaphore(loop=self.loop)
self.assertIs(sem._loop, self.loop)
def test_ctor_noloop(self):
asyncio.set_event_loop(self.loop)
sem = asyncio.Semaphore()
self.assertIs(sem._loop, self.loop)
def test_initial_value_zero(self):
sem = asyncio.Semaphore(0, loop=self.loop)
self.assertTrue(sem.locked())
def test_repr(self):
sem = asyncio.Semaphore(loop=self.loop)
self.assertTrue(repr(sem).endswith('[unlocked,value:1]>'))
self.assertTrue(RGX_REPR.match(repr(sem)))
self.loop.run_until_complete(sem.acquire())
self.assertTrue(repr(sem).endswith('[locked]>'))
self.assertTrue('waiters' not in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
sem._waiters.append(mock.Mock())
self.assertTrue('waiters:1' in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
sem._waiters.append(mock.Mock())
self.assertTrue('waiters:2' in repr(sem))
self.assertTrue(RGX_REPR.match(repr(sem)))
def test_semaphore(self):
sem = asyncio.Semaphore(loop=self.loop)
self.assertEqual(1, sem._value)
@asyncio.coroutine
def acquire_lock():
yield From(sem.acquire())
raise Return(sem)
res = self.loop.run_until_complete(acquire_lock())
self.assertTrue(res)
self.assertTrue(sem.locked())
self.assertEqual(0, sem._value)
sem.release()
self.assertFalse(sem.locked())
self.assertEqual(1, sem._value)
def test_semaphore_value(self):
self.assertRaises(ValueError, asyncio.Semaphore, -1)
def test_acquire(self):
sem = asyncio.Semaphore(3, loop=self.loop)
result = []
self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertTrue(self.loop.run_until_complete(sem.acquire()))
self.assertFalse(sem.locked())
@asyncio.coroutine
def c1(result):
yield From(sem.acquire())
result.append(1)
raise Return(True)
@asyncio.coroutine
def c2(result):
yield From(sem.acquire())
result.append(2)
raise Return(True)
@asyncio.coroutine
def c3(result):
yield From(sem.acquire())
result.append(3)
raise Return(True)
@asyncio.coroutine
def c4(result):
yield From(sem.acquire())
result.append(4)
raise Return(True)
t1 = asyncio.Task(c1(result), loop=self.loop)
t2 = asyncio.Task(c2(result), loop=self.loop)
t3 = asyncio.Task(c3(result), loop=self.loop)
# each coroutine requires 2 runs of the event loop
test_utils.run_briefly(self.loop, 2)
self.assertEqual([1], result)
self.assertTrue(sem.locked())
self.assertEqual(2, len(sem._waiters))
self.assertEqual(0, sem._value)
t4 = asyncio.Task(c4(result), loop=self.loop)
sem.release()
sem.release()
self.assertEqual(2, sem._value)
test_utils.run_briefly(self.loop)
self.assertEqual(0, sem._value)
self.assertEqual([1, 2, 3], result)
self.assertTrue(sem.locked())
self.assertEqual(1, len(sem._waiters))
self.assertEqual(0, sem._value)
self.assertTrue(t1.done())
self.assertTrue(t1.result())
self.assertTrue(t2.done())
self.assertTrue(t2.result())
self.assertTrue(t3.done())
self.assertTrue(t3.result())
self.assertFalse(t4.done())
# cleanup locked semaphore
sem.release()
self.loop.run_until_complete(t4)
def test_acquire_cancel(self):
sem = asyncio.Semaphore(loop=self.loop)
self.loop.run_until_complete(sem.acquire())
acquire = asyncio.Task(sem.acquire(), loop=self.loop)
self.loop.call_soon(acquire.cancel)
self.assertRaises(
asyncio.CancelledError,
self.loop.run_until_complete, acquire)
self.assertFalse(sem._waiters)
def test_release_not_acquired(self):
sem = asyncio.BoundedSemaphore(loop=self.loop)
self.assertRaises(ValueError, sem.release)
def test_release_no_waiters(self):
sem = asyncio.Semaphore(loop=self.loop)
self.loop.run_until_complete(sem.acquire())
self.assertTrue(sem.locked())
sem.release()
self.assertFalse(sem.locked())
def test_context_manager(self):
sem = asyncio.Semaphore(2, loop=self.loop)
@asyncio.coroutine
def acquire_lock():
raise Return((yield From(sem)))
with self.loop.run_until_complete(acquire_lock()):
self.assertFalse(sem.locked())
self.assertEqual(1, sem._value)
with self.loop.run_until_complete(acquire_lock()):
self.assertTrue(sem.locked())
self.assertEqual(2, sem._value)
def test_context_manager_no_yield(self):
    """Entering `with sem:` without acquiring first must raise."""
    sem = asyncio.Semaphore(2, loop=self.loop)
    with self.assertRaises(RuntimeError) as ctx:
        with sem:
            self.fail('RuntimeError is not raised in with expression')
    self.assertEqual(
        str(ctx.exception),
        '"yield From" should be used as context manager expression')
    # the failed entry must not have consumed a slot
    self.assertEqual(2, sem._value)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| |
#!/usr/bin/env python
"""An object that models the overall state of TUI.
Includes the following items:
- dispatcher: the keyword dispatcher (opscore.actor.CmdKeyVarDispatcher)
note: the network connection is dispatcher.connection
- prefs: the application preferences (TUI.TUIPrefs.TUIPrefs)
- tlSet: the set of toplevels (windows) (RO.Wdg.ToplevelSet)
- root: the root application window (Tkinter.Toplevel);
mostly used when one to execute some Tkinter command
(all of which require an arbitrary Tkinter object)
Note: the model must be created after the Tkinter root
has been created. Otherwise you will get a Tkinter error.
Most items are defined and loaded when the model is created.
However, "tlSet" is empty to start; use this object to add
windows to the application (so their geometry is recorded).
History:
2003-06-09 ROwen
2003-07-18 ROwen Added getConnection, getUsername, getProgID.
2003-10-06 ROwen Added getCmdr; changed getProgramName to getProgID
and made it return in the case used by the Hub.
2003-10-10 ROwen Modified to use new RO.Comm.HubConnection
2004-02-03 ROwen Modified to use RO.OS.getPrefsDir and thus to
look for the geom file where it really belongs.
2004-08-11 ROwen Modified to use RO.Constants.
2004-09-03 ROwen Modified for RO.Wdg._setHelpURLBase -> RO.Constants._setHelpURLBase.
2004-09-08 ROwen Added logMsg method.
2005-01-05 ROwen Changed logMsg state -> severity.
Bug fix: logMsg was misusing severity (formerly state).
2005-06-16 ROwen Modified logMsg for updated KeyDispatcher.logMsg.
2005-08-02 ROwen Modified to find the help directory without it being a package.
2005-09-28 ROwen Modified to use RO.OS.getPrefsDirs instead of getPrefsDir.
2005-10-06 ROwen getprefsDir needs new inclNone=True argument.
2006-03-30 ROwen Supply platform info during login.
2006-10-25 ROwen Enhanced the logMsg function:
- Added keyword argument
- Output is now formatted like hub output.
2009-03-27 ROwen Switch to opscore.actor dispatcher.
2009-03-31 ROwen Changed to use Model() instead of getModel(); internally changed to use
singleton pattern based on __new__.
Renamed instance variable root to tkRoot to be less ambiguous.
Modified for use twisted; added instance variable "reactor".
2009-07-21 ROwen Modified to set new CmdKeyVarDispatcher flag includeName.
2009-07-22 ROwen Modified to log to stdout in test mode (to compensate for the fact
that the dispatcher's default changed to not log).
2009-10-03 ROwen Changed name of prefs file from TUIGeom to <ApplicationName>Geom.
2009-11-09 ROwen Removed a redundant import.
2010-03-10 ROwen getLoginExtras returns more useful info on Mac.
Changed TUI to Version.ApplicationName in various places.
2010-03-18 ROwen Moved _getGeomFile to TUI.TUIPaths.getGeomFile.
2010-05-05 ROwen Modified to not set the twisted.internet.reactor; that now happens in Main.py
before any other part of twisted is imported.
2010-05-20 ROwen Undo the changes of 2010-05-05 and 2010-05-10 since it broke test code.
2010-06-25 ROwen Added logSource field and MaxLogWindows global.
2010-06-28 ROwen Removed unused import (thanks to pychecker).
2010-06-29 ROwen Replaced "stui" with TUI.Version.ApplicationName.lower().
Adapted for change to LogSource (pass dispatcher as an argument).
Bug fix: debug function logToStdOut had unwanted "self" as first argument.
2010-08-25 ROwen Fixed logToStdOut function that prints received messages in test mode;
it was printing messages such as <TUI.Models.LogSource.LogSource object at 0x25e3a30>.
2011-08-16 ROwen Added logFunc.
2013-07-19 ROwen Replaced getLoginExtra function with getPlatform.
2013-10-22 ROwen Implement ticket #1802: increase # of log windows from 5 to 10.
"""
import platform
import sys
import traceback
import twisted.internet.tksupport
import RO.Comm
import RO.Comm.HubConnection
import RO.Constants
import RO.OS
import RO.TkUtil
import RO.Wdg
import opscore.actor.model
import opscore.actor.cmdkeydispatcher
import Tkinter
import TUI.TUIPaths
import TUI.TUIPrefs
import TUI.Version
import LogSource
MaxLogWindows = 10
class Model(object):
    """Singleton holding the overall state of the TUI application.

    Call ``Model()`` anywhere; every call returns the same instance
    (enforced in __new__). ``Model(testMode=True)`` substitutes a null
    network connection and echoes received log entries to stdout.

    Key instance variables:
    - dispatcher: keyword dispatcher (its .connection is the network link)
    - logSource / logFunc: log entry source and convenience logger
    - prefs: application preferences
    - tlSet: toplevel (window) set; starts empty
    - tkRoot: the root Tk window
    - reactor: the twisted reactor driving Tk
    """
    def __new__(cls, testMode=False):
        # Singleton: the first constructed instance is cached on the class.
        if hasattr(cls, 'self'):
            return cls.self
        cls.self = object.__new__(cls)
        self = cls.self
        # Creating a throwaway Frame forces Tkinter to create (and hand us)
        # its root toplevel.
        self.tkRoot = Tkinter.Frame().winfo_toplevel()
        # Drive the Tk event loop from the twisted reactor.
        twisted.internet.tksupport.install(self.tkRoot)
        self.reactor = twisted.internet.reactor
        platformStr = getPlatform()
        loginExtraStr = "type=%r version=%r platform=%r" % \
            (TUI.Version.ApplicationName, TUI.Version.VersionName, platformStr)
        # network connection
        if testMode:
            print "Running in test mode, no real connection possible"
            connection = RO.Comm.HubConnection.NullConnection()
        else:
            connection = RO.Comm.HubConnection.HubConnection(
                loginExtra = loginExtraStr,
            )
        # keyword dispatcher
        self.dispatcher = opscore.actor.cmdkeydispatcher.CmdKeyVarDispatcher(
            name = TUI.Version.ApplicationName.lower(),
            connection = connection,
            includeName = False,
            callKeyVarsOnDisconnect = True,
        )
        opscore.actor.model.Model.setDispatcher(self.dispatcher)
        # log source
        self.logSource = LogSource.LogSource(self.dispatcher)
        if testMode:
            # Nothing displays the log in test mode, so echo each entry.
            def logToStdOut(logSource):
                print logSource.lastEntry.getStr(), # final comma prevents extra newlines
            self.logSource.addCallback(logToStdOut)
        # function to log a message
        self.logFunc = self.logSource.logMsg
        # TUI preferences
        self.prefs = TUI.TUIPrefs.TUIPrefs()
        # TUI window (topLevel) set;
        # this starts out empty; others add windows to it
        self.tlSet = RO.Wdg.ToplevelSet(
            fileName = TUI.TUIPaths.getGeomFile(),
            createFile = True, # create file if it doesn't exist
        )
        # set up standard bindings (since the defaults are poor)
        RO.Wdg.stdBindings(self.tkRoot)
        # set up the base URL for TUI help
        RO.Constants._setHelpURLBase (getBaseHelpURL())
        return self

    def __init__(self, *args, **kargs):
        # All initialization happens in __new__ (singleton); nothing to do.
        pass

    def getConnection(self):
        """Return the network connection, an RO.Comm.HubConnection object.
        """
        return self.dispatcher.connection

    def getCmdr(self):
        """Return the commander (in the form program.username)
        assigned by the Hub, or None if not connected.
        """
        return self.getConnection().getCmdr()

    def getProgID(self):
        """Return the program ID (in the case the hub uses),
        or None if not connected.
        """
        return self.getConnection().getProgID()

    def getUsername(self):
        """Return the user name assigned by the Hub,
        or None if not connected.
        """
        return self.getConnection().getUsername()

    def logMsg(self,
        msgStr,
        severity = RO.Constants.sevNormal,
        copyStdErr = False,
        doTraceback = False,
        keyword = "Text",
    ):
        """Writes a message to the log window, if available,
        else to standard error.

        Inputs:
        - msgStr: message to display; a final newline is appended
        - severity: one of RO.Constants.sevNormal, sevWarning or sevError
        - copyStdErr: write copy to standard error?
        - doTraceback: write traceback to standard error?
          (if True then a copy of msgStr is always written to std error)
        - keyword: keyword for message string; use None if msgStr is already
          in keyword-value format.

        Note: use tuiModel.dispatcher.logMsg if you want full control
        over the message format.
        """
        lcName = TUI.Version.ApplicationName.lower()
        # Wrap the message in hub-style ".actor msgID cmdr keyword=value" form
        # so it is formatted like normal hub output.
        if keyword:
            msgStr = ".%s 0 %s %s=%r" % (lcName, lcName, keyword, msgStr)
        else:
            msgStr = ".%s 0 %s %s" % (lcName, lcName, msgStr,)
        self.dispatcher.logMsg(msgStr, severity = severity)
        if copyStdErr or doTraceback:
            sys.stderr.write (msgStr + "\n")
            if doTraceback:
                traceback.print_exc(file=sys.stderr)
def getBaseHelpURL():
    """Return the file URL to the base directory for help."""
    parts = RO.OS.splitPath(RO.OS.getResourceDir(TUI, "Help"))
    # drop a leading "/" component so it doesn't double up in the URL
    if parts[0] == "/":
        parts = parts[1:]
    urlPath = "/".join(parts)
    if not urlPath.endswith("/"):
        urlPath = urlPath + "/"
    return "file:///" + urlPath
def getPlatform():
    """Return a one-line string describing the host platform.

    On Macs, when the OS version is available, the leading
    "Darwin-<kernel>" portion is replaced with "MacOSX-<version>".
    """
    desc = platform.platform()
    if not desc.lower().startswith("darwin"):
        return desc
    try:
        # try to replace Version-kernel#- with MacOSX-vers#-;
        # this fails on some versions of Python, so ignore errors
        macVers = platform.mac_ver()[0]
        if macVers:
            trailing = desc.split("-", 2)[-1]
            desc = "MacOSX-%s-%s" % (macVers, trailing)
    except Exception:
        pass
    return desc
if __name__ == "__main__":
    # Smoke test: build the model and show the computed help URL.
    tuiModel = Model()
    print "getBaseHelpURL = ", getBaseHelpURL()
| |
from ImageFile import _tilesort
from retrogamelib import button, font, display, gameobject
from retrogamelib.util import *
from retrogamelib.constants import *
from blockEngine import *
from objects import Player, DropBox, Gate, Solid
from retrogamelib import clock
from levels import LEVELS
from pygame import draw
class Game(object):
def __init__(self):
self.objects = gameobject.Group()
self.coins = gameobject.Group()
self.dropBs = gameobject.Group()
self.solids = gameobject.Group()
self.font = font.Font(GAMEBOY_FONT, (50, 50, 50))
self.gates = gameobject.Group()
self.background = load_image("data/bg.png")
self.levelCompleted = False
#Player.groups = [self.objects]
self.level = 0
Player.groups = [self.objects]
Solid.groups = [self.objects, self.solids]
Gate.groups = [self.objects, self.gates]
DropBox.groups = [self.objects, self.dropBs]
Coin.groups = [self.objects, self.coins]
self.engine = BlockEngine()
self.camera = pygame.Rect(0, 0, GBRES[0], GBRES[1])
self.matrix = 0
def startLevel(self, level):
self.show_win_screen = False
self.player = Player()
if self.lives > 0:
for obj in self.objects:
obj.kill()
self.player = Player()
self.matrix = self.engine.parseLevel(level)
self.camera.centerx = self.player.rect.centerx
else:
self.won = False
self.playing = False
self.lose()
self.tiles = self.engine.parseLevel(level)
def loop(self):
self.playing = True
while self.playing:
self.handle_input()
self.update()
self.draw()
def handle_input(self):
button.handle_input()
if button.is_pressed(START):
self.pause()
if button.is_pressed(A_BUTTON) and button.is_held(SELECT):
self.playing = False
def update(self):
clock.tick()
for object in self.objects:
if (object.rect.right >= self.camera.left and \
object.rect.left <= self.camera.right) or \
object.always_update == True:
object.update(self.engine.tiles)
object.always_update = True
self.camera.centerx = self.player.rect.centerx
if self.camera.left < 0:
self.camera.left = 0
if self.camera.right > len(self.engine.tiles[0])*16:
self.camera.right = len(self.engine.tiles[0])*16
# Make sure we don't move off the far left of the level
if self.player.rect.left < 0:
self.player.rect.left = 0
if self.player.rect.bottom > 144:
self.player.rect.bottom = 144
# Get rich quick!
if button.is_pressed(B_BUTTON):
self.player.falling = True
(x, y) = (self.player.rect.left, self.player.rect.top)
(newX, newY) = self.engine.rotateLeftTile((x, y), 144)
self.player.rect.left = newX
self.player.rect.top = newY
for obj in self.objects:
if not obj == self.player:
obj.kill()
self.matrix = self.engine.rotateLeft()
self.engine.parseMatrix(self.matrix)
if button.is_pressed(A_BUTTON):
self.player.falling = True
(x, y) = (self.player.rect.left, self.player.rect.top)
(newX, newY) = self.engine.rotateLeftTile((x, y), 144)
self.player.rect.left = newX
self.player.rect.top = newY
for obj in self.objects:
if not obj == self.player:
obj.kill()
self.matrix = self.engine.rotateRight()
self.engine.parseMatrix(self.matrix)
for c in self.coins:
if self.player.rect.colliderect(c.rect):
c.kill()
c.looted = True
self.score += 25
play_sound("data/coin.ogg")
for s in self.solids:
if self.player.rect.colliderect(s.rect):
cX = s.rect.centerx
cY = s.rect.centery
pX = self.player.rect.centerx
pY = self.player.rect.centery
if (pX-cX) == 0:
self.player.rect.bottom = s.rect.top
else:
slope = (pY-cY)/(pX-cX)
if slope == 0:
if pX>cX:
self.player.rect.left = s.rect.right
else:
self.player.rect.right = s.rect.left
elif slope > 0 and slope <= 16.0/11:
#slu4ai I
self.player.rect.left = s.rect.right
elif slope < 0 and slope >= -16.0/11:
#slu4ai III
self.player.rect.right = s.rect.left
else:
#slu4ai II
self.player.rect.bottom = s.rect.top
for s in self.dropBs:
if self.player.rect.colliderect(s.rect):
cX = s.rect.centerx
cY = s.rect.centery
pX = self.player.rect.centerx
pY = self.player.rect.centery
if (pX-cX) == 0:
self.player.rect.bottom = s.rect.top
else:
slope = (pY-cY)/(pX-cX)
if slope == 0:
if pX>cX:
self.player.rect.left = s.rect.right
else:
self.player.rect.right = s.rect.left
elif slope > 0 and slope <= 16.0/11:
#slu4ai I
self.player.rect.left = s.rect.right
elif slope < 0 and slope >= -16.0/11:
#slu4ai III
self.player.rect.right = s.rect.left
else:
#slu4ai II
self.player.rect.bottom = s.rect.top
for db in self.dropBs:
print "tile", self.matrix[db.x][db.y], db.x, db.y
if(db.y + 1 < len(self.matrix)):
if self.matrix[db.x][db.y + 1] == '.' or self.matrix[db.x][db.y + 1] == 'C':
temp = self.matrix[db.x][db.y + 1]
self.matrix[db.x][db.y+1] = 'D'
self.matrix[db.x][db.y] = temp
db.y = db.y + 1
for g in self.gates:
if self.player.rect.colliderect(g.rect):
self.levelCompleted = True
'''
if self.player.falling:
self.player.rect.bottom = s.rect.top
#x = self.player.rect.centerx // 16
#y = self.player.rect.centery // 16
#self.player.rect.centerx = x
#self.player.rect.centery = y
#print self.matrix[y][x]
#self.player.falling = False
self.player.facing = 0
#print self.player.facing
if self.player.facing == 1:
if self.player.rect.bottom - 1 < s.rect.top:
print 'collide'
self.player.rect.right = s.rect.left
elif self.player.facing == -1:
if self.player.rect.bottom - 1 < s.rect.top:
print 'collide'
self.player.rect.left = s.rect.right
'''
#print self.player.rect.right, s.rect.left
#print "collide"
def draw(self):
clock.tick()
screen = display.get_surface()
screen.fill(GB_SCREEN_COLOR)
screen.blit(self.background, ((-self.camera.x/2) % 160, 0))
screen.blit(self.background, (((-self.camera.x/2) - 160) % -160, 0))
screen.blit(self.background, (((-self.camera.x/2) + 160) % 160, 0))
# screen.blit(self.image, (self.rect.x - camera.x + self.offsetx,
# self.rect.y - camera.y + self.offsety))
for object in self.objects:
object.draw(screen, self.camera)
ren = self.font.render("score level x%d" % self.lives)
screen.blit(ren, (4, 4))
ren = self.font.render("%06d %d-1" % (self.score, self.level-1))
screen.blit(ren, (4, 14))
#screen.blit(self.lifeicon, (160-30, 2))
if self.levelCompleted:
self.levelCompleted = False
self.level += 1
print "lvl", self.level
if self.level == len(LEVELS):
self.startLevel(LEVELS[self.level])
display.update()
| |
#!/usr/bin/env python
"""An object that models the current state of Agile.
It contains instance variables that are KeyVariables
or sets of KeyVariables. Most of these are directly associated
with status keywords and a few are ones that I generate.
Thus it is relatively easy to get the current value of a parameter
and it is trivial to register callbacks for when values change
or register ROWdg widgets to automatically display updating values.
Note: expStatus is omitted because agileExpose outputs similar information
that is picked up by the exposure model.
History:
2008-11-10 ROwen preliminary; does not include support for the filterwheel
2009-04-17 ROwen Added many new keywords.
2009-06-24 ROwen Added filter keywords.
"""
__all__ = ["getModel"]
import RO.CnvUtil
import RO.Wdg
import RO.KeyVariable
import TUI.TUIModel
# reasonable time for fairly fast commands
_TimeLim = 80

# lazily-created singleton; always access via getModel()
_theModel = None
def getModel():
    """Return the shared _Model instance, creating it on first use."""
    global _theModel
    if _theModel is not None:
        return _theModel
    _theModel = _Model()
    return _theModel
class _Model (object):
    """Keyword-variable model of the current state of the Agile instrument.

    One shared instance; obtain it via getModel(). Each attribute is an
    RO.KeyVariable tied to an actor status keyword (see each variable's
    ``description`` for its meaning).
    """
    def __init__(self,
    **kargs):
        tuiModel = TUI.TUIModel.getModel()
        self.actor = "agile"
        self.dispatcher = tuiModel.dispatcher
        self.timelim = _TimeLim
        self.arcsecPerPixel = 0.273
        keyVarFact = RO.KeyVariable.KeyVarFactory(
            actor = self.actor,
            converters = str,
            nval = 1,
            dispatcher = self.dispatcher,
        )

        # Filter wheel and filter slide
        # make sure fwNames comes before currFilter
        self.fwConnState = keyVarFact(
            keyword = "fwConnState",
            nval = 2,
            description = """Filter wheel connection state:
            - state: one of Connected, Disconnected, Connecting, Disconnecting
            - description: explanation for state (if any)
            """,
        )
        # BUG FIX: this variable was previously assigned to self.fwConfigPath,
        # which was immediately overwritten by the real fwConfigPath variable
        # below, leaving the filter-slide configuration unreachable.
        self.fSlideConfig = keyVarFact(
            keyword = "fSlideConfig",
            converters = (str, RO.CnvUtil.asFloatOrNone),
            nval = 2,
            description = "Filter slide configuration: filter name, focus offset (um)",
        )
        self.fwConfigPath = keyVarFact(
            keyword = "fwConfigPath",
            description = "Path of filter wheel config file",
        )
        self.fwNames = keyVarFact(
            keyword = "fwNames",
            nval = [1,None],
            description = "Name of filter in each filter wheel slot; name is ? if unknown",
        )
        self.fwMoveDuration = keyVarFact(
            keyword = "fwMoveDuration",
            converters = RO.CnvUtil.asInt,
            description = "Expected time to completion of filter move (sec)",
            allowRefresh = False,
        )
        self.fwOffsets = keyVarFact(
            keyword = "fwOffsets",
            converters = RO.CnvUtil.asFloatOrNone,
            nval = [1,None],
            description = "Focus offset of filter in each filter wheel slot; offset is NaN if unknown",
        )
        self.fwSlotMinMax = keyVarFact(
            keyword = "fwSlotMinMax",
            converters = RO.CnvUtil.asIntOrNone,
            nval = 2,
            description = "Minimum and maximum filterwheel slot number",
        )
        self.fwStatus = keyVarFact(
            keyword = "fwStatus",
            converters = (
                RO.CnvUtil.asIntOrNone,
                RO.CnvUtil.asIntOrNone,
                RO.CnvUtil.asIntOrNone,
                RO.CnvUtil.asFloatOrNone,
            ),
            nval = 4,
            description = """Filter wheel status:
            * currSlot: current slot
            * desSlot: desired slot
            * statusWord: status word as hex constant (0x...)
            * estRemTime: estimated remaining time for current command (sec)
            """,
        )
        self.currFilter = keyVarFact(
            keyword = "currFilter",
            converters = (
                RO.CnvUtil.asIntOrNone,
                str,
                RO.CnvUtil.BoolOrNoneFromStr(trueStrs="In", falseStrs="Out", badStrs="?"),
                str,
                RO.CnvUtil.asFloatOrNone,
            ),
            nval=5,
            description = """Information about current filter:
            * slotNum: filter wheel slot number
            * slotName: name of filter in filterwheel slot
            * slide position: one of In/Out/?
            * slide name: name of filter in filter slide if slide is In, else ""
            * focusOffset: focus offset in um
            """,
        )

        # Detector
        self.detSizeConst = (1024, 1024)
        self.bin = keyVarFact(
            keyword="bin",
            nval = 1,
            converters=RO.CnvUtil.asIntOrNone,
            description="bin factor (x=y)",
        )
        self.extSync = keyVarFact(
            keyword="extSync",
            nval = 1,
            converters=RO.CnvUtil.asBoolOrNone,
            description="use external sync for accurate timing",
        )
        self.gain = keyVarFact(
            keyword="gain",
            nval = 1,
            description="amplifier gain; one of low, med or high",
        )
        self.overscan = keyVarFact(
            keyword="overscan",
            nval = 2,
            converters=RO.CnvUtil.asIntOrNone,
            description="overscan: x, y (binned pixels)",
        )
        self.readRate = keyVarFact(
            keyword="readRate",
            nval = 1,
            description="pixel readout rate; one of slow or fast",
        )
        self.window = keyVarFact(
            keyword="window",
            nval = 4,
            converters=RO.CnvUtil.asIntOrNone,
            description="window (subframe): minX, minY, maxX, maxY (binned pixels; inclusive)",
        )

        # Exposure Metadata
        self.numCircBufImages = keyVarFact(
            keyword = "numCircBufImages",
            nval = 2,
            converters = RO.CnvUtil.asIntOrNone,
            description = "Number of images in the circular buffer, maximum allowed",
        )
        self.readoutTime = keyVarFact(
            keyword = "readoutTime",
            nval = 1,
            converters = RO.CnvUtil.asFloatOrNone,
            description = "Time to read out an exposure (sec)",
        )

        # Environment
        self.cameraConnState = keyVarFact(
            keyword = "cameraConnState",
            nval = 2,
            description = """Camera connection state:
            - state: one of Connected, Disconnected, Connecting, Disconnecting
            - description: explanation for state (if any)
            """,
        )
        self.ccdTemp = keyVarFact(
            keyword = "ccdTemp",
            nval = 2,
            converters = (RO.CnvUtil.asFloatOrNone, str),
            description = "CCD temperature (C) and state summary",
        )
        self.ccdSetTemp = keyVarFact(
            keyword = "ccdSetTemp",
            nval = 2,
            converters = (RO.CnvUtil.asFloatOrNone, str),
            description = "CCD temperature setpoint (C) and state summary",
        )
        self.ccdTempLimits = keyVarFact(
            keyword = "ccdTempLimits",
            nval = 4,
            converters = RO.CnvUtil.asFloatOrNone,
            description = "CCD temperature error limit: low, high, veryLow, veryHigh",
        )
        self.gpsSynced = keyVarFact(
            keyword = "gpsSynced",
            nval = 1,
            converters = RO.CnvUtil.asBoolOrNone,
            description = "Sync pulse clock card synced to GPS clock?",
        )
        self.ntpStatus = keyVarFact(
            keyword = "ntpStatus",
            nval = 3,
            converters = (RO.CnvUtil.asBoolOrNone, str, RO.CnvUtil.asIntOrNone),
            description = """State of NTP time synchronization:
            - ntp client running
            - ntp server name (abbreviated)
            - npt server stratum
            """,
        )

        # Parameters
        self.biasSecGap = keyVarFact(
            keyword = "biasSecGap",
            nval = 1,
            converters = RO.CnvUtil.asIntOrNone,
            description = "Unbinned pixels in overscan to skip before bias section",
        )
        self.defBin = keyVarFact(
            keyword = "defBin",
            nval = 1,
            converters = RO.CnvUtil.asIntOrNone,
            description = "Default bin factor",
        )
        self.defGain = keyVarFact(
            keyword = "defGain",
            nval = 1,
            description = "Default gain",
        )
        self.defReadRate = keyVarFact(
            keyword = "defReadRate",
            nval = 1,
            description = "Default read rate",
        )
        self.defExtSync = keyVarFact(
            keyword = "defExtSync",
            nval = 1,
            converters = RO.CnvUtil.asBoolOrNone,
            description = "Default for use external sync for accurate timing",
        )
        self.maxOverscan = keyVarFact(
            keyword = "maxOverscan",
            nval = 1,
            converters = RO.CnvUtil.asIntOrNone,
            description = "Maximum overscan (in unbinned pixels)",
        )
        self.minExpTime = keyVarFact(
            keyword = "minExpTime",
            nval = 1,
            converters = RO.CnvUtil.asFloatOrNone,
            description = "Minimum exposure time (sec)",
        )
        self.minExpOverheadTime = keyVarFact(
            keyword = "minExpOverheadTime",
            nval = 1,
            converters = RO.CnvUtil.asFloatOrNone,
            description = "Minimum time (sec) by which exposure time must exceed readout time",
        )

        # register the keywords above for the refresh command
        keyVarFact.setKeysRefreshCmd()
if __name__ == "__main__":
    # Smoke test: construct the model (registers all keyword variables).
    getModel()
| |
"""
configuration objects for holmium
"""
import sys
import os
import inspect
import jinja2
from selenium import webdriver
from selenium.webdriver import FirefoxProfile
class Config(dict):
    """Dictionary like helper class for maintaining test data configurations
    per environment.

    :class:`holmium.core.TestCase` and :class:`holmium.core.HolmiumNose` both
    look for either a config.json or config.py file in the same directory as
    the test file, and will make a ``config`` object available to the test
    case instance.

    The :class:`holmium.core.Config` object is aware of the environment
    (specified with ``--holmium-env`` when using nose or ``HO_ENV`` as an
    environment variable) and will return the config variable from that
    environment or from the `default` key.

    Values in the config file can use :class:`jinja2.Template` templates to
    access either values from itself, environment variables or a select magic
    holmium variables: ``holmium.environment``, ``holmium.browser``,
    ``holmium.user_agent`` and ``holmium.remote``.

    Example config structure (which uses a magic variable
    ``holmium.environment`` and an environment variable ``$PATH``), JSON::

        {
         'default': { 'path': "{{PATH}}"
                    , 'login_url': '{{url}}/{{holmium.environment}}/login'
                    , 'username' : '{{holmium.environment}}user'}
        ,'production': {'url': 'http://prod.com', 'password': 'sekret'}
        ,'development': {'url': 'http://dev.com', 'password': 'password'}
        }

    When accessing ``self.config`` within a test, due to the default:

    * ``self.config['path']`` will always return the value of the environment
      variable `PATH`,
    * ``self.config['password']`` will always return 'sekret'

    if ``HO_ENV`` or ``--holmium-env`` are ``production``:

    * ``self.config['username']`` will return ``productionuser``
    * ``self.config['password']`` will return ``sekret``
    * ``self.config['login_url']`` will return ``http://prod.com/production/login``

    if ``HO_ENV`` or ``--holmium-env`` are ``development``:

    * ``self.config['username']`` will return ``developmentuser``
    * ``self.config['password']`` will return ``password``
    * ``self.config['login_url']`` will return ``http://dev.com/development/login``
    """
    # The mutable default is intentional: it is a shared, read-only template.
    # pylint: disable=dangerous-default-value
    def __init__(self, dct,
                 environment={"holmium": {"environment": "development"}}):
        self.env = environment
        dict.__init__(self, dct)

    def __getitem__(self, key):
        """
        override to evaluate the values through the template
        """
        def __render(item, context):
            """
            renders the string given the context using the jinja template
            """
            def _check_string_type(_item):
                """
                meh python2/3 stuff.
                """
                if isinstance(_item, str):
                    return True
                elif sys.version_info < (3, 0, 0):
                    # Python 2 only: also accept unicode. eval() keeps the
                    # bare name "unicode" from breaking under Python 3.
                    if isinstance(_item, eval("unicode")):
                        return True
                return False
            if _check_string_type(item):
                template = jinja2.Template(item)
                rendered = template.render(context)
                # Re-render until the value stops changing so templated
                # values may reference other templated values.
                if rendered != item:
                    return __render(rendered, context)
                else:
                    return rendered
            else:
                # non-string values pass through untouched
                return item
        # Resolve the key in the active environment bucket first, then fall
        # back to the "default" bucket (buckets are created on demand).
        env_ctx = dict.setdefault(self, self.env["holmium"]["environment"], {})
        default_ctx = dict.setdefault(self, "default", {})
        try:
            item = env_ctx[key]
        except KeyError:
            item = default_ctx[key]
        # Template context, later updates win: whole config, OS environment,
        # holmium magic variables, defaults, active environment.
        context = dict(self)
        context.update(os.environ)
        context.update(self.env)
        context.update(default_ctx)
        context.update(env_ctx)
        return __render(item, context)

    def __setitem__(self, key, value):
        """
        override to put the value in the right environment bucket
        """
        sub_dict = dict.setdefault(self, self.env["holmium"]["environment"], {})
        sub_dict[key] = value
# Map of browser name -> selenium webdriver factory used to launch it.
# "remote" and the mobile targets (iphone/ipad/android) all go through
# webdriver.Remote.
BROWSER_MAPPING = {"firefox": webdriver.Firefox,
                   "chrome": webdriver.Chrome,
                   "safari": webdriver.Safari,
                   "ie": webdriver.Ie,
                   "opera": webdriver.Opera,
                   "remote": webdriver.Remote,
                   "phantomjs": webdriver.PhantomJS,
                   "iphone": webdriver.Remote,
                   "ipad": webdriver.Remote,
                   "android": webdriver.Remote}
# Map of browser name -> default selenium DesiredCapabilities template
# ("remote" has no entry: its capabilities come from the concrete browser).
CAPABILITIES = {"firefox": webdriver.DesiredCapabilities.FIREFOX,
                "chrome": webdriver.DesiredCapabilities.CHROME,
                "safari": webdriver.DesiredCapabilities.SAFARI,
                "ie": webdriver.DesiredCapabilities.INTERNETEXPLORER,
                "opera": webdriver.DesiredCapabilities.OPERA,
                "phantomjs": webdriver.DesiredCapabilities.PHANTOMJS,
                "iphone": webdriver.DesiredCapabilities.IPHONE,
                "ipad": webdriver.DesiredCapabilities.IPAD,
                "android": webdriver.DesiredCapabilities.ANDROID}
class HolmiumConfig(dict):
    """
    utility class for storing holmium configuration options strictly.

    The class behaves like a dictionary after construction
    with the additional behavior that any attributes set on it are available
    as keys in the dictionary and vice versa.
    """
    # pylint: disable=too-many-arguments
    def __init__(self, browser, remote, capabilities, user_agent, environment,
                 ignore_ssl, fresh_instance):
        # Mirror every constructor argument as both an attribute and a key.
        # BUG FIX: the original introspected the signature with
        # inspect.getargspec, which was removed in Python 3.11; an explicit
        # mapping keeps the exact same behavior on all versions.
        data = dict(
            browser=browser,
            remote=remote,
            capabilities=capabilities,
            user_agent=user_agent,
            environment=environment,
            ignore_ssl=ignore_ssl,
            fresh_instance=fresh_instance,
        )
        for name, value in data.items():
            setattr(self, name, value)
        super(HolmiumConfig, self).__init__(**data)

    def __setattr__(self, key, value):
        # keep attribute and mapping views in sync
        super(HolmiumConfig, self).__setattr__(key, value)
        super(HolmiumConfig, self).__setitem__(key, value)

    def __setitem__(self, key, value):
        # keep attribute and mapping views in sync
        super(HolmiumConfig, self).__setattr__(key, value)
        super(HolmiumConfig, self).__setitem__(key, value)
# pylint: disable=too-few-public-methods
class DriverConfig(object):
    """
    base class for configuring a webdriver.

    Subclasses adjust the keyword-argument dict for a specific browser;
    the base implementation passes the arguments through untouched.
    """
    def __call__(self, config, args):
        return args
class FirefoxConfig(DriverConfig):
    """
    configuration for firefox
    """
    def __call__(self, config, args):
        profile = FirefoxProfile()
        if config.user_agent:
            profile.set_preference("general.useragent.override",
                                   config.user_agent)
        if config.ignore_ssl:
            profile.accept_untrusted_certs = True
        args["firefox_profile"] = profile
        # The local Firefox driver takes `capabilities`, not
        # `desired_capabilities`; rename the key.
        args["capabilities"] = args.pop("desired_capabilities")
        return args
class ChromeConfig(DriverConfig):
    """
    configuration for chrome
    """
    def __call__(self, config, args):
        # chrome options travel inside the capabilities dict
        switches = args["desired_capabilities"].setdefault(
            "chrome.switches", [])
        if config.user_agent:
            switches.append("--user-agent=%s" % config.user_agent)
        if config.ignore_ssl:
            switches.append("--ignore-certificate-errors")
        return super(ChromeConfig, self).__call__(config, args)
class PhantomConfig(DriverConfig):
    """
    configuration for phantomjs
    """
    def __call__(self, config, args):
        if config.ignore_ssl:
            # BUG FIX: each command-line option must be its own list element.
            # The original appended both flags as one space-joined string,
            # which phantomjs receives as a single unrecognized argument.
            args.setdefault("service_args", []).extend(
                ["--ignore-ssl-errors=true", "--ssl-protocol=tlsv1"])
        return super(PhantomConfig, self).__call__(config, args)
class IeConfig(DriverConfig):
    """
    configuration for internet explorer
    """
    def __call__(self, config, args):
        # guarantee the key exists even when no capabilities were supplied
        caps = args.pop('desired_capabilities', {})
        args['desired_capabilities'] = caps
        return super(IeConfig, self).__call__(config, args)
class RemoteConfig(DriverConfig):
    """
    configuration for remote driver (and anything that doesnt have a
    specific configuration)
    """
    def __call__(self, config, args):
        if config.browser == "firefox":
            # Remote drivers expect `browser_profile` instead of the local
            # driver's `firefox_profile` argument.
            if "firefox_profile" in args:
                args["browser_profile"] = args.pop("firefox_profile")
        # undo the local-driver rename: Remote wants `desired_capabilities`
        args["desired_capabilities"] = args.pop("capabilities")
        args["command_executor"] = config.remote
        return super(RemoteConfig, self).__call__(config, args)
# Browser name -> DriverConfig instance that adapts the webdriver keyword
# arguments for that browser. Browsers without an entry use the arguments
# unmodified (see configure()).
CONFIGURATOR_MAPPER = {
    "firefox": FirefoxConfig(),
    "chrome": ChromeConfig(),
    "phantomjs": PhantomConfig(),
    "ie": IeConfig(),
    "remote": RemoteConfig()
}
def configure(config):
    """
    sets up the arguments required by the specific
    :class:`selenium.webdriver.Webdriver` instance
    based on the :class:`holmium.core.config.HolmiumConfig`
    object that is passed in.

    Raises RuntimeError for an unknown browser name.
    """
    if config.browser not in BROWSER_MAPPING:
        raise RuntimeError("unknown browser %s" % config.browser)
    # BUG FIX: copy the capability template before merging. The original
    # called .update() on the module-level CAPABILITIES entry, leaking
    # capabilities from one configure() call into every later one.
    merged_capabilities = dict(CAPABILITIES[config.browser])
    merged_capabilities.update(config.capabilities)
    args = {"desired_capabilities": merged_capabilities}
    if config.browser in CONFIGURATOR_MAPPER:
        args = CONFIGURATOR_MAPPER[config.browser](config, args)
    if config.remote:
        # remote execution rewrites the args once more for webdriver.Remote
        args = CONFIGURATOR_MAPPER["remote"](config, args)
    return args
| |
# coding: utf-8
from django.utils.translation import ugettext_lazy as _
# Nicely titled (and translatable) country names.
COUNTRIES = (
('AF', _(u'Afghanistan')),
('AX', _(u'\xc5land Islands')),
('AL', _(u'Albania')),
('DZ', _(u'Algeria')),
('AS', _(u'American Samoa')),
('AD', _(u'Andorra')),
('AO', _(u'Angola')),
('AI', _(u'Anguilla')),
('AQ', _(u'Antarctica')),
('AG', _(u'Antigua and Barbuda')),
('AR', _(u'Argentina')),
('AM', _(u'Armenia')),
('AW', _(u'Aruba')),
('AU', _(u'Australia')),
('AT', _(u'Austria')),
('AZ', _(u'Azerbaijan')),
('BS', _(u'Bahamas')),
('BH', _(u'Bahrain')),
('BD', _(u'Bangladesh')),
('BB', _(u'Barbados')),
('BY', _(u'Belarus')),
('BE', _(u'Belgium')),
('BZ', _(u'Belize')),
('BJ', _(u'Benin')),
('BM', _(u'Bermuda')),
('BT', _(u'Bhutan')),
('BO', _(u'Bolivia, Plurinational State of')),
('BQ', _(u'Bonaire, Sint Eustatius and Saba')),
('BA', _(u'Bosnia and Herzegovina')),
('BW', _(u'Botswana')),
('BV', _(u'Bouvet Island')),
('BR', _(u'Brazil')),
('IO', _(u'British Indian Ocean Territory')),
('BN', _(u'Brunei Darussalam')),
('BG', _(u'Bulgaria')),
('BF', _(u'Burkina Faso')),
('BI', _(u'Burundi')),
('KH', _(u'Cambodia')),
('CM', _(u'Cameroon')),
('CA', _(u'Canada')),
('CV', _(u'Cape Verde')),
('KY', _(u'Cayman Islands')),
('CF', _(u'Central African Republic')),
('TD', _(u'Chad')),
('CL', _(u'Chile')),
('CN', _(u'China')),
('CX', _(u'Christmas Island')),
('CC', _(u'Cocos (Keeling) Islands')),
('CO', _(u'Colombia')),
('KM', _(u'Comoros')),
('CG', _(u'Congo')),
('CD', _(u'Congo, The Democratic Republic of the')),
('CK', _(u'Cook Islands')),
('CR', _(u'Costa Rica')),
('CI', _(u"C\xf4te D'ivoire")),
('HR', _(u'Croatia')),
('CU', _(u'Cuba')),
('CW', _(u'Cura\xe7ao')),
('CY', _(u'Cyprus')),
('CZ', _(u'Czech Republic')),
('DK', _(u'Denmark')),
('DJ', _(u'Djibouti')),
('DM', _(u'Dominica')),
('DO', _(u'Dominican Republic')),
('EC', _(u'Ecuador')),
('EG', _(u'Egypt')),
('SV', _(u'El Salvador')),
('GQ', _(u'Equatorial Guinea')),
('ER', _(u'Eritrea')),
('EE', _(u'Estonia')),
('ET', _(u'Ethiopia')),
('FK', _(u'Falkland Islands (Malvinas)')),
('FO', _(u'Faroe Islands')),
('FJ', _(u'Fiji')),
('FI', _(u'Finland')),
('FR', _(u'France')),
('GF', _(u'French Guiana')),
('PF', _(u'French Polynesia')),
('TF', _(u'French Southern Territories')),
('GA', _(u'Gabon')),
('GM', _(u'Gambia')),
('GE', _(u'Georgia')),
('DE', _(u'Germany')),
('GH', _(u'Ghana')),
('GI', _(u'Gibraltar')),
('GR', _(u'Greece')),
('GL', _(u'Greenland')),
('GD', _(u'Grenada')),
('GP', _(u'Guadeloupe')),
('GU', _(u'Guam')),
('GT', _(u'Guatemala')),
('GG', _(u'Guernsey')),
('GN', _(u'Guinea')),
('GW', _(u'Guinea-bissau')),
('GY', _(u'Guyana')),
('HT', _(u'Haiti')),
('HM', _(u'Heard Island and McDonald Islands')),
('VA', _(u'Holy See (Vatican City State)')),
('HN', _(u'Honduras')),
('HK', _(u'Hong Kong')),
('HU', _(u'Hungary')),
('IS', _(u'Iceland')),
('IN', _(u'India')),
('ID', _(u'Indonesia')),
('IR', _(u'Iran, Islamic Republic of')),
('IQ', _(u'Iraq')),
('IE', _(u'Ireland')),
('IM', _(u'Isle of Man')),
('IL', _(u'Israel')),
('IT', _(u'Italy')),
('JM', _(u'Jamaica')),
('JP', _(u'Japan')),
('JE', _(u'Jersey')),
('JO', _(u'Jordan')),
('KZ', _(u'Kazakhstan')),
('KE', _(u'Kenya')),
('KI', _(u'Kiribati')),
('KP', _(u"Korea, Democratic People's Republic of")),
('KR', _(u'Korea, Republic of')),
('KW', _(u'Kuwait')),
('KG', _(u'Kyrgyzstan')),
('LA', _(u"Lao People's Democratic Republic")),
('LV', _(u'Latvia')),
('LB', _(u'Lebanon')),
('LS', _(u'Lesotho')),
('LR', _(u'Liberia')),
('LY', _(u'Libyan Arab Jamahiriya')),
('LI', _(u'Liechtenstein')),
('LT', _(u'Lithuania')),
('LU', _(u'Luxembourg')),
('MO', _(u'Macao')),
('MK', _(u'Macedonia, The Former Yugoslav Republic of')),
('MG', _(u'Madagascar')),
('MW', _(u'Malawi')),
('MY', _(u'Malaysia')),
('MV', _(u'Maldives')),
('ML', _(u'Mali')),
('MT', _(u'Malta')),
('MH', _(u'Marshall Islands')),
('MQ', _(u'Martinique')),
('MR', _(u'Mauritania')),
('MU', _(u'Mauritius')),
('YT', _(u'Mayotte')),
('MX', _(u'Mexico')),
('FM', _(u'Micronesia, Federated States of')),
('MD', _(u'Moldova, Republic of')),
('MC', _(u'Monaco')),
('MN', _(u'Mongolia')),
('ME', _(u'Montenegro')),
('MS', _(u'Montserrat')),
('MA', _(u'Morocco')),
('MZ', _(u'Mozambique')),
('MM', _(u'Myanmar')),
('NA', _(u'Namibia')),
('NR', _(u'Nauru')),
('NP', _(u'Nepal')),
('NL', _(u'Netherlands')),
('NC', _(u'New Caledonia')),
('NZ', _(u'New Zealand')),
('NI', _(u'Nicaragua')),
('NE', _(u'Niger')),
('NG', _(u'Nigeria')),
('NU', _(u'Niue')),
('NF', _(u'Norfolk Island')),
('MP', _(u'Northern Mariana Islands')),
('NO', _(u'Norway')),
('OM', _(u'Oman')),
('PK', _(u'Pakistan')),
('PW', _(u'Palau')),
('PS', _(u'Palestinian Territory, Occupied')),
('PA', _(u'Panama')),
('PG', _(u'Papua New Guinea')),
('PY', _(u'Paraguay')),
('PE', _(u'Peru')),
('PH', _(u'Philippines')),
('PN', _(u'Pitcairn')),
('PL', _(u'Poland')),
('PT', _(u'Portugal')),
('PR', _(u'Puerto Rico')),
('QA', _(u'Qatar')),
('RE', _(u'R\xe9union')),
('RO', _(u'Romania')),
('RU', _(u'Russian Federation')),
('RW', _(u'Rwanda')),
('BL', _(u'Saint Barth\xe9lemy')),
('SH', _(u'Saint Helena, Ascension and Tristan Da Cunha')),
('KN', _(u'Saint Kitts and Nevis')),
('LC', _(u'Saint Lucia')),
('MF', _(u'Saint Martin (French Part)')),
('PM', _(u'Saint Pierre and Miquelon')),
('VC', _(u'Saint Vincent and the Grenadines')),
('WS', _(u'Samoa')),
('SM', _(u'San Marino')),
('ST', _(u'Sao Tome and Principe')),
('SA', _(u'Saudi Arabia')),
('SN', _(u'Senegal')),
('RS', _(u'Serbia')),
('SC', _(u'Seychelles')),
('SL', _(u'Sierra Leone')),
('SG', _(u'Singapore')),
('SX', _(u'Sint Maarten (Dutch Part)')),
('SK', _(u'Slovakia')),
('SI', _(u'Slovenia')),
('SB', _(u'Solomon Islands')),
('SO', _(u'Somalia')),
('ZA', _(u'South Africa')),
('GS', _(u'South Georgia and the South Sandwich Islands')),
('SS', _(u'South Sudan')),
('ES', _(u'Spain')),
('LK', _(u'Sri Lanka')),
('SD', _(u'Sudan')),
('SR', _(u'Suriname')),
('SJ', _(u'Svalbard and Jan Mayen')),
('SZ', _(u'Swaziland')),
('SE', _(u'Sweden')),
('CH', _(u'Switzerland')),
('SY', _(u'Syrian Arab Republic')),
('TW', _(u'Taiwan, Province of China')),
('TJ', _(u'Tajikistan')),
('TZ', _(u'Tanzania, United Republic of')),
('TH', _(u'Thailand')),
('TL', _(u'Timor-leste')),
('TG', _(u'Togo')),
('TK', _(u'Tokelau')),
('TO', _(u'Tonga')),
('TT', _(u'Trinidad and Tobago')),
('TN', _(u'Tunisia')),
('TR', _(u'Turkey')),
('TM', _(u'Turkmenistan')),
('TC', _(u'Turks and Caicos Islands')),
('TV', _(u'Tuvalu')),
('UG', _(u'Uganda')),
('UA', _(u'Ukraine')),
('AE', _(u'United Arab Emirates')),
('GB', _(u'United Kingdom')),
('US', _(u'United States')),
('UM', _(u'United States Minor Outlying Islands')),
('UY', _(u'Uruguay')),
('UZ', _(u'Uzbekistan')),
('VU', _(u'Vanuatu')),
('VE', _(u'Venezuela, Bolivarian Republic of')),
('VN', _(u'Viet Nam')),
('VG', _(u'Virgin Islands, British')),
('VI', _(u'Virgin Islands, U.S.')),
('WF', _(u'Wallis and Futuna')),
('EH', _(u'Western Sahara')),
('YE', _(u'Yemen')),
('ZM', _(u'Zambia')),
('ZW', _(u'Zimbabwe')),
)
# Country names in title case, plus an extra entry for each name that
# contains a comma, giving its natural (non-comma'd) word order.
COUNTRIES_PLUS = (
('AF', _(u'Afghanistan')),
('AX', _(u'\xc5land Islands')),
('AL', _(u'Albania')),
('DZ', _(u'Algeria')),
('AS', _(u'American Samoa')),
('AD', _(u'Andorra')),
('AO', _(u'Angola')),
('AI', _(u'Anguilla')),
('AQ', _(u'Antarctica')),
('AG', _(u'Antigua and Barbuda')),
('AR', _(u'Argentina')),
('AM', _(u'Armenia')),
('AW', _(u'Aruba')),
('SH', _(u'Ascension and Tristan Da Cunha Saint Helena')),
('AU', _(u'Australia')),
('AT', _(u'Austria')),
('AZ', _(u'Azerbaijan')),
('BS', _(u'Bahamas')),
('BH', _(u'Bahrain')),
('BD', _(u'Bangladesh')),
('BB', _(u'Barbados')),
('BY', _(u'Belarus')),
('BE', _(u'Belgium')),
('BZ', _(u'Belize')),
('BJ', _(u'Benin')),
('BM', _(u'Bermuda')),
('BT', _(u'Bhutan')),
('VE', _(u'Bolivarian Republic of Venezuela')),
('BO', _(u'Bolivia, Plurinational State of')),
('BQ', _(u'Bonaire, Sint Eustatius and Saba')),
('BA', _(u'Bosnia and Herzegovina')),
('BW', _(u'Botswana')),
('BV', _(u'Bouvet Island')),
('BR', _(u'Brazil')),
('IO', _(u'British Indian Ocean Territory')),
('VG', _(u'British Virgin Islands')),
('BN', _(u'Brunei Darussalam')),
('BG', _(u'Bulgaria')),
('BF', _(u'Burkina Faso')),
('BI', _(u'Burundi')),
('KH', _(u'Cambodia')),
('CM', _(u'Cameroon')),
('CA', _(u'Canada')),
('CV', _(u'Cape Verde')),
('KY', _(u'Cayman Islands')),
('CF', _(u'Central African Republic')),
('TD', _(u'Chad')),
('CL', _(u'Chile')),
('CN', _(u'China')),
('CX', _(u'Christmas Island')),
('CC', _(u'Cocos (Keeling) Islands')),
('CO', _(u'Colombia')),
('KM', _(u'Comoros')),
('CG', _(u'Congo')),
('CD', _(u'Congo, The Democratic Republic of the')),
('CK', _(u'Cook Islands')),
('CR', _(u'Costa Rica')),
('CI', _(u"C\xf4te D'ivoire")),
('HR', _(u'Croatia')),
('CU', _(u'Cuba')),
('CW', _(u'Cura\xe7ao')),
('CY', _(u'Cyprus')),
('CZ', _(u'Czech Republic')),
('KP', _(u"Democratic People's Republic of Korea")),
('DK', _(u'Denmark')),
('DJ', _(u'Djibouti')),
('DM', _(u'Dominica')),
('DO', _(u'Dominican Republic')),
('EC', _(u'Ecuador')),
('EG', _(u'Egypt')),
('SV', _(u'El Salvador')),
('GQ', _(u'Equatorial Guinea')),
('ER', _(u'Eritrea')),
('EE', _(u'Estonia')),
('ET', _(u'Ethiopia')),
('FK', _(u'Falkland Islands (Malvinas)')),
('FO', _(u'Faroe Islands')),
('FM', _(u'Federated States of Micronesia')),
('FJ', _(u'Fiji')),
('FI', _(u'Finland')),
('FR', _(u'France')),
('GF', _(u'French Guiana')),
('PF', _(u'French Polynesia')),
('TF', _(u'French Southern Territories')),
('GA', _(u'Gabon')),
('GM', _(u'Gambia')),
('GE', _(u'Georgia')),
('DE', _(u'Germany')),
('GH', _(u'Ghana')),
('GI', _(u'Gibraltar')),
('GR', _(u'Greece')),
('GL', _(u'Greenland')),
('GD', _(u'Grenada')),
('GP', _(u'Guadeloupe')),
('GU', _(u'Guam')),
('GT', _(u'Guatemala')),
('GG', _(u'Guernsey')),
('GN', _(u'Guinea')),
('GW', _(u'Guinea-bissau')),
('GY', _(u'Guyana')),
('HT', _(u'Haiti')),
('HM', _(u'Heard Island and McDonald Islands')),
('VA', _(u'Holy See (Vatican City State)')),
('HN', _(u'Honduras')),
('HK', _(u'Hong Kong')),
('HU', _(u'Hungary')),
('IS', _(u'Iceland')),
('IN', _(u'India')),
('ID', _(u'Indonesia')),
('IR', _(u'Iran, Islamic Republic of')),
('IQ', _(u'Iraq')),
('IE', _(u'Ireland')),
('IR', _(u'Islamic Republic of Iran')),
('IM', _(u'Isle of Man')),
('IL', _(u'Israel')),
('IT', _(u'Italy')),
('JM', _(u'Jamaica')),
('JP', _(u'Japan')),
('JE', _(u'Jersey')),
('JO', _(u'Jordan')),
('KZ', _(u'Kazakhstan')),
('KE', _(u'Kenya')),
('KI', _(u'Kiribati')),
('KP', _(u"Korea, Democratic People's Republic of")),
('KR', _(u'Korea, Republic of')),
('KW', _(u'Kuwait')),
('KG', _(u'Kyrgyzstan')),
('LA', _(u"Lao People's Democratic Republic")),
('LV', _(u'Latvia')),
('LB', _(u'Lebanon')),
('LS', _(u'Lesotho')),
('LR', _(u'Liberia')),
('LY', _(u'Libyan Arab Jamahiriya')),
('LI', _(u'Liechtenstein')),
('LT', _(u'Lithuania')),
('LU', _(u'Luxembourg')),
('MO', _(u'Macao')),
('MK', _(u'Macedonia, The Former Yugoslav Republic of')),
('MG', _(u'Madagascar')),
('MW', _(u'Malawi')),
('MY', _(u'Malaysia')),
('MV', _(u'Maldives')),
('ML', _(u'Mali')),
('MT', _(u'Malta')),
('MH', _(u'Marshall Islands')),
('MQ', _(u'Martinique')),
('MR', _(u'Mauritania')),
('MU', _(u'Mauritius')),
('YT', _(u'Mayotte')),
('MX', _(u'Mexico')),
('FM', _(u'Micronesia, Federated States of')),
('MD', _(u'Moldova, Republic of')),
('MC', _(u'Monaco')),
('MN', _(u'Mongolia')),
('ME', _(u'Montenegro')),
('MS', _(u'Montserrat')),
('MA', _(u'Morocco')),
('MZ', _(u'Mozambique')),
('MM', _(u'Myanmar')),
('NA', _(u'Namibia')),
('NR', _(u'Nauru')),
('NP', _(u'Nepal')),
('NL', _(u'Netherlands')),
('NC', _(u'New Caledonia')),
('NZ', _(u'New Zealand')),
('NI', _(u'Nicaragua')),
('NE', _(u'Niger')),
('NG', _(u'Nigeria')),
('NU', _(u'Niue')),
('NF', _(u'Norfolk Island')),
('MP', _(u'Northern Mariana Islands')),
('NO', _(u'Norway')),
('PS', _(u'Occupied Palestinian Territory')),
('OM', _(u'Oman')),
('PK', _(u'Pakistan')),
('PW', _(u'Palau')),
('PS', _(u'Palestinian Territory, Occupied')),
('PA', _(u'Panama')),
('PG', _(u'Papua New Guinea')),
('PY', _(u'Paraguay')),
('PE', _(u'Peru')),
('PH', _(u'Philippines')),
('PN', _(u'Pitcairn')),
('BO', _(u'Plurinational State of Bolivia')),
('PL', _(u'Poland')),
('PT', _(u'Portugal')),
('TW', _(u'Province of China Taiwan')),
('PR', _(u'Puerto Rico')),
('QA', _(u'Qatar')),
('KR', _(u'Republic of Korea')),
('MD', _(u'Republic of Moldova')),
('RE', _(u'R\xe9union')),
('RO', _(u'Romania')),
('RU', _(u'Russian Federation')),
('RW', _(u'Rwanda')),
('BL', _(u'Saint Barth\xe9lemy')),
('SH', _(u'Saint Helena, Ascension and Tristan Da Cunha')),
('KN', _(u'Saint Kitts and Nevis')),
('LC', _(u'Saint Lucia')),
('MF', _(u'Saint Martin (French Part)')),
('PM', _(u'Saint Pierre and Miquelon')),
('VC', _(u'Saint Vincent and the Grenadines')),
('WS', _(u'Samoa')),
('SM', _(u'San Marino')),
('ST', _(u'Sao Tome and Principe')),
('SA', _(u'Saudi Arabia')),
('SN', _(u'Senegal')),
('RS', _(u'Serbia')),
('SC', _(u'Seychelles')),
('SL', _(u'Sierra Leone')),
('SG', _(u'Singapore')),
('BQ', _(u'Sint Eustatius and Saba Bonaire')),
('SX', _(u'Sint Maarten (Dutch Part)')),
('SK', _(u'Slovakia')),
('SI', _(u'Slovenia')),
('SB', _(u'Solomon Islands')),
('SO', _(u'Somalia')),
('ZA', _(u'South Africa')),
('GS', _(u'South Georgia and the South Sandwich Islands')),
('SS', _(u'South Sudan')),
('ES', _(u'Spain')),
('LK', _(u'Sri Lanka')),
('SD', _(u'Sudan')),
('SR', _(u'Suriname')),
('SJ', _(u'Svalbard and Jan Mayen')),
('SZ', _(u'Swaziland')),
('SE', _(u'Sweden')),
('CH', _(u'Switzerland')),
('SY', _(u'Syrian Arab Republic')),
('TW', _(u'Taiwan, Province of China')),
('TJ', _(u'Tajikistan')),
('TZ', _(u'Tanzania, United Republic of')),
('TH', _(u'Thailand')),
('CD', _(u'The Democratic Republic of the Congo')),
('MK', _(u'The Former Yugoslav Republic of Macedonia')),
('TL', _(u'Timor-leste')),
('TG', _(u'Togo')),
('TK', _(u'Tokelau')),
('TO', _(u'Tonga')),
('TT', _(u'Trinidad and Tobago')),
('TN', _(u'Tunisia')),
('TR', _(u'Turkey')),
('TM', _(u'Turkmenistan')),
('TC', _(u'Turks and Caicos Islands')),
('TV', _(u'Tuvalu')),
('VI', _(u'U.S. Virgin Islands')),
('UG', _(u'Uganda')),
('UA', _(u'Ukraine')),
('AE', _(u'United Arab Emirates')),
('GB', _(u'United Kingdom')),
('TZ', _(u'United Republic of Tanzania')),
('US', _(u'United States')),
('UM', _(u'United States Minor Outlying Islands')),
('UY', _(u'Uruguay')),
('UZ', _(u'Uzbekistan')),
('VU', _(u'Vanuatu')),
('VE', _(u'Venezuela, Bolivarian Republic of')),
('VN', _(u'Viet Nam')),
('VG', _(u'Virgin Islands, British')),
('VI', _(u'Virgin Islands, U.S.')),
('WF', _(u'Wallis and Futuna')),
('EH', _(u'Western Sahara')),
('YE', _(u'Yemen')),
('ZM', _(u'Zambia')),
('ZW', _(u'Zimbabwe')),
)
# Official capitalized country names.
OFFICIAL_COUNTRIES = {
'AF': u'AFGHANISTAN',
'AX': u'\xc5LAND ISLANDS',
'AL': u'ALBANIA',
'DZ': u'ALGERIA',
'AS': u'AMERICAN SAMOA',
'AD': u'ANDORRA',
'AO': u'ANGOLA',
'AI': u'ANGUILLA',
'AQ': u'ANTARCTICA',
'AG': u'ANTIGUA AND BARBUDA',
'AR': u'ARGENTINA',
'AM': u'ARMENIA',
'AW': u'ARUBA',
'AU': u'AUSTRALIA',
'AT': u'AUSTRIA',
'AZ': u'AZERBAIJAN',
'BS': u'BAHAMAS',
'BH': u'BAHRAIN',
'BD': u'BANGLADESH',
'BB': u'BARBADOS',
'BY': u'BELARUS',
'BE': u'BELGIUM',
'BZ': u'BELIZE',
'BJ': u'BENIN',
'BM': u'BERMUDA',
'BT': u'BHUTAN',
'BO': u'BOLIVIA, PLURINATIONAL STATE OF',
'BQ': u'BONAIRE, SINT EUSTATIUS AND SABA',
'BA': u'BOSNIA AND HERZEGOVINA',
'BW': u'BOTSWANA',
'BV': u'BOUVET ISLAND',
'BR': u'BRAZIL',
'IO': u'BRITISH INDIAN OCEAN TERRITORY',
'BN': u'BRUNEI DARUSSALAM',
'BG': u'BULGARIA',
'BF': u'BURKINA FASO',
'BI': u'BURUNDI',
'KH': u'CAMBODIA',
'CM': u'CAMEROON',
'CA': u'CANADA',
'CV': u'CAPE VERDE',
'KY': u'CAYMAN ISLANDS',
'CF': u'CENTRAL AFRICAN REPUBLIC',
'TD': u'CHAD',
'CL': u'CHILE',
'CN': u'CHINA',
'CX': u'CHRISTMAS ISLAND',
'CC': u'COCOS (KEELING) ISLANDS',
'CO': u'COLOMBIA',
'KM': u'COMOROS',
'CG': u'CONGO',
'CD': u'CONGO, THE DEMOCRATIC REPUBLIC OF THE',
'CK': u'COOK ISLANDS',
'CR': u'COSTA RICA',
'CI': u"C\xd4TE D'IVOIRE",
'HR': u'CROATIA',
'CU': u'CUBA',
'CW': u'CURA\xc7AO',
'CY': u'CYPRUS',
'CZ': u'CZECH REPUBLIC',
'DK': u'DENMARK',
'DJ': u'DJIBOUTI',
'DM': u'DOMINICA',
'DO': u'DOMINICAN REPUBLIC',
'EC': u'ECUADOR',
'EG': u'EGYPT',
'SV': u'EL SALVADOR',
'GQ': u'EQUATORIAL GUINEA',
'ER': u'ERITREA',
'EE': u'ESTONIA',
'ET': u'ETHIOPIA',
'FK': u'FALKLAND ISLANDS (MALVINAS)',
'FO': u'FAROE ISLANDS',
'FJ': u'FIJI',
'FI': u'FINLAND',
'FR': u'FRANCE',
'GF': u'FRENCH GUIANA',
'PF': u'FRENCH POLYNESIA',
'TF': u'FRENCH SOUTHERN TERRITORIES',
'GA': u'GABON',
'GM': u'GAMBIA',
'GE': u'GEORGIA',
'DE': u'GERMANY',
'GH': u'GHANA',
'GI': u'GIBRALTAR',
'GR': u'GREECE',
'GL': u'GREENLAND',
'GD': u'GRENADA',
'GP': u'GUADELOUPE',
'GU': u'GUAM',
'GT': u'GUATEMALA',
'GG': u'GUERNSEY',
'GN': u'GUINEA',
'GW': u'GUINEA-BISSAU',
'GY': u'GUYANA',
'HT': u'HAITI',
'HM': u'HEARD ISLAND AND MCDONALD ISLANDS',
'VA': u'HOLY SEE (VATICAN CITY STATE)',
'HN': u'HONDURAS',
'HK': u'HONG KONG',
'HU': u'HUNGARY',
'IS': u'ICELAND',
'IN': u'INDIA',
'ID': u'INDONESIA',
'IR': u'IRAN, ISLAMIC REPUBLIC OF',
'IQ': u'IRAQ',
'IE': u'IRELAND',
'IM': u'ISLE OF MAN',
'IL': u'ISRAEL',
'IT': u'ITALY',
'JM': u'JAMAICA',
'JP': u'JAPAN',
'JE': u'JERSEY',
'JO': u'JORDAN',
'KZ': u'KAZAKHSTAN',
'KE': u'KENYA',
'KI': u'KIRIBATI',
'KP': u"KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF",
'KR': u'KOREA, REPUBLIC OF',
'KW': u'KUWAIT',
'KG': u'KYRGYZSTAN',
'LA': u"LAO PEOPLE'S DEMOCRATIC REPUBLIC",
'LV': u'LATVIA',
'LB': u'LEBANON',
'LS': u'LESOTHO',
'LR': u'LIBERIA',
'LY': u'LIBYAN ARAB JAMAHIRIYA',
'LI': u'LIECHTENSTEIN',
'LT': u'LITHUANIA',
'LU': u'LUXEMBOURG',
'MO': u'MACAO',
'MK': u'MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF',
'MG': u'MADAGASCAR',
'MW': u'MALAWI',
'MY': u'MALAYSIA',
'MV': u'MALDIVES',
'ML': u'MALI',
'MT': u'MALTA',
'MH': u'MARSHALL ISLANDS',
'MQ': u'MARTINIQUE',
'MR': u'MAURITANIA',
'MU': u'MAURITIUS',
'YT': u'MAYOTTE',
'MX': u'MEXICO',
'FM': u'MICRONESIA, FEDERATED STATES OF',
'MD': u'MOLDOVA, REPUBLIC OF',
'MC': u'MONACO',
'MN': u'MONGOLIA',
'ME': u'MONTENEGRO',
'MS': u'MONTSERRAT',
'MA': u'MOROCCO',
'MZ': u'MOZAMBIQUE',
'MM': u'MYANMAR',
'NA': u'NAMIBIA',
'NR': u'NAURU',
'NP': u'NEPAL',
'NL': u'NETHERLANDS',
'NC': u'NEW CALEDONIA',
'NZ': u'NEW ZEALAND',
'NI': u'NICARAGUA',
'NE': u'NIGER',
'NG': u'NIGERIA',
'NU': u'NIUE',
'NF': u'NORFOLK ISLAND',
'MP': u'NORTHERN MARIANA ISLANDS',
'NO': u'NORWAY',
'OM': u'OMAN',
'PK': u'PAKISTAN',
'PW': u'PALAU',
'PS': u'PALESTINIAN TERRITORY, OCCUPIED',
'PA': u'PANAMA',
'PG': u'PAPUA NEW GUINEA',
'PY': u'PARAGUAY',
'PE': u'PERU',
'PH': u'PHILIPPINES',
'PN': u'PITCAIRN',
'PL': u'POLAND',
'PT': u'PORTUGAL',
'PR': u'PUERTO RICO',
'QA': u'QATAR',
'RE': u'R\xc9UNION',
'RO': u'ROMANIA',
'RU': u'RUSSIAN FEDERATION',
'RW': u'RWANDA',
'BL': u'SAINT BARTH\xc9LEMY',
'SH': u'SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA',
'KN': u'SAINT KITTS AND NEVIS',
'LC': u'SAINT LUCIA',
'MF': u'SAINT MARTIN (FRENCH PART)',
'PM': u'SAINT PIERRE AND MIQUELON',
'VC': u'SAINT VINCENT AND THE GRENADINES',
'WS': u'SAMOA',
'SM': u'SAN MARINO',
'ST': u'SAO TOME AND PRINCIPE',
'SA': u'SAUDI ARABIA',
'SN': u'SENEGAL',
'RS': u'SERBIA',
'SC': u'SEYCHELLES',
'SL': u'SIERRA LEONE',
'SG': u'SINGAPORE',
'SX': u'SINT MAARTEN (DUTCH PART)',
'SK': u'SLOVAKIA',
'SI': u'SLOVENIA',
'SB': u'SOLOMON ISLANDS',
'SO': u'SOMALIA',
'ZA': u'SOUTH AFRICA',
'GS': u'SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS',
'SS': u'SOUTH SUDAN',
'ES': u'SPAIN',
'LK': u'SRI LANKA',
'SD': u'SUDAN',
'SR': u'SURINAME',
'SJ': u'SVALBARD AND JAN MAYEN',
'SZ': u'SWAZILAND',
'SE': u'SWEDEN',
'CH': u'SWITZERLAND',
'SY': u'SYRIAN ARAB REPUBLIC',
'TW': u'TAIWAN, PROVINCE OF CHINA',
'TJ': u'TAJIKISTAN',
'TZ': u'TANZANIA, UNITED REPUBLIC OF',
'TH': u'THAILAND',
'TL': u'TIMOR-LESTE',
'TG': u'TOGO',
'TK': u'TOKELAU',
'TO': u'TONGA',
'TT': u'TRINIDAD AND TOBAGO',
'TN': u'TUNISIA',
'TR': u'TURKEY',
'TM': u'TURKMENISTAN',
'TC': u'TURKS AND CAICOS ISLANDS',
'TV': u'TUVALU',
'UG': u'UGANDA',
'UA': u'UKRAINE',
'AE': u'UNITED ARAB EMIRATES',
'GB': u'UNITED KINGDOM',
'US': u'UNITED STATES',
'UM': u'UNITED STATES MINOR OUTLYING ISLANDS',
'UY': u'URUGUAY',
'UZ': u'UZBEKISTAN',
'VU': u'VANUATU',
'VE': u'VENEZUELA, BOLIVARIAN REPUBLIC OF',
'VN': u'VIET NAM',
'VG': u'VIRGIN ISLANDS, BRITISH',
'VI': u'VIRGIN ISLANDS, U.S.',
'WF': u'WALLIS AND FUTUNA',
'EH': u'WESTERN SAHARA',
'YE': u'YEMEN',
'ZM': u'ZAMBIA',
'ZW': u'ZIMBABWE',
}
| |
# Copyright 2013 OpenStack Foundation
# Copyright (c) 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for manila.api.v2.quota_sets.py
"""
from unittest import mock
import ddt
from oslo_config import cfg
import webob.exc
import webob.response
from manila.api.openstack import api_version_request as api_version
from manila.api.v2 import quota_sets
from manila import context
from manila import exception
from manila import policy
from manila import test
from manila.tests.api import fakes
from manila import utils
CONF = cfg.CONF
# Quota keys that only exist from specific API microversions onward
# (2.40 / 2.53 / 2.62); tests use these to assert that older requests
# cannot set them.
sg_quota_keys = ['share_groups', 'share_group_snapshots']
replica_quota_keys = ['share_replicas']
per_share_size_quota_keys = ['per_share_gigabytes']
def _get_request(is_admin, user_in_url):
    """Build a mocked API request carrying a manila admin context.

    :param is_admin: value assigned to the context's ``is_admin`` flag
    :param user_in_url: when True, also set a user id on the context and
        encode it in the request query string
    :returns: a ``mock.MagicMock`` request at API microversion 2.40
    """
    request = mock.MagicMock(
        api_version_request=api_version.APIVersionRequest("2.40"))
    ctxt = context.get_admin_context()
    request.environ = {'manila.context': ctxt}
    ctxt.is_admin = is_admin
    ctxt.auth_token = 'foo_auth_token'
    ctxt.project_id = 'foo_project_id'
    if user_in_url:
        ctxt.user_id = 'foo_user_id'
        request.environ['QUERY_STRING'] = 'user_id=foo_user_id'
    return request
@ddt.ddt
class QuotaSetsControllerTest(test.TestCase):
    def setUp(self):
        """Create a fresh controller and stub out all policy checks."""
        super(QuotaSetsControllerTest, self).setUp()
        self.controller = quota_sets.QuotaSetsController()
        self.resource_name = self.controller.resource_name
        self.project_id = 'foo_project_id'
        # Allow every policy check so tests exercise controller logic only;
        # individual tests assert on the recorded calls.
        self.mock_policy_check = self.mock_object(
            policy, 'check_policy', mock.Mock(return_value=True))
    @ddt.data(
        {"shares": 3, "snapshots": 4, "gigabytes": 5,
         "snapshot_gigabytes": 6, "share_networks": 7},
        {"shares": -1, "snapshots": -1, "gigabytes": -1,
         "snapshot_gigabytes": -1, "share_networks": -1},
        {"shares": 13},
        {"snapshots": 24},
        {"gigabytes": 7},
        {"snapshot_gigabytes": 10001},
        {"share_networks": 12345},
        {"share_groups": 123456},
        {"share_group_snapshots": 123456},
    )
    def test_defaults(self, quotas):
        """Overridden config defaults are reflected by the defaults API."""
        req = _get_request(True, False)
        # Override only the keys in this datum; all others keep their
        # stock defaults (50/1000/10 below).
        for k, v in quotas.items():
            CONF.set_default(k, v, 'quota')
        expected = {
            'quota_set': {
                'id': self.project_id,
                'shares': quotas.get('shares', 50),
                'gigabytes': quotas.get('gigabytes', 1000),
                'snapshots': quotas.get('snapshots', 50),
                'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000),
                'share_networks': quotas.get('share_networks', 10),
                'share_groups': quotas.get('share_groups', 50),
                'share_group_snapshots': quotas.get(
                    'share_group_snapshots', 50),
            }
        }
        result = self.controller.defaults(req, self.project_id)
        self.assertEqual(expected, result)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'show')
    @ddt.data(
        ('os-', '1.0', quota_sets.QuotaSetsControllerLegacy, 'defaults'),
        ('os-', '2.6', quota_sets.QuotaSetsControllerLegacy, 'defaults'),
        ('', '2.7', quota_sets.QuotaSetsController, 'defaults'),
        ('os-', '1.0', quota_sets.QuotaSetsControllerLegacy, 'show'),
        ('os-', '2.6', quota_sets.QuotaSetsControllerLegacy, 'show'),
        ('', '2.7', quota_sets.QuotaSetsController, 'show'),
    )
    @ddt.unpack
    def test_get_quotas_with_different_api_versions(self, url, version,
                                                    controller, method_name):
        """Legacy ('os-' prefixed) and modern controllers agree on quotas."""
        expected = {
            'quota_set': {
                'id': self.project_id,
                'shares': 50,
                'gigabytes': 1000,
                'snapshots': 50,
                'snapshot_gigabytes': 1000,
                'share_networks': 10,
            }
        }
        req = fakes.HTTPRequest.blank(
            '/fooproject/%squota-sets' % url,
            version=version, use_admin_context=True)
        result = getattr(controller(), method_name)(req, self.project_id)
        self.assertEqual(expected, result)
@staticmethod
def _get_share_type_request_object(microversion=None):
req = _get_request(True, False)
req.environ['QUERY_STRING'] = 'share_type=fake_share_type_name_or_id'
req.api_version_request = api_version.APIVersionRequest(
microversion or '2.39')
return req
    @ddt.data('2.39', '2.40')
    def test_share_type_quota_detail(self, microversion):
        """detail() scoped to a share type returns per-key usage triples."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(return_value={'id': 'fake_st_id'}))
        req = self._get_share_type_request_object(microversion)
        quotas = {
            "shares": 23,
            "snapshots": 34,
            "gigabytes": 45,
            "snapshot_gigabytes": 56,
        }
        # Share-type quotas expose only the share/snapshot families, each
        # as an in_use/limit/reserved triple.
        expected = {'quota_set': {
            'id': self.project_id,
            'shares': {
                'in_use': 0,
                'limit': quotas['shares'],
                'reserved': 0,
            },
            'gigabytes': {
                'in_use': 0,
                'limit': quotas['gigabytes'],
                'reserved': 0,
            },
            'snapshots': {
                'in_use': 0,
                'limit': quotas['snapshots'],
                'reserved': 0,
            },
            'snapshot_gigabytes': {
                'in_use': 0,
                'limit': quotas['snapshot_gigabytes'],
                'reserved': 0,
            },
        }}
        for k, v in quotas.items():
            CONF.set_default(k, v, 'quota')
        result = self.controller.detail(req, self.project_id)
        self.assertEqual(expected, result)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'show')
        quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with(
            req.environ['manila.context'], 'fake_share_type_name_or_id')
    @ddt.data('2.39', '2.40')
    def test_show_share_type_quota(self, microversion):
        """show() scoped to a share type returns flat limits only."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(return_value={'id': 'fake_st_id'}))
        req = self._get_share_type_request_object(microversion)
        quotas = {
            "shares": 23,
            "snapshots": 34,
            "gigabytes": 45,
            "snapshot_gigabytes": 56,
        }
        expected = {
            'quota_set': {
                'id': self.project_id,
                'shares': quotas.get('shares', 50),
                'gigabytes': quotas.get('gigabytes', 1000),
                'snapshots': quotas.get('snapshots', 50),
                'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000),
            }
        }
        for k, v in quotas.items():
            CONF.set_default(k, v, 'quota')
        result = self.controller.show(req, self.project_id)
        self.assertEqual(expected, result)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'show')
        quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with(
            req.environ['manila.context'], 'fake_share_type_name_or_id')
@ddt.data('show', 'detail')
def test_get_share_type_quota_with_old_microversion(self, method):
req = self._get_share_type_request_object('2.38')
self.assertRaises(
webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, self.project_id)
@ddt.data((None, None), (None, 'foo'), ('bar', None))
@ddt.unpack
def test__validate_user_id_and_share_type_args(self, user_id, st_id):
result = self.controller._validate_user_id_and_share_type_args(
user_id, st_id)
self.assertIsNone(result)
    def test__validate_user_id_and_share_type_args_exception(self):
        """Supplying both user_id and share_type raises HTTP 400."""
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller._validate_user_id_and_share_type_args,
            'foo', 'bar')
def test__get_share_type_id_found(self):
self.mock_object(
quota_sets.db, 'share_type_get_by_name_or_id',
mock.Mock(return_value={'id': 'fake_st_id'}))
ctxt = 'fake_context'
share_type = 'fake_share_type_name_or_id'
result = self.controller._get_share_type_id(ctxt, share_type)
self.assertEqual('fake_st_id', result)
def test__get_share_type_id_not_found(self):
self.mock_object(
quota_sets.db, 'share_type_get_by_name_or_id',
mock.Mock(return_value=None))
ctxt = 'fake_context'
share_type = 'fake_share_type_name_or_id'
self.assertRaises(
webob.exc.HTTPNotFound,
self.controller._get_share_type_id,
ctxt, share_type)
def test__get_share_type_id_is_not_provided(self):
self.mock_object(
quota_sets.db, 'share_type_get_by_name_or_id',
mock.Mock(return_value={'id': 'fake_st_id'}))
ctxt = 'fake_context'
result = self.controller._get_share_type_id(ctxt, None)
self.assertIsNone(result)
    @ddt.data(
        ({}, sg_quota_keys, '2.40'),
        ({"quota_set": {}}, sg_quota_keys, '2.40'),
        ({"quota_set": {"foo": "bar"}}, sg_quota_keys, '2.40'),
        ({"foo": "bar"}, replica_quota_keys, '2.53'),
        ({"quota_set": {"foo": "bar"}}, replica_quota_keys, '2.53'),
        ({"quota_set": {"foo": "bar"}}, per_share_size_quota_keys, '2.62'),
    )
    @ddt.unpack
    def test__ensure_specific_microversion_args_are_absent_success(
            self, body, keys, microversion):
        """Bodies without microversion-gated keys pass the guard."""
        result = self.controller._ensure_specific_microversion_args_are_absent(
            body, keys, microversion)
        self.assertIsNone(result)
    @ddt.data(
        ({"share_groups": 5}, sg_quota_keys, '2.40'),
        ({"share_group_snapshots": 6}, sg_quota_keys, '2.40'),
        ({"quota_set": {"share_groups": 7}}, sg_quota_keys, '2.40'),
        ({"quota_set": {"share_group_snapshots": 8}}, sg_quota_keys, '2.40'),
        ({"quota_set": {"share_replicas": 9}}, replica_quota_keys, '2.53'),
        ({"quota_set": {"share_replicas": 10}}, replica_quota_keys, '2.53'),
        ({"quota_set": {"per_share_gigabytes": 10}},
         per_share_size_quota_keys, '2.62'),
    )
    @ddt.unpack
    def test__ensure_specific_microversion_args_are_absent_error(
            self, body, keys, microversion):
        """Microversion-gated keys in the body raise HTTP 400."""
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller._ensure_specific_microversion_args_are_absent,
            body,
            keys,
            microversion
        )
    # NOTE(review): the ddt data is built by calling _get_request at class
    # definition time, so both data points share import-time mocks.
    @ddt.data(_get_request(True, True), _get_request(True, False))
    def test__ensure_share_type_arg_is_absent(self, req):
        """Requests without a share_type query arg pass the guard."""
        result = self.controller._ensure_share_type_arg_is_absent(req)
        self.assertIsNone(result)
    def test__ensure_share_type_arg_is_absent_exception(self):
        """A share_type query arg makes the guard raise HTTP 400."""
        req = self._get_share_type_request_object('2.39')
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller._ensure_share_type_arg_is_absent,
            req)
    @ddt.data(_get_request(True, True), _get_request(True, False))
    def test_quota_detail(self, request):
        """Project-level detail() reports in_use/limit/reserved per quota."""
        # detail() requires at least microversion 2.25.
        request.api_version_request = api_version.APIVersionRequest('2.25')
        quotas = {
            "shares": 23,
            "snapshots": 34,
            "gigabytes": 45,
            "snapshot_gigabytes": 56,
            "share_networks": 67,
        }
        expected = {
            'quota_set': {
                'id': self.project_id,
                'shares': {'in_use': 0,
                           'limit': quotas['shares'],
                           'reserved': 0},
                'gigabytes': {'in_use': 0,
                              'limit': quotas['gigabytes'], 'reserved': 0},
                'snapshots': {'in_use': 0,
                              'limit': quotas['snapshots'], 'reserved': 0},
                'snapshot_gigabytes': {
                    'in_use': 0,
                    'limit': quotas['snapshot_gigabytes'],
                    'reserved': 0,
                },
                'share_networks': {
                    'in_use': 0,
                    'limit': quotas['share_networks'],
                    'reserved': 0
                },
            }
        }
        for k, v in quotas.items():
            CONF.set_default(k, v, 'quota')
        result = self.controller.detail(request, self.project_id)
        self.assertEqual(expected, result)
        self.mock_policy_check.assert_called_once_with(
            request.environ['manila.context'], self.resource_name, 'show')
    @ddt.data(_get_request(True, True), _get_request(True, False))
    def test_show_quota(self, request):
        """Project-level show() returns the configured flat limits."""
        quotas = {
            "shares": 23,
            "snapshots": 34,
            "gigabytes": 45,
            "snapshot_gigabytes": 56,
            "share_networks": 67,
            "share_groups": 53,
            "share_group_snapshots": 57,
        }
        expected = {
            'quota_set': {
                'id': self.project_id,
                'shares': quotas.get('shares', 50),
                'gigabytes': quotas.get('gigabytes', 1000),
                'snapshots': quotas.get('snapshots', 50),
                'snapshot_gigabytes': quotas.get('snapshot_gigabytes', 1000),
                'share_networks': quotas.get('share_networks', 10),
                'share_groups': quotas.get('share_groups', 50),
                'share_group_snapshots': quotas.get(
                    'share_group_snapshots', 50),
            }
        }
        for k, v in quotas.items():
            CONF.set_default(k, v, 'quota')
        result = self.controller.show(request, self.project_id)
        self.assertEqual(expected, result)
        self.mock_policy_check.assert_called_once_with(
            request.environ['manila.context'], self.resource_name, 'show')
    def test_show_quota_not_authorized(self):
        """show() translates a NotAuthorized DB check into HTTP 403."""
        req = _get_request(True, False)
        # Force the project-context authorization step to fail.
        self.mock_object(
            quota_sets.db,
            'authorize_project_context',
            mock.Mock(side_effect=exception.NotAuthorized))
        self.assertRaises(
            webob.exc.HTTPForbidden,
            self.controller.show,
            req, self.project_id)
        # The policy check still runs before the DB authorization.
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'show')
    @ddt.data(_get_request(True, True), _get_request(True, False))
    def test_update_quota(self, request):
        """update() applies the new value and subsequent show() reflects it."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(
                return_value={'id': 'fake_st_id', 'name': 'fake_st_name'}))
        # Configured default is 789; the request overrides it to 788.
        CONF.set_default('shares', 789, 'quota')
        body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}}
        expected = {
            'quota_set': {
                'shares': body['quota_set']['shares'],
                'gigabytes': 1000,
                'snapshots': 50,
                'snapshot_gigabytes': 1000,
                'share_networks': 10,
                'share_groups': 50,
                'share_group_snapshots': 50,
            }
        }
        mock_policy_update_check_call = mock.call(
            request.environ['manila.context'], self.resource_name, 'update')
        mock_policy_show_check_call = mock.call(
            request.environ['manila.context'], self.resource_name, 'show')
        update_result = self.controller.update(
            request, self.project_id, body=body)
        self.assertEqual(expected, update_result)
        show_result = self.controller.show(request, self.project_id)
        # show() additionally echoes the project id.
        expected['quota_set']['id'] = self.project_id
        self.assertEqual(expected, show_result)
        self.mock_policy_check.assert_has_calls([
            mock_policy_update_check_call, mock_policy_show_check_call])
        # No share_type query param, so the share-type lookup is skipped.
        quota_sets.db.share_type_get_by_name_or_id.assert_not_called()
    @ddt.data(_get_request(True, True), _get_request(True, False))
    def test_update_quota_with_value_greater_than_2147483647(self, req):
        """Quota values above the signed 32-bit max are rejected with 400."""
        # NOTE(review): both branches compare the ddt-supplied request
        # against a *newly constructed* _get_request(...). Unless that
        # request type defines value-based __eq__, these comparisons fall
        # back to identity and are always False, so neither assertRaises
        # ever runs and the test passes vacuously -- verify.
        value = 2147483648
        body = {'quota_set': {'tenant_id': self.project_id, 'shares': value}}
        if req == _get_request(True, True):
            self.mock_policy_update_check_call = mock.call(
                req.environ['manila.context'], self.resource_name, 'update')
            self.assertRaises(
                webob.exc.HTTPBadRequest,
                self.controller.update,
                req, self.project_id, body
            )
            self.mock_policy_check.assert_called_once_with(
                req.environ['manila.context'], self.resource_name, 'update')
        if req == _get_request(True, False):
            self.assertRaises(
                webob.exc.HTTPBadRequest,
                self.controller.update,
                req, self.project_id, body
            )
            self.mock_policy_check.assert_not_called()
    @ddt.data('2.39', '2.40')
    def test_update_share_type_quota(self, microversion):
        """Per-share-type update resolves the type and omits network quotas."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(
                return_value={'id': 'fake_st_id', 'name': 'fake_st_name'}))
        req = self._get_share_type_request_object(microversion)
        CONF.set_default('shares', 789, 'quota')
        body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}}
        # share_networks (and group quotas) are not part of a
        # share-type-scoped quota set.
        expected = {
            'quota_set': {
                'shares': body['quota_set']['shares'],
                'gigabytes': 1000,
                'snapshots': 50,
                'snapshot_gigabytes': 1000,
            }
        }
        update_result = self.controller.update(req, self.project_id, body=body)
        self.assertEqual(expected, update_result)
        # The share type named in the query string is looked up once.
        quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with(
            req.environ['manila.context'],
            req.environ['QUERY_STRING'].split('=')[-1])
        quota_sets.db.share_type_get_by_name_or_id.reset_mock()
        show_result = self.controller.show(req, self.project_id)
        expected['quota_set']['id'] = self.project_id
        self.assertEqual(expected, show_result)
        self.mock_policy_check.assert_has_calls([
            mock.call(req.environ['manila.context'], self.resource_name, key)
            for key in ('update', 'show')
        ])
        # show() performs the share-type lookup again (mock was reset).
        quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with(
            req.environ['manila.context'],
            req.environ['QUERY_STRING'].split('=')[-1])
    def test_update_share_type_quota_using_too_old_microversion(self):
        """Share-type quotas require >= 2.39; 2.38 gets HTTP 400."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(
                return_value={'id': 'fake_st_id', 'name': 'fake_st_name'}))
        req = self._get_share_type_request_object('2.38')
        body = {'quota_set': {'tenant_id': self.project_id, 'shares': 788}}
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            req, self.project_id, body=body)
        # Rejected before the share type is ever resolved.
        quota_sets.db.share_type_get_by_name_or_id.assert_not_called()
    def test_update_share_type_quota_for_share_networks(self):
        """share_networks cannot be set per share type -> HTTP 400."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(
                return_value={'id': 'fake_st_id', 'name': 'fake_st_name'}))
        req = self._get_share_type_request_object('2.39')
        body = {'quota_set': {
            'tenant_id': self.project_id, 'share_networks': 788,
        }}
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            req, self.project_id, body=body)
        # The share type is resolved before the invalid key is rejected.
        quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with(
            req.environ['manila.context'],
            req.environ['QUERY_STRING'].split('=')[-1])
    @ddt.data(-2, 'foo', {1: 2}, [1])
    def test_update_quota_with_invalid_value(self, value):
        """Negative, non-numeric, or non-scalar quota values -> HTTP 400."""
        req = _get_request(True, False)
        body = {'quota_set': {'tenant_id': self.project_id, 'shares': value}}
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            req, self.project_id, body=body)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'update')
    def test_user_quota_can_not_be_bigger_than_tenant_quota(self):
        """A per-user quota exceeding the tenant quota is rejected (400)."""
        value = 777
        # Tenant-level limit.
        CONF.set_default('shares', value, 'quota')
        body = {
            'quota_set': {
                'tenant_id': self.project_id,
                'shares': value + 1,
            }
        }
        # Second flag True -> request targets a user-scoped quota.
        req = _get_request(True, True)
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            req, self.project_id, body=body)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'update')
def test_update_inexistent_quota(self):
body = {
'quota_set': {
'tenant_id': self.project_id,
'fake_quota': 13,
}
}
req = _get_request(True, False)
self.assertRaises(
webob.exc.HTTPBadRequest,
self.controller.update,
req, self.project_id, body=body)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'update')
def test_update_quota_not_authorized(self):
body = {'quota_set': {'tenant_id': self.project_id, 'shares': 13}}
req = _get_request(False, False)
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.update,
req, self.project_id, body=body)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'update')
    @ddt.data(
        ('os-quota-sets', '1.0', quota_sets.QuotaSetsControllerLegacy),
        ('os-quota-sets', '2.6', quota_sets.QuotaSetsControllerLegacy),
        ('quota-sets', '2.7', quota_sets.QuotaSetsController),
    )
    @ddt.unpack
    def test_update_all_quotas_with_force(self, url, version, controller):
        """'force': True lets every quota be updated in a single request."""
        req = fakes.HTTPRequest.blank(
            '/fooproject/%s' % url, version=version, use_admin_context=True)
        quotas = (
            ('quota_shares', 13),
            ('quota_gigabytes', 14),
            ('quota_snapshots', 15),
            ('quota_snapshot_gigabytes', 16),
            ('quota_share_networks', 17),
        )
        for quota, value in quotas:
            CONF.set_default(quota, value)
        # 'expected' doubles as the request body (with tenant_id/force)
        # and, after popping those keys, as the expected response.
        expected = {
            'quota_set': {
                'tenant_id': self.project_id,
                'shares': quotas[0][1],
                'gigabytes': quotas[1][1],
                'snapshots': quotas[2][1],
                'snapshot_gigabytes': quotas[3][1],
                'share_networks': quotas[4][1],
                'force': True,
            }
        }
        update_result = controller().update(
            req, self.project_id, body=expected)
        # The response echoes the quotas but not the control keys.
        expected['quota_set'].pop('force')
        expected['quota_set'].pop('tenant_id')
        self.assertEqual(expected, update_result)
        show_result = controller().show(req, self.project_id)
        expected['quota_set']['id'] = self.project_id
        self.assertEqual(expected, show_result)
        self.mock_policy_check.assert_has_calls([
            mock.call(req.environ['manila.context'],
                      self.resource_name, action)
            for action in ('update', 'show')
        ])
    @ddt.data(
        ('os-quota-sets', '1.0', quota_sets.QuotaSetsControllerLegacy),
        ('os-quota-sets', '2.6', quota_sets.QuotaSetsControllerLegacy),
        ('quota-sets', '2.7', quota_sets.QuotaSetsController),
    )
    @ddt.unpack
    def test_delete_tenant_quota(self, url, version, controller):
        """Deleting without a user scope destroys project-wide quotas."""
        self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user')
        self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project')
        req = fakes.HTTPRequest.blank(
            '/fooproject/%s' % url, version=version, use_admin_context=True)
        result = controller().delete(req, self.project_id)
        self.assertTrue(
            utils.IsAMatcher(webob.response.Response) == result
        )
        self.assertTrue(hasattr(result, 'status_code'))
        self.assertEqual(202, result.status_code)
        # Only the project-wide destroy path is exercised.
        self.assertFalse(
            quota_sets.QUOTAS.destroy_all_by_project_and_user.called)
        quota_sets.QUOTAS.destroy_all_by_project.assert_called_once_with(
            req.environ['manila.context'], self.project_id)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'delete')
    def test_delete_user_quota(self):
        """Deleting with a user scope destroys only that user's quotas."""
        project_id = 'foo_project_id'
        self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user')
        self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project')
        # Second flag True -> request carries a user scope.
        req = _get_request(True, True)
        result = self.controller.delete(req, project_id)
        self.assertTrue(
            utils.IsAMatcher(webob.response.Response) == result
        )
        self.assertTrue(hasattr(result, 'status_code'))
        self.assertEqual(202, result.status_code)
        (quota_sets.QUOTAS.destroy_all_by_project_and_user.
            assert_called_once_with(
                req.environ['manila.context'],
                project_id,
                req.environ['manila.context'].user_id))
        # The project-wide destroy path must not run.
        self.assertFalse(quota_sets.QUOTAS.destroy_all_by_project.called)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'delete')
    def test_delete_share_type_quota(self):
        """Deleting with a share_type param destroys only that type's quotas."""
        req = self._get_share_type_request_object('2.39')
        self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project')
        self.mock_object(quota_sets.QUOTAS, 'destroy_all_by_project_and_user')
        mock_delete_st_quotas = self.mock_object(
            quota_sets.QUOTAS, 'destroy_all_by_project_and_share_type')
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(
                return_value={'id': 'fake_st_id', 'name': 'fake_st_name'}))
        result = self.controller.delete(req, self.project_id)
        self.assertEqual(utils.IsAMatcher(webob.response.Response), result)
        self.assertTrue(hasattr(result, 'status_code'))
        self.assertEqual(202, result.status_code)
        # Only the share-type-scoped destroy runs, using the resolved id.
        mock_delete_st_quotas.assert_called_once_with(
            req.environ['manila.context'], self.project_id, 'fake_st_id')
        quota_sets.QUOTAS.destroy_all_by_project.assert_not_called()
        quota_sets.QUOTAS.destroy_all_by_project_and_user.assert_not_called()
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'delete')
        quota_sets.db.share_type_get_by_name_or_id.assert_called_once_with(
            req.environ['manila.context'],
            req.environ['QUERY_STRING'].split('=')[-1])
    def test_delete_share_type_quota_using_too_old_microversion(self):
        """Share-type quota delete requires >= 2.39; 2.38 gets HTTP 400."""
        self.mock_object(
            quota_sets.db, 'share_type_get_by_name_or_id',
            mock.Mock(
                return_value={'id': 'fake_st_id', 'name': 'fake_st_name'}))
        req = self._get_share_type_request_object('2.38')
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.delete,
            req, self.project_id)
        # Rejected before the share type is ever resolved.
        quota_sets.db.share_type_get_by_name_or_id.assert_not_called()
def test_delete_not_authorized(self):
req = _get_request(False, False)
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.delete,
req, self.project_id)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'delete')
    @ddt.data(
        ('os-quota-sets', '2.7', quota_sets.QuotaSetsControllerLegacy),
        ('quota-sets', '2.6', quota_sets.QuotaSetsController),
        ('quota-sets', '2.0', quota_sets.QuotaSetsController),
    )
    @ddt.unpack
    def test_api_not_found(self, url, version, controller):
        """Controllers reject microversions outside their supported range."""
        req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version)
        for method_name in ('show', 'defaults', 'delete'):
            self.assertRaises(
                exception.VersionNotFoundForAPIMethod,
                getattr(controller(), method_name),
                req, self.project_id)
    @ddt.data(
        ('os-quota-sets', '2.7', quota_sets.QuotaSetsControllerLegacy),
        ('quota-sets', '2.6', quota_sets.QuotaSetsController),
        ('quota-sets', '2.0', quota_sets.QuotaSetsController),
    )
    @ddt.unpack
    def test_update_api_not_found(self, url, version, controller):
        """update() also rejects microversions outside the supported range."""
        req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version)
        self.assertRaises(
            exception.VersionNotFoundForAPIMethod,
            controller().update,
            req, self.project_id)
    def test_update_without_quota(self):
        """An update body containing no quota keys is rejected with 400."""
        body = {
            'quota_set': {
                'tenant_id': self.project_id,
            }
        }
        req = _get_request(True, False)
        self.assertRaises(
            webob.exc.HTTPBadRequest,
            self.controller.update,
            req, self.project_id, body=body)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'update')
| |
#!/usr/bin/env python
# Try to determine how much RAM is currently being used per program.
# Note per _program_, not per process. So for example this script
# will report RAM used by all httpd process together. In detail it reports:
# sum(private RAM for program processes) + sum(Shared RAM for program processes)
# The shared RAM is problematic to calculate, and this script automatically
# selects the most accurate method available for your kernel.
# Licence: LGPLv2
# Author: P@draigBrady.com
# Source: http://www.pixelbeat.org/scripts/ps_mem.py
# V1.0 06 Jul 2005 Initial release
# V1.1 11 Aug 2006 root permission required for accuracy
# V1.2 08 Nov 2006 Add total to output
# Use KiB,MiB,... for units rather than K,M,...
# V1.3 22 Nov 2006 Ignore shared col from /proc/$pid/statm for
# 2.6 kernels up to and including 2.6.9.
# There it represented the total file backed extent
# V1.4 23 Nov 2006 Remove total from output as it's meaningless
# (the shared values overlap with other programs).
# Display the shared column. This extra info is
# useful, especially as it overlaps between programs.
# V1.5 26 Mar 2007 Remove redundant recursion from human()
# V1.6 05 Jun 2007 Also report number of processes with a given name.
# Patch from riccardo.murri@gmail.com
# V1.7 20 Sep 2007 Use PSS from /proc/$pid/smaps if available, which
# fixes some over-estimation and allows totalling.
# Enumerate the PIDs directly rather than using ps,
# which fixes the possible race between reading
# RSS with ps, and shared memory with this program.
# Also we can show non truncated command names.
# V1.8 28 Sep 2007 More accurate matching for stats in /proc/$pid/smaps
# as otherwise could match libraries causing a crash.
# Patch from patrice.bouchand.fedora@gmail.com
# V1.9 20 Feb 2008 Fix invalid values reported when PSS is available.
# Reported by Andrey Borzenkov <arvidjaar@mail.ru>
# V3.3 24 Jun 2014
# http://github.com/pixelb/scripts/commits/master/scripts/ps_mem.py
# Notes:
#
# All interpreted programs where the interpreter is started
# by the shell or with env, will be merged to the interpreter
# (as that's what's given to exec). For e.g. all python programs
# starting with "#!/usr/bin/env python" will be grouped under python.
# You can change this by using the full command line but that will
# have the undesirable effect of splitting up programs started with
# differing parameters (for e.g. mingetty tty[1-6]).
#
# For 2.6 kernels up to and including 2.6.13 and later 2.4 redhat kernels
# (rmap vm without smaps) it can not be accurately determined how many pages
# are shared between processes in general or within a program in our case:
# http://lkml.org/lkml/2005/7/6/250
# A warning is printed if overestimation is possible.
# In addition for 2.6 kernels up to 2.6.9 inclusive, the shared
# value in /proc/$pid/statm is the total file-backed extent of a process.
# We ignore that, introducing more overestimation, again printing a warning.
# Since kernel 2.6.23-rc8-mm1 PSS is available in smaps, which allows
# us to calculate a more accurate value for the total RAM used by programs.
#
# Programs that use CLONE_VM without CLONE_THREAD are discounted by assuming
# they're the only programs that have the same /proc/$PID/smaps file for
# each instance. This will fail if there are multiple real instances of a
# program that then use CLONE_VM without CLONE_THREAD, or if a clone changes
# its memory map while we're checksumming each /proc/$PID/smaps.
#
# I don't take account of memory allocated for a program
# by other programs. For e.g. memory used in the X server for
# a program could be determined, but is not.
#
# FreeBSD is supported if linprocfs is mounted at /compat/linux/proc/
# FreeBSD 8.0 supports up to a level of Linux 2.6.16
import getopt
import time
import errno
import os
import sys
try:
# md5 module is deprecated on python 2.6
# so try the newer hashlib first
import hashlib
md5_new = hashlib.md5
except ImportError:
import md5
md5_new = md5.new
# The following exits cleanly on Ctrl-C or EPIPE
# while treating other exceptions as before.
def std_exceptions(etype, value, tb):
    """Exception hook: silently drop Ctrl-C and broken-pipe errors.

    Restores the default hook first, then delegates every other
    exception to it unchanged.
    """
    sys.excepthook = sys.__excepthook__
    harmless = issubclass(etype, KeyboardInterrupt) or (
        issubclass(etype, IOError) and value.errno == errno.EPIPE)
    if not harmless:
        sys.__excepthook__(etype, value, tb)
sys.excepthook = std_exceptions
#
# Define some global variables
#
# Page size in KiB (true division: a float on Python 3).
PAGESIZE = os.sysconf("SC_PAGE_SIZE") / 1024 #KiB
# Our own PID, excluded from the report unless include_self is set.
our_pid = os.getpid()
# Flipped to 1 by getMemStats() once a Pss: line is seen in smaps.
have_pss = 0
class Proc:
    """Locate and open files under procfs (linprocfs on FreeBSD)."""

    def __init__(self):
        if os.uname()[0] == "FreeBSD":
            self.proc = '/compat/linux/proc'
        else:
            self.proc = '/proc'

    def path(self, *args):
        """Join the stringified *args* onto the procfs root."""
        parts = [str(part) for part in args]
        return os.path.join(self.proc, *parts)

    def open(self, *args):
        """Open a procfs file; raise LookupError when it is unreadable.

        ENOENT (kernel thread or exited process) and EPERM both become
        LookupError so callers can skip the process uniformly.
        """
        try:
            return open(self.path(*args))
        except (IOError, OSError):
            err = sys.exc_info()[1]
            if err.errno in (errno.ENOENT, errno.EPERM):
                raise LookupError
            raise
proc = Proc()
#
# Functions
#
def parse_options():
    """Parse the command line.

    Returns a (split_args, pids_to_show, watch, only_total) tuple.
    Prints the usage text and exits with status 3 on any usage error,
    or with status 0 after -h/--help.
    """
    try:
        long_options = ['split-args', 'help', 'total']
        opts, args = getopt.getopt(sys.argv[1:], "shtp:w:", long_options)
    except getopt.GetoptError:
        sys.stderr.write(help())
        sys.exit(3)

    if len(args):
        sys.stderr.write("Extraneous arguments: %s\n" % args)
        sys.exit(3)

    # ps_mem.py options
    split_args = False
    pids_to_show = None
    watch = None
    only_total = False

    for o, a in opts:
        if o in ('-s', '--split-args'):
            split_args = True
        if o in ('-t', '--total'):
            only_total = True
        if o in ('-h', '--help'):
            sys.stdout.write(help())
            sys.exit(0)
        if o in ('-p',):
            try:
                pids_to_show = [int(x) for x in a.split(',')]
            except ValueError:
                # non-numeric PID in the comma-separated list
                # (was a bare "except:", which also swallowed SystemExit)
                sys.stderr.write(help())
                sys.exit(3)
        if o in ('-w',):
            try:
                watch = int(a)
            except ValueError:
                # interval must be an integer number of seconds
                sys.stderr.write(help())
                sys.exit(3)

    return (split_args, pids_to_show, watch, only_total)
def help():
    """Return the usage/option text shown on -h or a usage error."""
    usage = (
        'Usage: ps_mem [OPTION]...\n'
        'Show program core memory usage\n'
        '\n'
        ' -h, -help Show this help\n'
        ' -p <pid>[,pid2,...pidN] Only show memory usage PIDs in the specified list\n'
        ' -s, --split-args Show and separate by, all command line arguments\n'
        ' -t, --total Show only the total value\n'
        ' -w <N> Measure and show process memory every N seconds\n'
    )
    return usage
#(major,minor,release)
def kernel_ver():
    """Return the running kernel version as an (int, int, int) tuple.

    Reads /proc/sys/kernel/osrelease; strips "-flavour"/"_suffix" parts
    from the minor/release components and treats anything still
    non-numeric as 0. A two-component version gets a '0' release.
    """
    kv = proc.open('sys/kernel/osrelease').readline().split(".")[:3]
    last = len(kv)
    if last == 2:
        kv.append('0')
    last -= 1
    # Walk backwards over minor/release, cleaning each component.
    while last > 0:
        for char in "-_":
            kv[last] = kv[last].split(char)[0]
        try:
            int(kv[last])
        except:
            kv[last] = 0
        last -= 1
    return (int(kv[0]), int(kv[1]), int(kv[2]))
#return Private,Shared
#Note shared is always a subset of rss (trs is not always)
def getMemStats(pid):
    """Return (Private, Shared, mem_id) in KiB for one process.

    mem_id is the pid, or the MD5 of the smaps content when smaps is
    readable -- identical ids later mark CLONE_VM-without-CLONE_THREAD
    clones. Sets the module-global have_pss once a Pss: line is seen.
    """
    global have_pss
    mem_id = pid #unique
    Private_lines = []
    Shared_lines = []
    Pss_lines = []
    # Field 1 of statm is resident set size, in pages.
    Rss = (int(proc.open(pid, 'statm').readline().split()[1])
           * PAGESIZE)
    if os.path.exists(proc.path(pid, 'smaps')): #stat
        digester = md5_new()
        for line in proc.open(pid, 'smaps').readlines(): #open
            # Note we checksum smaps as maps is usually but
            # not always different for separate processes.
            digester.update(line.encode('latin1'))
            if line.startswith("Shared"):
                Shared_lines.append(line)
            elif line.startswith("Private"):
                Private_lines.append(line)
            elif line.startswith("Pss"):
                have_pss = 1
                Pss_lines.append(line)
        mem_id = digester.hexdigest()
    Shared = sum([int(line.split()[1]) for line in Shared_lines])
    Private = sum([int(line.split()[1]) for line in Private_lines])
    #Note Shared + Private = Rss above
    #The Rss in smaps includes video card mem etc.
    if have_pss:
        pss_adjust = 0.5 # add 0.5KiB as this avg error due to truncation
        Pss = sum([float(line.split()[1])+pss_adjust for line in Pss_lines])
        Shared = Pss - Private
    elif (2,6,1) <= kernel_ver() <= (2,6,9):
        # On these kernels the statm "shared" column is the total
        # file-backed extent, so it cannot be used at all.
        Shared = 0 #lots of overestimation, but what can we do?
        Private = Rss
    else:
        Shared = int(proc.open(pid, 'statm').readline().split()[2])
        Shared *= PAGESIZE
        Private = Rss - Shared
    return (Private, Shared, mem_id)
def getCmdName(pid, split_args):
    """Return the display name for a process.

    With split_args the full joined cmdline is returned; otherwise the
    executable basename, annotated with " [updated]"/" [deleted]" when
    the on-disk binary changed or vanished. Raises LookupError when the
    exe link is unreadable (kernel thread, exited, or no permission).
    """
    cmdline = proc.open(pid, 'cmdline').read().split("\0")
    # cmdline is NUL-terminated, so drop the trailing empty element.
    if cmdline[-1] == '' and len(cmdline) > 1:
        cmdline = cmdline[:-1]
    path = proc.path(pid, 'exe')
    try:
        path = os.readlink(path)
        # Some symlink targets were seen to contain NULs on RHEL 5 at least
        # https://github.com/pixelb/scripts/pull/10, so take string up to NUL
        path = path.split('\0')[0]
    except OSError:
        val = sys.exc_info()[1]
        if (val.errno == errno.ENOENT or # either kernel thread or process gone
            val.errno == errno.EPERM):
            raise LookupError
        raise
    if split_args:
        return " ".join(cmdline)
    if path.endswith(" (deleted)"):
        path = path[:-10]
        if os.path.exists(path):
            # Same path exists again: the binary was replaced/updated.
            path += " [updated]"
        else:
            #The path could be have prelink stuff so try cmdline
            #which might have the full path present. This helped for:
            #/usr/libexec/notification-area-applet.#prelink#.fX7LCT (deleted)
            if os.path.exists(cmdline[0]):
                path = cmdline[0] + " [updated]"
            else:
                path += " [deleted]"
    exe = os.path.basename(path)
    # /proc/$pid/status "Name:" field, truncated by the kernel to 15 chars.
    cmd = proc.open(pid, 'status').readline()[6:-1]
    if exe.startswith(cmd):
        cmd = exe #show non truncated version
        #Note because we show the non truncated name
        #one can have separated programs as follows:
        #584.0 KiB + 1.0 MiB = 1.6 MiB mozilla-thunder (exe -> bash)
        # 56.0 MiB + 22.2 MiB = 78.2 MiB mozilla-thunderbird-bin
    return cmd
#The following matches "du -h" output
#see also human.py
def human(num, power="Ki", units=None):
    """Format *num* (KiB by default) like "du -h", or scale to *units*.

    num   -- size expressed in units of *power* bytes.
    power -- binary prefix num currently uses ("Ki", "Mi", ...).
    units -- when given, return num converted at *units* bytes-per-unit
             as a bare number string instead of a human string.
    """
    if units is None:
        # Extended past "Ti" so very large totals can no longer run off
        # the end of the list with an IndexError.
        powers = ["Ki", "Mi", "Gi", "Ti", "Pi", "Ei"]
        while num >= 1000: #4 digits
            num /= 1024.0
            power = powers[powers.index(power)+1]
        return "%.1f %sB" % (num, power)
    else:
        return "%.f" % ((num * 1024) / units)
def cmd_with_count(cmd, count):
    """Return *cmd*, suffixed with " (N)" when it covers N > 1 processes."""
    return "%s (%u)" % (cmd, count) if count > 1 else cmd
#Warn of possible inaccuracies
#2 = accurate & can total
#1 = accurate only considering each process in isolation
#0 = some shared mem not reported
#-1= all shared mem not reported
def shared_val_accuracy():
    """http://wiki.apache.org/spamassassin/TopSharedMemoryBug"""
    kv = kernel_ver()
    pid = os.getpid()
    if kv[:2] == (2,4):
        # 2.4 with the Inact_ rmap patches reports some shared memory.
        if proc.open('meminfo').read().find("Inact_") == -1:
            return 1
        return 0
    elif kv[:2] == (2,6):
        if os.path.exists(proc.path(pid, 'smaps')):
            # PSS support (2.6.23-rc8-mm1+) allows accurate totalling.
            if proc.open(pid, 'smaps').read().find("Pss:")!=-1:
                return 2
            else:
                return 1
        if (2,6,1) <= kv <= (2,6,9):
            # statm "shared" is the file-backed extent here: unusable.
            return -1
        return 0
    elif kv[0] > 2 and os.path.exists(proc.path(pid, 'smaps')):
        # Modern kernels with smaps always have PSS.
        return 2
    else:
        return 1
def show_shared_val_accuracy( possible_inacc, only_total=False ):
    """Print a warning matching the shared_val_accuracy() level.

    With only_total the warning is escalated to an error and the script
    exits(1) unless full accuracy (level 2) is available, since a bare
    total would otherwise be misleading. Closes stderr when done.
    """
    level = ("Warning","Error")[only_total]
    if possible_inacc == -1:
        sys.stderr.write(
            "%s: Shared memory is not reported by this system.\n" % level
        )
        sys.stderr.write(
            "Values reported will be too large, and totals are not reported\n"
        )
    elif possible_inacc == 0:
        sys.stderr.write(
            "%s: Shared memory is not reported accurately by this system.\n" % level
        )
        sys.stderr.write(
            "Values reported could be too large, and totals are not reported\n"
        )
    elif possible_inacc == 1:
        sys.stderr.write(
            "%s: Shared memory is slightly over-estimated by this system\n"
            "for each program, so totals are not reported.\n" % level
        )
    sys.stderr.close()
    if only_total and possible_inacc != 2:
        sys.exit(1)
def get_memory_usage( pids_to_show, split_args, include_self=False, only_self=False ):
    """Scan /proc and aggregate memory usage per program name.

    Returns (sorted_cmds, shareds, count, total):
      sorted_cmds -- [(cmd, private+shared KiB)] ascending by size,
                     zero-sized entries dropped;
      shareds     -- {cmd: shared KiB};
      count       -- {cmd: number of processes};
      total       -- overall KiB, only meaningful when have_pss is set.
    """
    cmds = {}
    shareds = {}
    mem_ids = {}
    count = {}
    for pid in os.listdir(proc.path('')):
        if not pid.isdigit():
            continue
        pid = int(pid)
        # Some filters
        if only_self and pid != our_pid:
            continue
        if pid == our_pid and not include_self:
            continue
        if pids_to_show is not None and pid not in pids_to_show:
            continue
        try:
            cmd = getCmdName(pid, split_args)
        except LookupError:
            #operation not permitted
            #kernel threads don't have exe links or
            #process gone
            continue
        try:
            private, shared, mem_id = getMemStats(pid)
        except RuntimeError:
            continue #process gone
        if shareds.get(cmd):
            if have_pss: #add shared portion of PSS together
                shareds[cmd] += shared
            elif shareds[cmd] < shared: #just take largest shared val
                shareds[cmd] = shared
        else:
            shareds[cmd] = shared
        cmds[cmd] = cmds.setdefault(cmd, 0) + private
        if cmd in count:
            count[cmd] += 1
        else:
            count[cmd] = 1
        # Collect distinct smaps checksums to detect CLONE_VM clones.
        mem_ids.setdefault(cmd, {}).update({mem_id:None})
    #Add shared mem for each program
    total = 0
    for cmd in cmds:
        cmd_count = count[cmd]
        if len(mem_ids[cmd]) == 1 and cmd_count > 1:
            # Assume this program is using CLONE_VM without CLONE_THREAD
            # so only account for one of the processes
            cmds[cmd] /= cmd_count
            if have_pss:
                shareds[cmd] /= cmd_count
        cmds[cmd] = cmds[cmd] + shareds[cmd]
        total += cmds[cmd] #valid if PSS available
    sorted_cmds = sorted(cmds.items(), key=lambda x:x[1])
    sorted_cmds = [x for x in sorted_cmds if x[1]]
    return sorted_cmds, shareds, count, total
def print_header():
    """Emit the column headings for the per-program listing."""
    heading = " Private + Shared = RAM used\tProgram\n\n"
    sys.stdout.write(heading)
def print_memory_usage(sorted_cmds, shareds, count, total):
    """Print one formatted line per program, plus a total when PSS is known.

    Expects the tuple members produced by get_memory_usage().
    """
    for cmd in sorted_cmds:
        # cmd is (name, private+shared); recover private by subtraction.
        sys.stdout.write("%9s + %9s = %9s\t%s\n" %
                         (human(cmd[1]-shareds[cmd[0]]),
                          human(shareds[cmd[0]]), human(cmd[1]),
                          cmd_with_count(cmd[0], count[cmd[0]])))
    if have_pss:
        # Only PSS makes the grand total meaningful (no double counting).
        sys.stdout.write("%s\n%s%9s\n%s\n" %
                         ("-" * 33, " " * 24, human(total), "=" * 33))
def verify_environment():
    """Check we are root and that procfs is accessible.

    Warns (and exits 1 when run as a script) without root; exits 2 when
    /proc (or linprocfs) is missing entirely.
    """
    if os.geteuid() != 0:
        sys.stderr.write("Sorry, root permission required.\n")
        # Only hard-exit when running as the ps_mem script itself.
        if __name__ == '__main__':
            sys.stderr.close()
            sys.exit(1)
    try:
        kv = kernel_ver()
    except (IOError, OSError):
        val = sys.exc_info()[1]
        if val.errno == errno.ENOENT:
            sys.stderr.write(
                "Couldn't access " + proc.path('') + "\n"
                "Only GNU/Linux and FreeBSD (with linprocfs) are supported\n")
            sys.exit(2)
        else:
            raise
if __name__ == '__main__':
    split_args, pids_to_show, watch, only_total = parse_options()
    verify_environment()
    if not only_total:
        print_header()
    if watch is not None:
        # Watch mode: repeat every 'watch' seconds until the filtered
        # process set becomes empty or the user hits Ctrl-C.
        try:
            sorted_cmds = True
            while sorted_cmds:
                sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
                if only_total and have_pss:
                    sys.stdout.write(human(total, units=1)+'\n')
                elif not only_total:
                    print_memory_usage(sorted_cmds, shareds, count, total)
                time.sleep(watch)
            else:
                # while/else: reached only when the loop condition fails,
                # i.e. no matching processes remain.
                sys.stdout.write('Process does not exist anymore.\n')
        except KeyboardInterrupt:
            pass
    else:
        # This is the default behavior
        sorted_cmds, shareds, count, total = get_memory_usage( pids_to_show, split_args )
        if only_total and have_pss:
            sys.stdout.write(human(total, units=1)+'\n')
        elif not only_total:
            print_memory_usage(sorted_cmds, shareds, count, total)
    # We must close explicitly, so that any EPIPE exception
    # is handled by our excepthook, rather than the default
    # one which is reenabled after this script finishes.
    sys.stdout.close()
    vm_accuracy = shared_val_accuracy()
    show_shared_val_accuracy( vm_accuracy, only_total )
| |
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from .federation_base import FederationBase
from .units import Transaction, Edu
from synapse.util.logutils import log_function
from synapse.events import FrozenEvent
import synapse.metrics
from synapse.api.errors import FederationError, SynapseError
from synapse.crypto.event_signing import compute_event_signature
import simplejson as json
import logging
logger = logging.getLogger(__name__)
# synapse.federation.federation_server is a silly name
metrics = synapse.metrics.get_metrics_for("synapse.federation.server")
# Counters for incoming federation traffic; queries are labelled by type.
received_pdus_counter = metrics.register_counter("received_pdus")
received_edus_counter = metrics.register_counter("received_edus")
received_queries_counter = metrics.register_counter("received_queries", labels=["type"])
class FederationServer(FederationBase):
def set_handler(self, handler):
"""Sets the handler that the replication layer will use to communicate
receipt of new PDUs from other home servers. The required methods are
documented on :py:class:`.ReplicationHandler`.
"""
self.handler = handler
    def register_edu_handler(self, edu_type, handler):
        """Register *handler* for incoming EDUs of *edu_type*.

        Raises KeyError if a handler is already registered for the type.
        """
        if edu_type in self.edu_handlers:
            raise KeyError("Already have an EDU handler for %s" % (edu_type,))
        self.edu_handlers[edu_type] = handler
    def register_query_handler(self, query_type, handler):
        """Sets the handler callable that will be used to handle an incoming
        federation Query of the given type.

        Args:
            query_type (str): Category name of the query, which should match
                the string used by make_query.
            handler (callable): Invoked to handle incoming queries of this type

        handler is invoked as:
            result = handler(args)

        where 'args' is a dict mapping strings to strings of the query
        arguments. It should return a Deferred that will eventually yield an
        object to encode as JSON.

        Raises:
            KeyError: if a handler is already registered for query_type.
        """
        if query_type in self.query_handlers:
            raise KeyError(
                "Already have a Query handler for %s" % (query_type,)
            )
        self.query_handlers[query_type] = handler
    @defer.inlineCallbacks
    @log_function
    def on_backfill_request(self, origin, room_id, versions, limit):
        """Serve a backfill request: wrap the handler's PDUs in a transaction.

        Yields a (200, transaction-dict) tuple.
        """
        pdus = yield self.handler.on_backfill_request(
            origin, room_id, versions, limit
        )
        defer.returnValue((200, self._transaction_from_pdus(pdus).get_dict()))
    @defer.inlineCallbacks
    @log_function
    def on_incoming_transaction(self, transaction_data):
        """Process one incoming federation transaction.

        Deduplicates against previously answered transactions, handles
        each PDU (collecting per-PDU success/error results), dispatches
        any EDUs, records the response, and yields (200, response).
        """
        transaction = Transaction(**transaction_data)

        received_pdus_counter.inc_by(len(transaction.pdus))

        # Convert the relative "age" field into an absolute age_ts so the
        # value stays meaningful after we store/forward the event.
        for p in transaction.pdus:
            if "unsigned" in p:
                unsigned = p["unsigned"]
                if "age" in unsigned:
                    p["age"] = unsigned["age"]
            if "age" in p:
                p["age_ts"] = int(self._clock.time_msec()) - int(p["age"])
                del p["age"]

        pdu_list = [
            self.event_from_pdu_json(p) for p in transaction.pdus
        ]

        logger.debug("[%s] Got transaction", transaction.transaction_id)

        # Replay protection: return the stored response for duplicates.
        response = yield self.transaction_actions.have_responded(transaction)

        if response:
            logger.debug(
                "[%s] We've already responed to this request",
                transaction.transaction_id
            )
            defer.returnValue(response)
            # NOTE(review): defer.returnValue raises, so this bare
            # return appears unreachable -- confirm before relying on it.
            return

        logger.debug("[%s] Transaction is new", transaction.transaction_id)

        # One result dict per PDU: {} on success, {"error": ...} otherwise.
        results = []

        for pdu in pdu_list:
            d = self._handle_new_pdu(transaction.origin, pdu)

            try:
                yield d
                results.append({})
            except FederationError as e:
                # Known federation failures are reported back to the origin.
                self.send_failure(e, transaction.origin)
                results.append({"error": str(e)})
            except Exception as e:
                results.append({"error": str(e)})
                logger.exception("Failed to handle PDU")

        if hasattr(transaction, "edus"):
            for edu in [Edu(**x) for x in transaction.edus]:
                self.received_edu(
                    transaction.origin,
                    edu.edu_type,
                    edu.content
                )

        for failure in getattr(transaction, "pdu_failures", []):
            logger.info("Got failure %r", failure)

        logger.debug("Returning: %s", str(results))

        response = {
            "pdus": dict(zip(
                (p.event_id for p in pdu_list), results
            )),
        }

        # Persist the response so duplicate transactions get the same reply.
        yield self.transaction_actions.set_response(
            transaction,
            200, response
        )
        defer.returnValue((200, response))
    def received_edu(self, origin, edu_type, content):
        """Dispatch one incoming EDU to its registered handler, if any.

        Unhandled EDU types are logged and dropped.
        """
        received_edus_counter.inc()

        if edu_type in self.edu_handlers:
            self.edu_handlers[edu_type](origin, content)
        else:
            logger.warn("Received EDU of type %s with no handler", edu_type)
    @defer.inlineCallbacks
    @log_function
    def on_context_state_request(self, origin, room_id, event_id):
        """Serve the room state (and auth chain) at a given event.

        event_id is required; requesting state without one is not
        implemented. Yields (200, {"pdus": [...], "auth_chain": [...]}).
        """
        if event_id:
            pdus = yield self.handler.get_state_for_pdu(
                origin, room_id, event_id,
            )
            auth_chain = yield self.store.get_auth_chain(
                [pdu.event_id for pdu in pdus]
            )

            # Re-sign the auth chain events with our own key before
            # sending them to the requesting server.
            for event in auth_chain:
                event.signatures.update(
                    compute_event_signature(
                        event,
                        self.hs.hostname,
                        self.hs.config.signing_key[0]
                    )
                )
        else:
            raise NotImplementedError("Specify an event")

        defer.returnValue((200, {
            "pdus": [pdu.get_pdu_json() for pdu in pdus],
            "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
        }))
@defer.inlineCallbacks
@log_function
def on_pdu_request(self, origin, event_id):
    """Serve a single PDU by event id; 404 when we do not have it."""
    pdu = yield self._get_persisted_pdu(origin, event_id)
    if not pdu:
        defer.returnValue((404, ""))
    defer.returnValue(
        (200, self._transaction_from_pdus([pdu]).get_dict())
    )
@defer.inlineCallbacks
@log_function
def on_pull_request(self, origin, versions):
    # Pull-style federation transactions were never implemented; reject
    # any such request outright.
    raise NotImplementedError("Pull transactions not implemented")
@defer.inlineCallbacks
def on_query_request(self, query_type, args):
    """Run a federation query through its registered handler.

    Returns a (status, body) pair: 200 with the handler's result, or 404
    when no handler is registered for the query type.
    """
    received_queries_counter.inc(query_type)
    handler = self.query_handlers.get(query_type)
    if handler is None:
        defer.returnValue(
            (404, "No handler for Query type '%s'" % (query_type,))
        )
    response = yield handler(args)
    defer.returnValue((200, response))
@defer.inlineCallbacks
def on_make_join_request(self, room_id, user_id):
    """Build a join-event template for a remote user joining our room."""
    prototype = yield self.handler.on_make_join_request(room_id, user_id)
    now = self._clock.time_msec()
    defer.returnValue({"event": prototype.get_pdu_json(now)})
@defer.inlineCallbacks
def on_invite_request(self, origin, content):
    """Accept an invite aimed at one of our users and return the signed event."""
    invite = self.event_from_pdu_json(content)
    signed = yield self.handler.on_invite_request(origin, invite)
    now = self._clock.time_msec()
    defer.returnValue((200, {"event": signed.get_pdu_json(now)}))
@defer.inlineCallbacks
def on_send_join_request(self, origin, content):
    """Process a join event and hand back the room state the joiner needs."""
    logger.debug("on_send_join_request: content: %s", content)
    join_event = self.event_from_pdu_json(content)
    logger.debug("on_send_join_request: pdu sigs: %s", join_event.signatures)
    res_pdus = yield self.handler.on_send_join_request(origin, join_event)
    now = self._clock.time_msec()
    defer.returnValue((200, {
        "state": [p.get_pdu_json(now) for p in res_pdus["state"]],
        "auth_chain": [
            p.get_pdu_json(now) for p in res_pdus["auth_chain"]
        ],
    }))
@defer.inlineCallbacks
def on_event_auth(self, origin, room_id, event_id):
    """Return the auth chain for the given event."""
    now = self._clock.time_msec()
    auth_pdus = yield self.handler.on_event_auth(event_id)
    defer.returnValue((200, {
        "auth_chain": [a.get_pdu_json(now) for a in auth_pdus],
    }))
@defer.inlineCallbacks
def on_query_auth_request(self, origin, content, event_id):
    """Handle a query_auth request from a remote server.

    `content` carries:
        auth_chain (list): events making up the remote's auth chain.
        missing (list): event ids the remote (`origin`) thinks we lack.
        rejects (dict): event_id -> (reason, proof-or-None) for events
            the remote has rejected.

    Args:
        origin (str)
        content (dict)
        event_id (str)

    Returns:
        Deferred: resolves to a (200, dict) pair, the dict having the
        same shape as `content`.
    """
    remote_auth_chain = [
        self.event_from_pdu_json(e) for e in content["auth_chain"]
    ]
    signed_auth = yield self._check_sigs_and_hash_and_fetch(
        origin, remote_auth_chain, outlier=True
    )
    ret = yield self.handler.on_query_auth(
        origin,
        event_id,
        signed_auth,
        content.get("rejects", []),
        content.get("missing", []),
    )
    now = self._clock.time_msec()
    send_content = {
        "auth_chain": [e.get_pdu_json(now) for e in ret["auth_chain"]],
        "rejects": ret.get("rejects", []),
        "missing": ret.get("missing", []),
    }
    defer.returnValue((200, send_content))
@defer.inlineCallbacks
@log_function
def on_query_client_keys(self, origin, content):
    """Look up end-to-end device keys requested by a remote server.

    An empty device-id list for a user means "all devices for that user"
    (queried with device_id None).
    """
    query = []
    for user_id, device_ids in content.get("device_keys", {}).items():
        if not device_ids:
            query.append((user_id, None))
        else:
            query.extend((user_id, device_id) for device_id in device_ids)
    results = yield self.store.get_e2e_device_keys(query)
    json_result = {}
    for user_id, device_keys in results.items():
        for device_id, json_bytes in device_keys.items():
            json_result.setdefault(user_id, {})[device_id] = json.loads(
                json_bytes
            )
    defer.returnValue({"device_keys": json_result})
@defer.inlineCallbacks
@log_function
def on_claim_client_keys(self, origin, content):
    """Claim one-time keys on behalf of a remote server.

    `content["one_time_keys"]` maps user_id -> device_id -> algorithm;
    the response mirrors that shape with the claimed key JSON.
    """
    query = []
    for user_id, device_keys in content.get("one_time_keys", {}).items():
        for device_id, algorithm in device_keys.items():
            query.append((user_id, device_id, algorithm))
    results = yield self.store.claim_e2e_one_time_keys(query)
    json_result = {}
    for user_id, device_keys in results.items():
        for device_id, keys in device_keys.items():
            for key_id, json_bytes in keys.items():
                # BUG FIX: previously each key_id assignment replaced the
                # entire device entry ({key_id: ...}), so when the store
                # returned several keys for one device only the last one
                # survived. Merge into the device dict instead.
                json_result.setdefault(user_id, {}).setdefault(
                    device_id, {}
                )[key_id] = json.loads(json_bytes)
    defer.returnValue({"one_time_keys": json_result})
@defer.inlineCallbacks
@log_function
def on_get_missing_events(self, origin, room_id, earliest_events,
                          latest_events, limit, min_depth):
    """Serve events the remote is missing between two sets of event ids."""
    missing = yield self.handler.on_get_missing_events(
        origin, room_id, earliest_events, latest_events, limit, min_depth
    )
    now = self._clock.time_msec()
    defer.returnValue({
        "events": [ev.get_pdu_json(now) for ev in missing],
    })
@log_function
def _get_persisted_pdu(self, origin, event_id, do_auth=True):
    """ Get a PDU from the database with given origin and id.

    Args:
        origin (str): server requesting the event (used for auth checks).
        event_id (str): id of the event to fetch.
        do_auth (bool): whether to check the requester may see the event.

    Returns:
        Deferred: Results in a `Pdu`.
    """
    # Thin wrapper; all the work happens in the federation handler.
    return self.handler.get_persisted_pdu(
        origin, event_id, do_auth=do_auth
    )
def _transaction_from_pdus(self, pdu_list):
    """Wrap the given PDUs in a new Transaction ready for transmission."""
    now = self._clock.time_msec()
    serialised = [event.get_pdu_json(now) for event in pdu_list]
    # destination is filled in later, when the transaction is sent.
    return Transaction(
        origin=self.server_name,
        pdus=serialised,
        origin_server_ts=int(now),
        destination=None,
    )
@defer.inlineCallbacks
@log_function
def _handle_new_pdu(self, origin, pdu, get_missing=True):
    """Process a PDU received from `origin`.

    Skips events we already have (unless previously seen only as an
    outlier), verifies signatures/hashes, backfills missing predecessor
    events where possible, fetches remote room state when gaps remain,
    and finally hands the event to the federation handler.

    Args:
        origin (str): server the PDU came from.
        pdu: the event to process.
        get_missing (bool): whether to request missing prev_events from
            the origin (disabled when recursing on fetched events).
    """
    # We reprocess pdus when we have seen them only as outliers
    existing = yield self._get_persisted_pdu(
        origin, pdu.event_id, do_auth=False
    )
    # FIXME: Currently we fetch an event again when we already have it
    # if it has been marked as an outlier.
    already_seen = (
        existing and (
            not existing.internal_metadata.is_outlier()
            or pdu.internal_metadata.is_outlier()
        )
    )
    if already_seen:
        logger.debug("Already seen pdu %s", pdu.event_id)
        return
    # Check signature.
    try:
        pdu = yield self._check_sigs_and_hash(pdu)
    except SynapseError as e:
        raise FederationError(
            "ERROR",
            e.code,
            e.msg,
            affected=pdu.event_id,
        )
    state = None
    auth_chain = []
    have_seen = yield self.store.have_events(
        [ev for ev, _ in pdu.prev_events]
    )
    fetch_state = False
    # Get missing pdus if necessary.
    if not pdu.internal_metadata.is_outlier():
        # We only backfill backwards to the min depth.
        min_depth = yield self.handler.get_min_depth_for_context(
            pdu.room_id
        )
        logger.debug(
            "_handle_new_pdu min_depth for %s: %d",
            pdu.room_id, min_depth
        )
        prevs = {e_id for e_id, _ in pdu.prev_events}
        seen = set(have_seen.keys())
        if min_depth and pdu.depth < min_depth:
            # This is so that we don't notify the user about this
            # message, to work around the fact that some events will
            # reference really really old events we really don't want to
            # send to the clients.
            pdu.internal_metadata.outlier = True
        elif min_depth and pdu.depth > min_depth:
            if get_missing and prevs - seen:
                latest = yield self.store.get_latest_event_ids_in_room(
                    pdu.room_id
                )
                # We add the prev events that we have seen to the latest
                # list to ensure the remote server doesn't give them to us
                latest = set(latest)
                latest |= seen
                missing_events = yield self.get_missing_events(
                    origin,
                    pdu.room_id,
                    earliest_events_ids=list(latest),
                    latest_events=[pdu],
                    limit=10,
                    min_depth=min_depth,
                )
                # We want to sort these by depth so we process them and
                # tell clients about them in order.
                missing_events.sort(key=lambda x: x.depth)
                for e in missing_events:
                    yield self._handle_new_pdu(
                        origin,
                        e,
                        get_missing=False
                    )
                have_seen = yield self.store.have_events(
                    [ev for ev, _ in pdu.prev_events]
                )
        prevs = {e_id for e_id, _ in pdu.prev_events}
        seen = set(have_seen.keys())
        if prevs - seen:
            fetch_state = True
    if fetch_state:
        # We need to get the state at this event, since we haven't
        # processed all the prev events.
        logger.debug(
            "_handle_new_pdu getting state for %s",
            pdu.room_id
        )
        try:
            state, auth_chain = yield self.get_state_for_room(
                origin, pdu.room_id, pdu.event_id,
            )
        except Exception:
            # BUG FIX: this was a bare `except:`, which inside an
            # inlineCallbacks generator also swallows GeneratorExit and
            # other BaseExceptions; narrowed to Exception. Best-effort
            # behaviour (log and continue without state) is preserved.
            logger.warn("Failed to get state for event: %s", pdu.event_id)
    yield self.handler.on_receive_pdu(
        origin,
        pdu,
        backfilled=False,
        state=state,
        auth_chain=auth_chain,
    )
def __str__(self):
    # Compact identifier used in log output for this replication layer.
    return "<ReplicationLayer(%s)>" % self.server_name
def event_from_pdu_json(self, pdu_json, outlier=False):
    """Build a FrozenEvent from its federation JSON form.

    Args:
        pdu_json (dict): the event as received over federation.
        outlier (bool): whether to mark the event as an outlier.
    """
    ev = FrozenEvent(pdu_json)
    ev.internal_metadata.outlier = outlier
    return ev
| |
import json
import os
import random
import string
import subprocess
import sys
import time
from urllib import request
from urllib.parse import quote
import requests
from pyquery import pyquery
__author__ = 'Henri Sweers'
# Color class, used for colors in terminal
class Color:
    """ANSI escape sequences for coloured/styled terminal output.

    Usage: print(Color.RED + "message" + Color.END)
    """
    def __init__(self):
        pass
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'  # resets all styling
# If you're on mac, install terminal-notifier ("brew install terminal-notifier")
# to get nifty notifications when it's done
def notify_mac(message):
    """Pop a desktop notification via terminal-notifier (macOS only).

    Silently does nothing on other platforms; prints a hint when the
    terminal-notifier binary is missing.
    """
    if sys.platform != "darwin":
        return
    command = ["terminal-notifier", "-message", message, "-title", "FB_Bot",
               "-sound", "default"]
    try:
        subprocess.call(command)
    except OSError:
        print("If you have terminal-notifier, this would be a notification")
# Log method. If there's a color argument, it'll stick that in first
def log(message, *colorargs):
    """Print `message`, optionally wrapped in the first given terminal color."""
    if colorargs:
        print(colorargs[0] + message + Color.END)
    else:
        print(message)
# Convert gifs to gfycat
def gfycat_convert(url_to_convert):
    """Ask gfycat to transcode the gif at `url_to_convert`.

    Polls the status endpoint for up to ~60 seconds.

    Returns:
        The gfycat page URL on success, None on API error or timeout.
        NOTE(review): the non-200 transcode path returns the string
        "Error" instead of None - callers must handle both sentinels.
    """
    log('--Converting to gfycat')
    encoded_url = quote(url_to_convert, '')
    # Convert
    # Random 8-char key names the gfycat item we are about to create.
    key = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(8))
    transcode_url = 'http://upload.gfycat.com/transcodeRelease/' + key + '?noMd5=true&fetchUrl=' + encoded_url
    conversion_response = requests.get(transcode_url)
    if conversion_response.status_code == 200:
        j = conversion_response.json()
        if 'error' in j.keys():
            log('----Error: ' + j['error'], Color.RED)
            return None
    else:
        print(conversion_response)
        log('----failed', Color.RED)
        return "Error"
    # Poll once per second until the transcode completes or we give up.
    timeout = 60
    while timeout > 0:
        status_url = 'http://upload.gfycat.com/status/' + key
        status_response = requests.get(status_url)
        j = status_response.json()
        if 'error' in j.keys():
            log('----Error: ' + j['error'], Color.RED)
            return None
        if 'task' in j.keys() and j['task'] == 'complete':
            log('----success', Color.GREEN)
            gfyname = j["gfyname"]
            return "http://gfycat.com/" + gfyname
        timeout -= 1
        time.sleep(1)
    log("----conversion timed out", Color.RED)
    return None
def offsided_convert(title, url_to_convert):
    """Upload the video at `url_to_convert` to offsided.com.

    Polls the upload status for up to ~60 seconds.

    Returns:
        The canonical offsided URL on success, None on error.
        NOTE(review): when the poll loop exhausts its 60 iterations the
        function falls off the end and implicitly returns None.
    """
    log('--Converting to offsided')
    req_data = {
        'url': url_to_convert,
        'title': title
    }
    r = requests.post(
        'http://offsided.com/api/v1/upload-url',
        data=json.dumps(req_data),
        headers={
            'Content-type': 'application/json',
            'Accept': 'application/json'
        }
    )
    if r.status_code != 200:
        log('----Error uploading gif: Status code ' + str(r.status_code), Color.RED)
        return None
    error_text = r.json().get('error')
    if error_text:
        log('----Error uploading gif: ' + error_text, Color.RED)
        return None
    else:
        upload_id = r.json()['id']
        canonical_url = r.json()['canonical_url']
        # Poll once per second until processing completes or we give up.
        timeout = 60
        while timeout > 0:
            r = requests.get(
                'http://offsided.com/api/v1/' + upload_id,
                headers={
                    'Accept': 'application/json'
                }
            )
            if r.json()['status'] == 'complete':
                log('----Video is complete at ' + r.json()['canonical_url'], Color.GREEN)
                log('----success', Color.GREEN)
                return canonical_url
            elif r.json()['status'] == 'error':
                log('----Conversion failed.', Color.RED)
                return None
            else:
                timeout -= 1
                time.sleep(1)
def get_offsided_info(f_id):
    """Fetch and return the JSON metadata for an offsided upload."""
    response = requests.get("http://offsided.com/link/%s" % f_id)
    return response.json()
def streamable_convert(url_to_convert, streamable_pwd):
    """Import a remote video into Streamable and return its short URL."""
    log('--Converting to streamable')
    api_url = "https://api.streamable.com/import?url=%s&noresize" % url_to_convert
    response = requests.get(api_url, auth=('gfy_mirror', streamable_pwd))
    shortcode = response.json()["shortcode"]
    return "https://streamable.com/%s" % shortcode
def get_streamable_info(s_id, streamable_pwd='WinYeaUsEyZ7W4'):
    """Fetch and return the JSON metadata for a Streamable video.

    Args:
        s_id: Streamable shortcode.
        streamable_pwd: account password. SECURITY: the default is the
            credential that was hard-coded in the original source - it is
            already leaked; pass the password explicitly (as
            streamable_convert does) and rotate the secret.
    """
    req_url = "https://api.streamable.com/videos/%s" % s_id
    r = requests.get(req_url, auth=('gfy_mirror', streamable_pwd))
    data = r.json()
    return data
def imgur_upload(title, url_to_process):
    """Upload the image at `url_to_process` to imgur.

    Returns:
        The imgur link on success, None on HTTP or API failure.
    """
    log('--Uploading to imgur')
    headers = {"Authorization": "Client-ID c4f5de959205bb4",
               'Content-type': 'application/json',
               'Accept': 'application/json'}
    req_data = {
        'image': url_to_process,
        'title': title,
        'type': 'URL'
    }
    r = requests.post(
        'https://api.imgur.com/3/upload',
        data=json.dumps(req_data),
        headers=headers
    )
    # BUG FIX: this was `assert r.status_code == 200`, which is stripped
    # under `python -O` and crashed the bot on transient API errors.
    # Handle the failure explicitly instead, returning None like the
    # other converters in this module.
    if r.status_code != 200:
        log('----Error uploading to imgur: status ' + str(r.status_code), Color.RED)
        return None
    jdata = r.json()
    if jdata['success']:
        link = jdata['data']['link']
        print('link is ' + link)
        return link
    else:
        print("error")
        # Explicit None (was an implicit fall-through before).
        return None
# Returns the .mp4 url of a vine video
def retrieve_vine_video_url(vine_url):
    """Scrape the direct .mp4 stream URL from a vine page.

    NOTE(review): relies on the `twitter:player:stream` meta tag being
    present in the page markup - verify this still holds.
    """
    log('--Retrieving vine url')
    d = pyquery.PyQuery(url=vine_url)
    video_url = d("meta[property=twitter\\:player\\:stream]").attr['content']
    # Drop any query string; downstream expects the bare .mp4 path.
    video_url = video_url.partition("?")[0]
    return video_url
def retrieve_vine_cdn_url(cdn_url):
    """Truncate a vine CDN url right after its ".mp4" extension.

    BUG FIX: when ".mp4" was absent, `find` returned -1 and the old code
    silently returned the first three characters of the url
    (-1 + 4 == 3). Now the url is returned unchanged in that case.
    """
    idx = cdn_url.find('.mp4')
    if idx == -1:
        return cdn_url
    return cdn_url[:idx + 4]
# Generate a random 10 letter string
# Borrowed from here: http://stackoverflow.com/a/16962716/3034339
def gen_random_string():
    """Return a string of 10 random ASCII letters."""
    pool = string.ascii_letters * 10
    return ''.join(random.sample(pool, 10))
# Gets the id of a video assuming it's of the "website.com/<id>" type
def get_id(url_to_get):
    """Return the trailing path segment of a url, minus any file extension."""
    if url_to_get[-1] == '/':
        url_to_get = url_to_get[:-1]
    tail = url_to_get.rsplit('/', 1)[-1]
    # Truncate the extension if need be
    return os.path.splitext(tail)[0] if '.' in tail else tail
# Get gfycat info
def get_gfycat_info(gfy_id):
    """Fetch and return the `gfyItem` metadata dict for a gfycat id."""
    payload = requests.get("http://www.gfycat.com/cajax/get/%s" % gfy_id).json()
    return payload['gfyItem']
def get_remote_file_size(url):
    """Return the size in bytes of the resource at `url`.

    BUG FIX: the original never closed the urllib response, leaking the
    underlying socket; the with-statement guarantees it is released.
    """
    with request.urlopen(url) as response:
        return response.length
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import atexit
import logging
import os
import pendulum
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from sqlalchemy.pool import NullPool
from airflow import configuration as conf
from airflow.logging_config import configure_logging
from airflow.utils.sqlalchemy import setup_event_handlers
log = logging.getLogger(__name__)
# Resolve the default timezone from config, falling back to UTC.
TIMEZONE = pendulum.timezone('UTC')
try:
    tz = conf.get("core", "default_timezone")
    if tz == "system":
        TIMEZONE = pendulum.local_timezone()
    else:
        TIMEZONE = pendulum.timezone(tz)
except Exception:
    # FIX: was a bare `except:` (also caught SystemExit/KeyboardInterrupt).
    # Missing option or invalid tz name -> keep the UTC default.
    pass
log.info("Configured default timezone %s", TIMEZONE)
class DummyStatsLogger(object):
    """No-op stand-in for the statsd client, used when statsd is disabled.

    Mirrors the StatsClient interface (incr/decr/gauge/timing) so callers
    can emit metrics unconditionally.
    """
    @classmethod
    def incr(cls, stat, count=1, rate=1):
        pass
    @classmethod
    def decr(cls, stat, count=1, rate=1):
        pass
    @classmethod
    def gauge(cls, stat, value, rate=1, delta=False):
        pass
    @classmethod
    def timing(cls, stat, dt):
        pass
# Default to the no-op logger; replaced with a real statsd client below
# when [scheduler] statsd_on is set.
Stats = DummyStatsLogger
if conf.getboolean('scheduler', 'statsd_on'):
    from statsd import StatsClient
    statsd = StatsClient(
        host=conf.get('scheduler', 'statsd_host'),
        port=conf.getint('scheduler', 'statsd_port'),
        prefix=conf.get('scheduler', 'statsd_prefix'))
    Stats = statsd
else:
    # Redundant with the default assignment above; kept for explicitness.
    Stats = DummyStatsLogger
HEADER = """\
____________ _____________
____ |__( )_________ __/__ /________ __
____ /| |_ /__ ___/_ /_ __ /_ __ \_ | /| / /
___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /
_/_/ |_/_/ /_/ /_/ /_/ \____/____/|__/
"""
BASE_LOG_URL = '/admin/airflow/log'
LOGGING_LEVEL = logging.INFO
# the prefix to append to gunicorn worker processes after init
GUNICORN_WORKER_READY_PREFIX = "[ready] "
LOG_FORMAT = conf.get('core', 'log_format')
SIMPLE_LOG_FORMAT = conf.get('core', 'simple_log_format')
AIRFLOW_HOME = None
SQL_ALCHEMY_CONN = None
DAGS_FOLDER = None
engine = None
Session = None
def policy(task_instance):
    """
    This policy setting allows altering task instances right before they
    are executed. It allows administrator to rewire some task parameters.
    Note that the ``TaskInstance`` object has an attribute ``task`` pointing
    to its related task object, that in turns has a reference to the DAG
    object. So you can use the attributes of all of these to define your
    policy.
    To define policy, add a ``airflow_local_settings`` module
    to your PYTHONPATH that defines this ``policy`` function. It receives
    a ``TaskInstance`` object and can alter it where needed.
    Here are a few examples of how this can be useful:
    * You could enforce a specific queue (say the ``spark`` queue)
    for tasks using the ``SparkOperator`` to make sure that these
    task instances get wired to the right workers
    * You could force all task instances running on an
    ``execution_date`` older than a week old to run in a ``backfill``
    pool.
    * ...
    """
    # Default implementation is a no-op; overridden via
    # airflow_local_settings (star-imported during module init below).
    pass
def configure_vars():
    """Populate the module-level path/connection globals from config."""
    global AIRFLOW_HOME, SQL_ALCHEMY_CONN, DAGS_FOLDER
    AIRFLOW_HOME = os.path.expanduser(conf.get('core', 'AIRFLOW_HOME'))
    SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN')
    DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
def configure_orm(disable_connection_pool=False):
    """Create the global SQLAlchemy engine and scoped Session factory.

    Args:
        disable_connection_pool: force NullPool regardless of config.
    """
    log.debug("Setting up DB connection pool (PID %s)" % os.getpid())
    global engine, Session
    engine_args = {}
    pool_enabled = conf.getboolean('core', 'SQL_ALCHEMY_POOL_ENABLED')
    if disable_connection_pool or not pool_enabled:
        engine_args['poolclass'] = NullPool
    elif 'sqlite' not in SQL_ALCHEMY_CONN:
        # Engine args not supported by sqlite
        engine_args['pool_size'] = conf.getint('core', 'SQL_ALCHEMY_POOL_SIZE')
        engine_args['pool_recycle'] = conf.getint('core',
                                                  'SQL_ALCHEMY_POOL_RECYCLE')
    engine = create_engine(SQL_ALCHEMY_CONN, **engine_args)
    reconnect_timeout = conf.getint('core', 'SQL_ALCHEMY_RECONNECT_TIMEOUT')
    setup_event_handlers(engine, reconnect_timeout)
    Session = scoped_session(
        sessionmaker(autocommit=False, autoflush=False, bind=engine))
def dispose_orm():
    """Properly close pooled database connections and reset the globals."""
    log.debug("Disposing DB connection pool (PID %s)", os.getpid())
    global engine, Session
    if Session:
        Session.remove()
        Session = None
    if engine:
        engine.dispose()
        engine = None
def configure_adapters():
    """Teach the DB drivers how to serialise Pendulum datetimes."""
    from pendulum import Pendulum
    def _to_sqlite(value):
        return value.isoformat(' ')
    try:
        from sqlite3 import register_adapter
        register_adapter(Pendulum, _to_sqlite)
    except ImportError:
        pass
    try:
        import MySQLdb.converters
        MySQLdb.converters.conversions[Pendulum] = MySQLdb.converters.DateTime2literal
    except ImportError:
        pass
# Optionally pull in user overrides (e.g. the `policy` hook) from an
# airflow_local_settings module on the PYTHONPATH.
try:
    from airflow_local_settings import *
    log.info("Loaded airflow_local_settings.")
except ImportError:
    # No user overrides present - that's fine.
    pass
except Exception:
    # FIX: was a bare `except: pass` that silently hid *any* failure in
    # the user's settings module; still best-effort, but now logged.
    log.exception("Failed to import airflow_local_settings.")
configure_logging()
configure_vars()
configure_adapters()
configure_orm()
# Ensure we close DB connections at scheduler and gunicorn worker terminations
atexit.register(dispose_orm)
# Const stuff
KILOBYTE = 1024
MEGABYTE = KILOBYTE * KILOBYTE
WEB_COLORS = {'LIGHTBLUE': '#4d9de0',
              'LIGHTORANGE': '#FF9933'}
| |
try:
from builtins import object
except ImportError:
pass
from transitions import Machine, State, MachineError
from transitions.core import listify
from unittest import TestCase
from .utils import Stuff, InheritedStuff
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class TestTransitions(TestCase):
def setUp(self):
    # Fresh Stuff model (carrying its own Machine) for every test.
    self.stuff = Stuff()
def tearDown(self):
    # Nothing to clean up; per-test machines are garbage collected.
    pass
def test_init_machine_with_hella_arguments(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = [
        State('State1'),
        'State2',
        {
            'name': 'State3',
            'on_enter': 'hello_world'
        }
    ]
    transitions = [
        {'trigger': 'advance',
         'source': 'State2',
         'dest': 'State3'
         }
    ]
    s = Stuff()
    m = Machine(model=s, states=states, transitions=transitions, initial='State2')
    s.advance()
    self.assertEqual(s.message, 'Hello World!')
def test_listify(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    self.assertEqual(listify(4), [4])
    self.assertEqual(listify(None), [])
    # Tuples are passed through unchanged by listify.
    self.assertEqual(listify((4, 5)), (4, 5))
    self.assertEqual(listify([1, 3]), [1, 3])
def test_property_initial(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', 'B', 'C', 'D']
    # Define with list of dictionaries
    transitions = [
        {'trigger': 'walk', 'source': 'A', 'dest': 'B'},
        {'trigger': 'run', 'source': 'B', 'dest': 'C'},
        {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
    ]
    m = Machine(states=states, transitions=transitions, initial='A')
    self.assertEqual(m.initial, 'A')
    m = Machine(states=states, transitions=transitions, initial='C')
    self.assertEqual(m.initial, 'C')
    # Default initial state name is 'initial'.
    m = Machine(states=states, transitions=transitions)
    self.assertEqual(m.initial, 'initial')
def test_transition_definitions(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', 'B', 'C', 'D']
    # Define with list of dictionaries
    transitions = [
        {'trigger': 'walk', 'source': 'A', 'dest': 'B'},
        {'trigger': 'run', 'source': 'B', 'dest': 'C'},
        {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
    ]
    m = Machine(states=states, transitions=transitions, initial='A')
    m.walk()
    self.assertEqual(m.state, 'B')
    # Define with list of lists
    transitions = [
        ['walk', 'A', 'B'],
        ['run', 'B', 'C'],
        ['sprint', 'C', 'D']
    ]
    m = Machine(states=states, transitions=transitions, initial='A')
    m.to_C()
    m.sprint()
    self.assertEqual(m.state, 'D')
def test_transitioning(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    s = self.stuff
    s.machine.add_transition('advance', 'A', 'B')
    s.machine.add_transition('advance', 'B', 'C')
    s.machine.add_transition('advance', 'C', 'D')
    s.advance()
    self.assertEqual(s.state, 'B')
    self.assertFalse(s.is_A())
    self.assertTrue(s.is_B())
    s.advance()
    self.assertEqual(s.state, 'C')
def test_conditions(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    s = self.stuff
    s.machine.add_transition('advance', 'A', 'B', conditions='this_passes')
    s.machine.add_transition('advance', 'B', 'C', unless=['this_fails'])
    s.machine.add_transition('advance', 'C', 'D', unless=['this_fails',
                                                          'this_passes'])
    s.advance()
    self.assertEqual(s.state, 'B')
    s.advance()
    self.assertEqual(s.state, 'C')
    # Third transition blocked: `unless` includes a passing condition.
    s.advance()
    self.assertEqual(s.state, 'C')
def test_multiple_add_transitions_from_state(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    s = self.stuff
    s.machine.add_transition(
        'advance', 'A', 'B', conditions=['this_fails'])
    s.machine.add_transition('advance', 'A', 'C')
    # First A->B transition fails its condition, so A->C fires instead.
    s.advance()
    self.assertEqual(s.state, 'C')
def test_use_machine_as_model(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', 'B', 'C', 'D']
    m = Machine(states=states, initial='A')
    m.add_transition('move', 'A', 'B')
    m.add_transition('move_to_C', 'B', 'C')
    m.move()
    self.assertEqual(m.state, 'B')
def test_state_change_listeners(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    s = self.stuff
    s.machine.add_transition('advance', 'A', 'B')
    s.machine.add_transition('reverse', 'B', 'A')
    s.machine.on_enter_B('hello_world')
    s.machine.on_exit_B('goodbye')
    s.advance()
    self.assertEqual(s.state, 'B')
    self.assertEqual(s.message, 'Hello World!')
    s.reverse()
    self.assertEqual(s.state, 'A')
    self.assertTrue(s.message.startswith('So long'))
def test_before_after_callback_addition(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
    m.add_transition('move', 'A', 'B')
    trans = m.events['move'].transitions['A'][0]
    trans.add_callback('after', 'increase_level')
    m.model.move()
    self.assertEqual(m.model.level, 2)
def test_before_after_transition_listeners(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
    m.add_transition('move', 'A', 'B')
    m.add_transition('move', 'B', 'C')
    m.before_move('increase_level')
    m.model.move()
    self.assertEqual(m.model.level, 2)
    m.model.move()
    self.assertEqual(m.model.level, 3)
def test_prepare(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
    m.add_transition('move', 'A', 'B', prepare='increase_level')
    m.add_transition('move', 'B', 'C', prepare='increase_level')
    m.add_transition('move', 'C', 'A', prepare='increase_level', conditions='this_fails')
    m.add_transition('dont_move', 'A', 'C', prepare='increase_level')
    m.prepare_move('increase_level')
    m.model.move()
    self.assertEqual(m.model.state, 'B')
    self.assertEqual(m.model.level, 3)
    m.model.move()
    self.assertEqual(m.model.state, 'C')
    self.assertEqual(m.model.level, 5)
    # State does not advance, but increase_level still runs
    m.model.move()
    self.assertEqual(m.model.state, 'C')
    self.assertEqual(m.model.level, 7)
    # An invalid transition shouldn't execute the callback
    with self.assertRaises(MachineError):
        m.model.dont_move()
    self.assertEqual(m.model.state, 'C')
    self.assertEqual(m.model.level, 7)
def test_state_model_change_listeners(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12;
    # bare `assert x in y` swapped for assertIn for better failure output.
    s = self.stuff
    s.machine.add_transition('go_e', 'A', 'E')
    s.machine.add_transition('go_f', 'E', 'F')
    s.machine.on_enter_F('hello_F')
    s.go_e()
    self.assertEqual(s.state, 'E')
    self.assertEqual(s.message, 'I am E!')
    s.go_f()
    self.assertEqual(s.state, 'F')
    self.assertEqual(s.exit_message, 'E go home...')
    self.assertIn('I am F!', s.message)
    self.assertIn('Hello F!', s.message)
def test_inheritance(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', 'B', 'C', 'D', 'E']
    s = InheritedStuff(states=states, initial='A')
    s.add_transition('advance', 'A', 'B', conditions='this_passes')
    s.add_transition('advance', 'B', 'C')
    s.add_transition('advance', 'C', 'D')
    s.advance()
    self.assertEqual(s.state, 'B')
    self.assertFalse(s.is_A())
    self.assertTrue(s.is_B())
    s.advance()
    self.assertEqual(s.state, 'C')
def test_send_event_data_callbacks(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', 'B', 'C', 'D', 'E']
    s = Stuff()
    # First pass positional and keyword args directly to the callback
    m = Machine(model=s, states=states, initial='A', send_event=False,
                auto_transitions=True)
    m.add_transition(
        trigger='advance', source='A', dest='B', before='set_message')
    s.advance(message='Hallo. My name is Inigo Montoya.')
    self.assertTrue(s.message.startswith('Hallo.'))
    # Make sure callbacks handle arguments properly
    s.to_E("Optional message")
    self.assertEqual(s.message, 'Optional message')
    s.to_B()
    # Now wrap arguments in an EventData instance
    m.send_event = True
    m.add_transition(
        trigger='advance', source='B', dest='C', before='extract_message')
    s.advance(message='You killed my father. Prepare to die.')
    self.assertTrue(s.message.startswith('You'))
def test_send_event_data_conditions(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', 'B', 'C', 'D']
    s = Stuff()
    # First pass positional and keyword args directly to the condition
    m = Machine(model=s, states=states, initial='A', send_event=False)
    m.add_transition(
        trigger='advance', source='A', dest='B',
        conditions='this_fails_by_default')
    s.advance(boolean=True)
    self.assertEqual(s.state, 'B')
    # Now wrap arguments in an EventData instance
    m.send_event = True
    m.add_transition(
        trigger='advance', source='B', dest='C',
        conditions='extract_boolean')
    s.advance(boolean=False)
    self.assertEqual(s.state, 'B')
def test_auto_transitions(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['A', {'name': 'B'}, State(name='C')]
    m = Machine(None, states, initial='A', auto_transitions=True)
    m.to_B()
    self.assertEqual(m.state, 'B')
    m.to_C()
    self.assertEqual(m.state, 'C')
    m.to_A()
    self.assertEqual(m.state, 'A')
    # Should fail if auto transitions is off...
    m = Machine(None, states, initial='A', auto_transitions=False)
    with self.assertRaises(AttributeError):
        m.to_C()
def test_ordered_transitions(self):
    # FIX: assertEquals is a deprecated alias removed in Python 3.12.
    states = ['beginning', 'middle', 'end']
    m = Machine(None, states)
    m.add_ordered_transitions()
    self.assertEqual(m.state, 'initial')
    m.next_state()
    self.assertEqual(m.state, 'beginning')
    m.next_state()
    m.next_state()
    self.assertEqual(m.state, 'end')
    m.next_state()
    self.assertEqual(m.state, 'initial')
    # Exclude the initial state from the loop
    # (FIX: the old comment said "Include", contradicting the code)
    m = Machine(None, states)
    m.add_ordered_transitions(loop_includes_initial=False)
    m.to_end()
    m.next_state()
    self.assertEqual(m.state, 'beginning')
    # Test user-determined sequence and trigger name
    m = Machine(None, states, initial='beginning')
    m.add_ordered_transitions(['end', 'beginning'], trigger='advance')
    m.advance()
    self.assertEqual(m.state, 'end')
    m.advance()
    self.assertEqual(m.state, 'beginning')
    # Via init argument
    m = Machine(
        None, states, initial='beginning', ordered_transitions=True)
    m.next_state()
    self.assertEqual(m.state, 'middle')
def test_ignore_invalid_triggers(self):
    # Firing a trigger that has no transition from the current state
    # should raise MachineError unless suppression is enabled.
    a_state = State('A')
    transitions = [['a_to_b', 'A', 'B']]
    # Exception is triggered by default
    b_state = State('B')
    m1 = Machine(None, states=[a_state, b_state], transitions=transitions,
                 initial='B')
    with self.assertRaises(MachineError):
        m1.a_to_b()
    # Exception is suppressed, so this passes
    b_state = State('B', ignore_invalid_triggers=True)
    m2 = Machine(None, states=[a_state, b_state], transitions=transitions,
                 initial='B')
    m2.a_to_b()
    # Set for some states but not others
    new_states = ['C', 'D']
    m1.add_states(new_states, ignore_invalid_triggers=True)
    m1.to_D()
    m1.a_to_b()  # passes because exception suppressed for D
    m1.to_B()
    with self.assertRaises(MachineError):
        m1.a_to_b()
    # Set at machine level
    m3 = Machine(None, states=[a_state, b_state], transitions=transitions,
                 initial='B', ignore_invalid_triggers=True)
    m3.a_to_b()
def test_string_callbacks(self):
    # String callback names are resolved on the model at trigger time,
    # so mocks assigned after Machine construction are still picked up.
    m = Machine(None, states=['A', 'B'],
                before_state_change='before_state_change',
                after_state_change='after_state_change', send_event=True,
                initial='A', auto_transitions=True)
    m.before_state_change = MagicMock()
    m.after_state_change = MagicMock()
    m.to_B()
    self.assertTrue(m.before_state_change.called)
    self.assertTrue(m.after_state_change.called)
def test_function_callbacks(self):
    # Callables (not just string names) are accepted as global
    # before/after state-change callbacks.
    before_state_change = MagicMock()
    after_state_change = MagicMock()
    m = Machine(None, states=['A', 'B'],
                before_state_change=before_state_change,
                after_state_change=after_state_change, send_event=True,
                initial='A', auto_transitions=True)
    m.to_B()
    self.assertTrue(m.before_state_change.called)
    self.assertTrue(m.after_state_change.called)
def test_pickle(self):
    # A machine must round-trip through pickle with its state intact and
    # stay functional afterwards.
    import sys
    if sys.version_info < (3, 4):
        # Older pickle can't handle the machine's bound methods; dill can.
        import dill as pickle
    else:
        import pickle
    states = ['A', 'B', 'C', 'D']
    # Define with list of dictionaries
    transitions = [
        {'trigger': 'walk', 'source': 'A', 'dest': 'B'},
        {'trigger': 'run', 'source': 'B', 'dest': 'C'},
        {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
    ]
    m = Machine(states=states, transitions=transitions, initial='A')
    m.walk()
    dump = pickle.dumps(m)
    self.assertIsNotNone(dump)
    m2 = pickle.loads(dump)
    self.assertEqual(m.state, m2.state)
    # The restored machine can still fire triggers.
    m2.run()
def test_async(self):
    # Re-entrant trigger calls: without a queue they must raise; with
    # queued=True the nested trigger is deferred until the current one
    # finishes (so the machine ends up one state further along).
    states = ['A', 'B', 'C', 'D']
    # Define with list of dictionaries
    def change_state(mod, machine):
        self.assertEqual(mod.state, 'A')
        if machine.has_queue:
            machine.run(mod=mod, machine=machine)
            # Queued: state unchanged until the outer transition completes.
            self.assertEqual(mod.state, 'A')
        else:
            with self.assertRaises(MachineError):
                machine.run(mod=mod, machine=machine)
    transitions = [
        {'trigger': 'walk', 'source': 'A', 'dest': 'B', 'before': change_state},
        {'trigger': 'run', 'source': 'B', 'dest': 'C'},
        {'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
    ]
    m = Machine(states=states, transitions=transitions, initial='A')
    m.walk(mod=m, machine=m)
    self.assertEqual(m.state, 'B')
    m = Machine(states=states, transitions=transitions, initial='A', queued=True)
    m.walk(mod=m, machine=m)
    # The queued `run` fired after `walk`, advancing B -> C.
    self.assertEqual(m.state, 'C')
def test___getattr___and_identify_callback(self):
    # FIX: the local variable `type` shadowed the builtin; renamed to
    # cb_type throughout. Assertions are unchanged.
    m = Machine(Stuff(), states=['A', 'B', 'C'], initial='A')
    m.add_transition('move', 'A', 'B')
    m.add_transition('move', 'B', 'C')
    callback = m.__getattr__('before_move')
    self.assertTrue(callable(callback))
    with self.assertRaises(MachineError):
        m.__getattr__('before_no_such_transition')
    # Repeated lookup must fail the same way (no caching of the error).
    with self.assertRaises(MachineError):
        m.__getattr__('before_no_such_transition')
    with self.assertRaises(AttributeError):
        m.__getattr__('__no_such_method__')
    with self.assertRaises(AttributeError):
        m.__getattr__('')
    cb_type, target = m._identify_callback('on_exit_foobar')
    self.assertEqual(cb_type, 'on_exit')
    self.assertEqual(target, 'foobar')
    cb_type, target = m._identify_callback('on_exitfoobar')
    self.assertEqual(cb_type, None)
    self.assertEqual(target, None)
    cb_type, target = m._identify_callback('notacallback_foobar')
    self.assertEqual(cb_type, None)
    self.assertEqual(target, None)
    cb_type, target = m._identify_callback('totallyinvalid')
    self.assertEqual(cb_type, None)
    self.assertEqual(target, None)
    cb_type, target = m._identify_callback('before__foobar')
    self.assertEqual(cb_type, 'before')
    self.assertEqual(target, '_foobar')
    cb_type, target = m._identify_callback('before__this__user__likes__underscores___')
    self.assertEqual(cb_type, 'before')
    self.assertEqual(target, '_this__user__likes__underscores___')
    cb_type, target = m._identify_callback('before_stuff')
    self.assertEqual(cb_type, 'before')
    self.assertEqual(target, 'stuff')
    cb_type, target = m._identify_callback('before_trailing_underscore_')
    self.assertEqual(cb_type, 'before')
    self.assertEqual(target, 'trailing_underscore_')
    cb_type, target = m._identify_callback('before_')
    self.assertIs(cb_type, None)
    self.assertIs(target, None)
    cb_type, target = m._identify_callback('__')
    self.assertIs(cb_type, None)
    self.assertIs(target, None)
    cb_type, target = m._identify_callback('')
    self.assertIs(cb_type, None)
    self.assertIs(target, None)
def test_state_and_transition_with_underscore(self):
m = Machine(Stuff(), states=['_A_', '_B_', '_C_'], initial='_A_')
m.add_transition('_move_', '_A_', '_B_', prepare='increase_level')
m.add_transition('_after_', '_B_', '_C_', prepare='increase_level')
m.add_transition('_on_exit_', '_C_', '_A_', prepare='increase_level', conditions='this_fails')
m.model._move_()
self.assertEquals(m.model.state, '_B_')
self.assertEquals(m.model.level, 2)
m.model._after_()
self.assertEquals(m.model.state, '_C_')
self.assertEquals(m.model.level, 3)
# State does not advance, but increase_level still runs
m.model._on_exit_()
self.assertEquals(m.model.state, '_C_')
self.assertEquals(m.model.level, 4)
def test_callback_identification(self):
m = Machine(Stuff(), states=['A', 'B', 'C', 'D', 'E', 'F'], initial='A')
m.add_transition('transition', 'A', 'B', before='increase_level')
m.add_transition('after', 'B', 'C', before='increase_level')
m.add_transition('on_exit_A', 'C', 'D', before='increase_level', conditions='this_fails')
m.add_transition('check', 'C', 'E', before='increase_level')
m.add_transition('prepare', 'E', 'F', before='increase_level')
m.add_transition('before', 'F', 'A', before='increase_level')
m.before_transition('increase_level')
m.before_after('increase_level')
m.before_on_exit_A('increase_level')
m.after_check('increase_level')
m.before_prepare('increase_level')
m.before_before('increase_level')
m.model.transition()
self.assertEquals(m.model.state, 'B')
self.assertEquals(m.model.level, 3)
m.model.after()
self.assertEquals(m.model.state, 'C')
self.assertEquals(m.model.level, 5)
m.model.on_exit_A()
self.assertEquals(m.model.state, 'C')
self.assertEquals(m.model.level, 5)
m.model.check()
self.assertEquals(m.model.state, 'E')
self.assertEquals(m.model.level, 7)
m.model.prepare()
self.assertEquals(m.model.state, 'F')
self.assertEquals(m.model.level, 9)
m.model.before()
self.assertEquals(m.model.state, 'A')
self.assertEquals(m.model.level, 11)
# An invalid transition shouldn't execute the callback
with self.assertRaises(MachineError):
m.model.on_exit_A()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import uuid
from lxml import etree
import nose.exc
from keystone.common import serializer
from keystone.openstack.common import jsonutils
from keystone import test
import default_fixtures
class RestfulTestCase(test.TestCase):
    """Performs restful tests against the WSGI app over HTTP.

    This class launches public & admin WSGI servers for every test, which can
    be accessed by calling ``public_request()`` or ``admin_request()``,
    respectively.

    ``restful_request()`` and ``request()`` methods are also exposed if you
    need to bypass restful conventions or access HTTP details in your test
    implementation.

    Three new asserts are provided:

    * ``assertResponseSuccessful``: called automatically for every request
      unless an ``expected_status`` is provided
    * ``assertResponseStatus``: called instead of ``assertResponseSuccessful``,
      if an ``expected_status`` is provided
    * ``assertValidResponseHeaders``: validates that the response headers
      appear as expected

    Requests are automatically serialized according to the defined
    ``content_type``. Responses are automatically deserialized as well, and
    available in the ``response.body`` attribute. The original body content is
    available in the ``response.raw`` attribute.
    """
    # default content type to test; subclasses override with 'json' or 'xml'
    content_type = 'json'

    def setUp(self):
        super(RestfulTestCase, self).setUp()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        self.public_server = self.serveapp('keystone', name='main')
        self.admin_server = self.serveapp('keystone', name='admin')
        # TODO(termie): is_admin is being deprecated once the policy stuff
        #               is all working
        # TODO(termie): add an admin user to the fixtures and use that user
        # override the fixtures, for now
        self.metadata_foobar = self.identity_api.update_metadata(
            self.user_foo['id'],
            self.tenant_bar['id'],
            dict(roles=['keystone_admin'], is_admin='1'))

    def tearDown(self):
        """Kill running servers and release references to avoid leaks."""
        self.public_server.kill()
        self.admin_server.kill()
        self.public_server = None
        self.admin_server = None
        super(RestfulTestCase, self).tearDown()

    def request(self, host='0.0.0.0', port=80, method='GET', path='/',
                headers=None, body=None, expected_status=None):
        """Perform request and fetch httplib.HTTPResponse from the server.

        The raw response body is attached to the returned response object as
        ``response.body`` and the status code is asserted automatically.
        """
        # Initialize headers dictionary
        headers = {} if not headers else headers
        connection = httplib.HTTPConnection(host, port, timeout=10)
        # Perform the request
        connection.request(method, path, body, headers)
        # Retrieve the response so we can close the connection
        response = connection.getresponse()
        response.body = response.read()
        # Close the connection
        connection.close()
        # Automatically assert HTTP status code
        if expected_status:
            self.assertResponseStatus(response, expected_status)
        else:
            self.assertResponseSuccessful(response)
        self.assertValidResponseHeaders(response)
        # Contains the response headers, body, etc
        return response

    def assertResponseSuccessful(self, response):
        """Asserts that a status code lies inside the 2xx range.

        :param response: :py:class:`httplib.HTTPResponse` to be
            verified to have a status code between 200 and 299.

        example::

            >>> self.assertResponseSuccessful(response)
        """
        self.assertTrue(
            response.status >= 200 and response.status <= 299,
            'Status code %d is outside of the expected range (2xx)\n\n%s' %
            (response.status, response.body))

    def assertResponseStatus(self, response, expected_status):
        """Asserts a specific status code on the response.

        :param response: :py:class:`httplib.HTTPResponse`
        :param expected_status: The specific ``status`` result expected

        example::

            >>> self.assertResponseStatus(response, 203)
        """
        self.assertEqual(
            response.status,
            expected_status,
            'Status code %s is not %s, as expected)\n\n%s' %
            (response.status, expected_status, response.body))

    def assertValidResponseHeaders(self, response):
        """Ensures that response headers appear as expected."""
        self.assertIn('X-Auth-Token', response.getheader('Vary'))

    def _to_content_type(self, body, headers, content_type=None):
        """Attempt to encode JSON and XML automatically.

        Mutates ``headers`` in place; returns the serialized body, or None
        when there is no body to send.
        """
        content_type = content_type or self.content_type
        if content_type == 'json':
            headers['Accept'] = 'application/json'
            if body:
                headers['Content-Type'] = 'application/json'
                return jsonutils.dumps(body)
        elif content_type == 'xml':
            headers['Accept'] = 'application/xml'
            if body:
                headers['Content-Type'] = 'application/xml'
                return serializer.to_xml(body)

    def _from_content_type(self, response, content_type=None):
        """Attempt to decode JSON and XML automatically, if detected."""
        content_type = content_type or self.content_type
        # make the original response body available, for convenience
        response.raw = response.body
        if response.body is not None and response.body.strip():
            # if a body is provided, a Content-Type is also expected
            header = response.getheader('Content-Type', None)
            # NOTE(review): this method previously derived ``content_type``
            # from its parameter but then decoded via ``self.content_type``,
            # silently ignoring an explicit argument; it now honors it.
            self.assertIn(content_type, header)
            if content_type == 'json':
                response.body = jsonutils.loads(response.body)
            elif content_type == 'xml':
                response.body = etree.fromstring(response.body)

    def restful_request(self, headers=None, body=None, token=None, **kwargs):
        """Serializes/deserializes json/xml as request/response body.

        .. WARNING::

            * Existing Accept header will be overwritten.
            * Existing Content-Type header will be overwritten.
        """
        # Initialize headers dictionary
        headers = {} if not headers else headers
        if token is not None:
            headers['X-Auth-Token'] = token
        body = self._to_content_type(body, headers)
        # Perform the HTTP request/response
        response = self.request(headers=headers, body=body, **kwargs)
        self._from_content_type(response)
        # we can save some code & improve coverage by always doing this
        if response.status >= 400:
            self.assertValidErrorResponse(response)
        # Contains the decoded response.body
        return response

    def _get_port(self, server):
        # socket_info['socket'][1] is the port the server actually bound to
        return server.socket_info['socket'][1]

    def _public_port(self):
        return self._get_port(self.public_server)

    def _admin_port(self):
        return self._get_port(self.admin_server)

    def public_request(self, port=None, **kwargs):
        kwargs['port'] = port or self._public_port()
        response = self.restful_request(**kwargs)
        self.assertValidResponseHeaders(response)
        return response

    def admin_request(self, port=None, **kwargs):
        kwargs['port'] = port or self._admin_port()
        response = self.restful_request(**kwargs)
        self.assertValidResponseHeaders(response)
        return response

    def get_scoped_token(self):
        """Convenience method so that we can test authenticated requests."""
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': self.user_foo['password'],
                    },
                    'tenantId': self.tenant_bar['id'],
                },
            })
        return self._get_token_id(r)

    def _get_token_id(self, r):
        """Helper method to return a token ID from a response.

        This needs to be overridden by child classes based on their
        content type.
        """
        raise NotImplementedError()
class CoreApiTests(object):
    """Core v2.0 API tests shared by the JSON and XML test cases.

    Designed to be mixed into a RestfulTestCase subclass that supplies the
    request helpers and the content-type-specific ``assertValid*Response``
    implementations.
    """
    def assertValidError(self, error):
        """Applicable to XML and JSON."""
        # NOTE(review): a Python 2 debug ``print error.attrib`` wrapped in a
        # bare ``try/except: pass`` was removed here; it silently swallowed
        # every exception and made no assertion.
        self.assertIsNotNone(error.get('code'))
        self.assertIsNotNone(error.get('title'))
        self.assertIsNotNone(error.get('message'))

    def assertValidVersion(self, version):
        """Applicable to XML and JSON.

        However, navigating links and media-types differs between content
        types so they need to be validated separately.
        """
        self.assertIsNotNone(version)
        self.assertIsNotNone(version.get('id'))
        self.assertIsNotNone(version.get('status'))
        self.assertIsNotNone(version.get('updated'))

    def assertValidExtension(self, extension):
        """Applicable to XML and JSON.

        However, navigating extension links differs between content types.
        They need to be validated separately with assertValidExtensionLink.
        """
        self.assertIsNotNone(extension)
        self.assertIsNotNone(extension.get('name'))
        self.assertIsNotNone(extension.get('namespace'))
        self.assertIsNotNone(extension.get('alias'))
        self.assertIsNotNone(extension.get('updated'))

    def assertValidExtensionLink(self, link):
        """Applicable to XML and JSON."""
        self.assertIsNotNone(link.get('rel'))
        self.assertIsNotNone(link.get('type'))
        self.assertIsNotNone(link.get('href'))

    def assertValidTenant(self, tenant):
        """Applicable to XML and JSON."""
        self.assertIsNotNone(tenant.get('id'))
        self.assertIsNotNone(tenant.get('name'))

    def assertValidUser(self, user):
        """Applicable to XML and JSON."""
        self.assertIsNotNone(user.get('id'))
        self.assertIsNotNone(user.get('name'))

    def assertValidRole(self, role):
        """Applicable to XML and JSON."""
        # NOTE(review): the parameter was previously (mis)named ``tenant``;
        # all callers pass positionally, so the rename is safe.
        self.assertIsNotNone(role.get('id'))
        self.assertIsNotNone(role.get('name'))

    def test_public_multiple_choice(self):
        r = self.public_request(path='/', expected_status=300)
        self.assertValidMultipleChoiceResponse(r)

    def test_admin_multiple_choice(self):
        r = self.admin_request(path='/', expected_status=300)
        self.assertValidMultipleChoiceResponse(r)

    def test_public_version(self):
        r = self.public_request(path='/v2.0/')
        self.assertValidVersionResponse(r)

    def test_admin_version(self):
        r = self.admin_request(path='/v2.0/')
        self.assertValidVersionResponse(r)

    def test_public_extensions(self):
        self.public_request(path='/v2.0/extensions',)
        # TODO(dolph): can't test this without any public extensions defined
        # self.assertValidExtensionListResponse(r)

    def test_admin_extensions(self):
        r = self.admin_request(path='/v2.0/extensions',)
        self.assertValidExtensionListResponse(r)

    def test_admin_extensions_404(self):
        self.admin_request(path='/v2.0/extensions/invalid-extension',
                           expected_status=404)

    def test_public_osksadm_extension_404(self):
        # OS-KSADM is an admin-only extension; it must not appear publicly
        self.public_request(path='/v2.0/extensions/OS-KSADM',
                            expected_status=404)

    def test_admin_osksadm_extension(self):
        r = self.admin_request(path='/v2.0/extensions/OS-KSADM')
        self.assertValidExtensionResponse(r)

    def test_authenticate(self):
        r = self.public_request(
            method='POST',
            path='/v2.0/tokens',
            body={
                'auth': {
                    'passwordCredentials': {
                        'username': self.user_foo['name'],
                        'password': self.user_foo['password'],
                    },
                    'tenantId': self.tenant_bar['id'],
                },
            },
            # TODO(dolph): creating a token should result in a 201 Created
            expected_status=200)
        self.assertValidAuthenticationResponse(r)

    def test_get_tenants_for_token(self):
        r = self.public_request(path='/v2.0/tenants',
                                token=self.get_scoped_token())
        self.assertValidTenantListResponse(r)

    def test_validate_token(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tokens/%(token_id)s' % {
                'token_id': token,
            },
            token=token)
        self.assertValidAuthenticationResponse(r)

    def test_validate_token_belongs_to(self):
        token = self.get_scoped_token()
        path = ('/v2.0/tokens/%s?belongsTo=%s' % (token,
                                                  self.tenant_bar['id']))
        r = self.admin_request(path=path, token=token)
        self.assertValidAuthenticationResponse(r,
                                               require_service_catalog=True)

    def test_validate_token_no_belongs_to_still_returns_catalog(self):
        token = self.get_scoped_token()
        path = ('/v2.0/tokens/%s' % token)
        r = self.admin_request(path=path, token=token)
        self.assertValidAuthenticationResponse(r,
                                               require_service_catalog=True)

    def test_validate_token_head(self):
        """The same call as above, except using HEAD.

        There's no response to validate here, but this is included for the
        sake of completely covering the core API.
        """
        token = self.get_scoped_token()
        self.admin_request(
            method='HEAD',
            path='/v2.0/tokens/%(token_id)s' % {
                'token_id': token,
            },
            token=token,
            expected_status=204)

    def test_endpoints(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tokens/%(token_id)s/endpoints' % {
                'token_id': token,
            },
            token=token)
        self.assertValidEndpointListResponse(r)

    def test_get_tenant(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants/%(tenant_id)s' % {
                'tenant_id': self.tenant_bar['id'],
            },
            token=token)
        self.assertValidTenantResponse(r)

    def test_get_user_roles(self):
        # NOTE: everything below the skip is unreachable until bug 933565
        # is resolved; kept so the intended coverage is documented.
        raise nose.exc.SkipTest('Blocked by bug 933565')
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/users/%(user_id)s/roles' % {
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidRoleListResponse(r)

    def test_get_user_roles_with_tenant(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
                'tenant_id': self.tenant_bar['id'],
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidRoleListResponse(r)

    def test_get_user(self):
        token = self.get_scoped_token()
        r = self.admin_request(
            path='/v2.0/users/%(user_id)s' % {
                'user_id': self.user_foo['id'],
            },
            token=token)
        self.assertValidUserResponse(r)

    def test_error_response(self):
        """This triggers assertValidErrorResponse by convention."""
        self.public_request(path='/v2.0/tenants', expected_status=401)
class JsonTestCase(RestfulTestCase, CoreApiTests):
    """Runs the core API tests with JSON request/response bodies."""
    content_type = 'json'
    def _get_token_id(self, r):
        """Applicable only to JSON."""
        return r.body['access']['token']['id']
    def assertValidErrorResponse(self, r):
        # error body must carry code/title/message, and the embedded code
        # must agree with the HTTP status line
        self.assertIsNotNone(r.body.get('error'))
        self.assertValidError(r.body['error'])
        self.assertEqual(r.body['error']['code'], r.status)
    def assertValidExtension(self, extension):
        """JSON extensions additionally carry a description and links."""
        super(JsonTestCase, self).assertValidExtension(extension)
        self.assertIsNotNone(extension.get('description'))
        self.assertIsNotNone(extension.get('links'))
        self.assertTrue(len(extension.get('links')))
        for link in extension.get('links'):
            self.assertValidExtensionLink(link)
    def assertValidExtensionListResponse(self, r):
        # JSON wraps the list as {'extensions': {'values': [...]}}
        self.assertIsNotNone(r.body.get('extensions'))
        self.assertIsNotNone(r.body['extensions'].get('values'))
        self.assertTrue(len(r.body['extensions'].get('values')))
        for extension in r.body['extensions']['values']:
            self.assertValidExtension(extension)
    def assertValidExtensionResponse(self, r):
        self.assertValidExtension(r.body.get('extension'))
    def assertValidAuthenticationResponse(self, r,
                                          require_service_catalog=False):
        """Validates token, user, optional tenant and service catalog.

        When ``require_service_catalog`` is True the catalog must be
        present; otherwise it is validated only if it appears.
        """
        self.assertIsNotNone(r.body.get('access'))
        self.assertIsNotNone(r.body['access'].get('token'))
        self.assertIsNotNone(r.body['access'].get('user'))
        # validate token
        self.assertIsNotNone(r.body['access']['token'].get('id'))
        self.assertIsNotNone(r.body['access']['token'].get('expires'))
        tenant = r.body['access']['token'].get('tenant')
        if tenant is not None:
            # validate tenant
            self.assertIsNotNone(tenant.get('id'))
            self.assertIsNotNone(tenant.get('name'))
        # validate user
        self.assertIsNotNone(r.body['access']['user'].get('id'))
        self.assertIsNotNone(r.body['access']['user'].get('name'))
        serviceCatalog = r.body['access'].get('serviceCatalog')
        # validate service catalog
        if require_service_catalog:
            self.assertIsNotNone(serviceCatalog)
        if serviceCatalog is not None:
            self.assertTrue(len(r.body['access']['serviceCatalog']))
            for service in r.body['access']['serviceCatalog']:
                # validate service
                self.assertIsNotNone(service.get('name'))
                self.assertIsNotNone(service.get('type'))
                # services contain at least one endpoint
                self.assertIsNotNone(service.get('endpoints'))
                self.assertTrue(len(service['endpoints']))
                for endpoint in service['endpoints']:
                    # validate service endpoint
                    self.assertIsNotNone(endpoint.get('publicURL'))
    def assertValidTenantListResponse(self, r):
        self.assertIsNotNone(r.body.get('tenants'))
        self.assertTrue(len(r.body['tenants']))
        for tenant in r.body['tenants']:
            self.assertValidTenant(tenant)
            # 'enabled' must be a real boolean, not a string
            self.assertIsNotNone(tenant.get('enabled'))
            self.assertIn(tenant.get('enabled'), [True, False])
    def assertValidUserResponse(self, r):
        self.assertIsNotNone(r.body.get('user'))
        self.assertValidUser(r.body['user'])
    def assertValidTenantResponse(self, r):
        self.assertIsNotNone(r.body.get('tenant'))
        self.assertValidTenant(r.body['tenant'])
    def assertValidRoleListResponse(self, r):
        self.assertIsNotNone(r.body.get('roles'))
        self.assertTrue(len(r.body['roles']))
        for role in r.body['roles']:
            self.assertValidRole(role)
    def assertValidVersion(self, version):
        """JSON versions additionally carry links and media-types."""
        super(JsonTestCase, self).assertValidVersion(version)
        self.assertIsNotNone(version.get('links'))
        self.assertTrue(len(version.get('links')))
        for link in version.get('links'):
            self.assertIsNotNone(link.get('rel'))
            self.assertIsNotNone(link.get('href'))
        self.assertIsNotNone(version.get('media-types'))
        self.assertTrue(len(version.get('media-types')))
        for media in version.get('media-types'):
            self.assertIsNotNone(media.get('base'))
            self.assertIsNotNone(media.get('type'))
    def assertValidMultipleChoiceResponse(self, r):
        self.assertIsNotNone(r.body.get('versions'))
        self.assertIsNotNone(r.body['versions'].get('values'))
        self.assertTrue(len(r.body['versions']['values']))
        for version in r.body['versions']['values']:
            self.assertValidVersion(version)
    def assertValidVersionResponse(self, r):
        self.assertValidVersion(r.body.get('version'))
    def assertValidEndpointListResponse(self, r):
        self.assertIsNotNone(r.body.get('endpoints'))
        self.assertTrue(len(r.body['endpoints']))
        for endpoint in r.body['endpoints']:
            self.assertIsNotNone(endpoint.get('id'))
            self.assertIsNotNone(endpoint.get('name'))
            self.assertIsNotNone(endpoint.get('type'))
            self.assertIsNotNone(endpoint.get('publicURL'))
            self.assertIsNotNone(endpoint.get('internalURL'))
            self.assertIsNotNone(endpoint.get('adminURL'))
    def test_service_crud_requires_auth(self):
        """Service CRUD should 401 without an X-Auth-Token (bug 1006822)."""
        # values here don't matter because we should 401 before they're checked
        service_path = '/v2.0/OS-KSADM/services/%s' % uuid.uuid4().hex
        service_body = {
            'OS-KSADM:service': {
                'name': uuid.uuid4().hex,
                'type': uuid.uuid4().hex,
            },
        }
        r = self.admin_request(method='GET',
                               path='/v2.0/OS-KSADM/services',
                               expected_status=401)
        self.assertValidErrorResponse(r)
        r = self.admin_request(method='POST',
                               path='/v2.0/OS-KSADM/services',
                               body=service_body,
                               expected_status=401)
        self.assertValidErrorResponse(r)
        r = self.admin_request(method='GET',
                               path=service_path,
                               expected_status=401)
        self.assertValidErrorResponse(r)
        r = self.admin_request(method='DELETE',
                               path=service_path,
                               expected_status=401)
        self.assertValidErrorResponse(r)
    def test_user_role_list_requires_auth(self):
        """User role list should 401 without an X-Auth-Token (bug 1006815)."""
        # values here don't matter because we should 401 before they're checked
        path = '/v2.0/tenants/%(tenant_id)s/users/%(user_id)s/roles' % {
            'tenant_id': uuid.uuid4().hex,
            'user_id': uuid.uuid4().hex,
        }
        r = self.admin_request(path=path, expected_status=401)
        self.assertValidErrorResponse(r)
    def test_fetch_revocation_list_nonadmin_fails(self):
        # no token supplied, so the admin-only endpoint must 401
        self.admin_request(
            method='GET',
            path='/v2.0/tokens/revoked',
            expected_status=401)
    def test_fetch_revocation_list_admin_200(self):
        token = self.get_scoped_token()
        r = self.restful_request(
            method='GET',
            path='/v2.0/tokens/revoked',
            token=token,
            expected_status=200,
            port=self._admin_port())
        self.assertValidRevocationListResponse(r)
    def assertValidRevocationListResponse(self, response):
        # the revocation list is returned as a signed blob
        self.assertIsNotNone(response.body['signed'])
class XmlTestCase(RestfulTestCase, CoreApiTests):
    """Runs the core API tests with XML request/response bodies."""
    # identity v2.0 XML namespace used to qualify every element lookup
    xmlns = 'http://docs.openstack.org/identity/api/v2.0'
    content_type = 'xml'
    def _get_token_id(self, r):
        # r.body is an etree Element after _from_content_type()
        return r.body.find(self._tag('token')).get('id')
    def _tag(self, tag_name, xmlns=None):
        """Helper method to build an namespaced element name."""
        return '{%(ns)s}%(tag)s' % {'ns': xmlns or self.xmlns, 'tag': tag_name}
    def assertValidErrorResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('error'))
        self.assertValidError(xml)
        # XML attribute values are strings, so compare against str(status)
        self.assertEqual(xml.get('code'), str(r.status))
    def assertValidExtension(self, extension):
        """XML extensions additionally carry a description and links."""
        super(XmlTestCase, self).assertValidExtension(extension)
        self.assertIsNotNone(extension.find(self._tag('description')))
        self.assertTrue(extension.find(self._tag('description')).text)
        self.assertTrue(len(extension.findall(self._tag('link'))))
        for link in extension.findall(self._tag('link')):
            self.assertValidExtensionLink(link)
    def assertValidExtensionListResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('extensions'))
        self.assertTrue(len(xml.findall(self._tag('extension'))))
        for extension in xml.findall(self._tag('extension')):
            self.assertValidExtension(extension)
    def assertValidExtensionResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('extension'))
        self.assertValidExtension(xml)
    def assertValidVersion(self, version):
        """XML versions additionally carry links and media-types."""
        super(XmlTestCase, self).assertValidVersion(version)
        self.assertTrue(len(version.findall(self._tag('link'))))
        for link in version.findall(self._tag('link')):
            self.assertIsNotNone(link.get('rel'))
            self.assertIsNotNone(link.get('href'))
        media_types = version.find(self._tag('media-types'))
        self.assertIsNotNone(media_types)
        self.assertTrue(len(media_types.findall(self._tag('media-type'))))
        for media in media_types.findall(self._tag('media-type')):
            self.assertIsNotNone(media.get('base'))
            self.assertIsNotNone(media.get('type'))
    def assertValidMultipleChoiceResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('versions'))
        self.assertTrue(len(xml.findall(self._tag('version'))))
        for version in xml.findall(self._tag('version')):
            self.assertValidVersion(version)
    def assertValidVersionResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('version'))
        self.assertValidVersion(xml)
    def assertValidEndpointListResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('endpoints'))
        self.assertTrue(len(xml.findall(self._tag('endpoint'))))
        for endpoint in xml.findall(self._tag('endpoint')):
            self.assertIsNotNone(endpoint.get('id'))
            self.assertIsNotNone(endpoint.get('name'))
            self.assertIsNotNone(endpoint.get('type'))
            self.assertIsNotNone(endpoint.get('publicURL'))
            self.assertIsNotNone(endpoint.get('internalURL'))
            self.assertIsNotNone(endpoint.get('adminURL'))
    def assertValidTenantResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('tenant'))
        self.assertValidTenant(xml)
    def assertValidUserResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('user'))
        self.assertValidUser(xml)
    def assertValidRoleListResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('roles'))
        self.assertTrue(len(r.body.findall(self._tag('role'))))
        for role in r.body.findall(self._tag('role')):
            self.assertValidRole(role)
    def assertValidAuthenticationResponse(self, r,
                                          require_service_catalog=False):
        """Validates token, user, optional tenant and service catalog.

        When ``require_service_catalog`` is True the catalog must be
        present; otherwise it is validated only if it appears.
        """
        xml = r.body
        self.assertEqual(xml.tag, self._tag('access'))
        # validate token
        token = xml.find(self._tag('token'))
        self.assertIsNotNone(token)
        self.assertIsNotNone(token.get('id'))
        self.assertIsNotNone(token.get('expires'))
        tenant = token.find(self._tag('tenant'))
        if tenant is not None:
            # validate tenant
            self.assertValidTenant(tenant)
            # XML booleans are serialized as lowercase strings
            self.assertIn(tenant.get('enabled'), ['true', 'false'])
        user = xml.find(self._tag('user'))
        self.assertIsNotNone(user)
        self.assertIsNotNone(user.get('id'))
        self.assertIsNotNone(user.get('name'))
        serviceCatalog = xml.find(self._tag('serviceCatalog'))
        # validate the serviceCatalog
        if require_service_catalog:
            self.assertIsNotNone(serviceCatalog)
        if serviceCatalog is not None:
            self.assertTrue(len(serviceCatalog.findall(self._tag('service'))))
            for service in serviceCatalog.findall(self._tag('service')):
                # validate service
                self.assertIsNotNone(service.get('name'))
                self.assertIsNotNone(service.get('type'))
                # services contain at least one endpoint
                self.assertTrue(len(service))
                for endpoint in service.findall(self._tag('endpoint')):
                    # validate service endpoint
                    self.assertIsNotNone(endpoint.get('publicURL'))
    def assertValidTenantListResponse(self, r):
        xml = r.body
        self.assertEqual(xml.tag, self._tag('tenants'))
        self.assertTrue(len(r.body))
        for tenant in r.body.findall(self._tag('tenant')):
            self.assertValidTenant(tenant)
            # XML booleans are serialized as lowercase strings
            self.assertIn(tenant.get('enabled'), ['true', 'false'])
| |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import shutil
import unittest
import six
import socket
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDagBag
from mock import patch
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
import sqlalchemy
# Prefer the stdlib mock (Python 3); fall back to the external 'mock'
# package, else leave it as None.
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None
DEV_NULL = '/dev/null'
# Execution date used as the anchor for most backfill/scheduler tests below.
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
class BackfillJobTest(unittest.TestCase):
    def setUp(self):
        # Fresh CLI parser and a DagBag (including the example DAGs) for
        # every test, so tests don't share parsed state.
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(include_examples=True)
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_trigger_controller_dag(self):
        """Backfilling the controller DAG must leave task instances for the
        target DAG that the scheduler then picks up and queues."""
        dag = self.dagbag.get_dag('example_trigger_controller_dag')
        target_dag = self.dagbag.get_dag('example_trigger_target_dag')
        dag.clear()
        target_dag.clear()
        # Before the backfill, the scheduler has nothing to queue for the
        # target DAG.
        scheduler = SchedulerJob()
        queue = mock.Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertFalse(queue.append.called)
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True
        )
        job.run()
        # After the controller ran, processing the target DAG must queue
        # at least one task instance.
        scheduler = SchedulerJob()
        queue = mock.Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        self.assertTrue(queue.append.called)
        target_dag.clear()
        dag.clear()
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_multi_dates(self):
        """A two-day backfill must create one successful DagRun per
        execution date, in date order."""
        dag = self.dagbag.get_dag('example_bash_operator')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=1),
            ignore_first_depends_on_past=True
        )
        job.run()
        # Verify both runs directly from the metadata database.
        session = settings.Session()
        drs = session.query(DagRun).filter(
            DagRun.dag_id=='example_bash_operator'
        ).order_by(DagRun.execution_date).all()
        self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
        self.assertTrue(drs[0].state == State.SUCCESS)
        self.assertTrue(drs[1].execution_date ==
                        DEFAULT_DATE + datetime.timedelta(days=1))
        self.assertTrue(drs[1].state == State.SUCCESS)
        dag.clear()
        session.close()
    @unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_examples(self):
        """
        Test backfilling example dags

        Runs every bundled example DAG (minus a skip list) for a single
        execution date and expects each backfill to finish without raising.
        """
        # some DAGs really are just examples... but try to make them work!
        skip_dags = [
            'example_http_operator',
            'example_twitter_dag',
            'example_trigger_target_dag',
            'example_trigger_controller_dag',  # tested above
            'test_utils',  # sleeps forever
        ]
        logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
        dags = [
            dag for dag in self.dagbag.dags.values()
            if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
        ]
        # Clear all candidate DAGs first so earlier runs can't interfere.
        for dag in dags:
            dag.clear(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
        # Run in a deterministic (sorted) order for reproducible logs.
        for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
            logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
            job = BackfillJob(
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_first_depends_on_past=True)
            job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if right order. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
    """
    Test that queued tasks are executed by BackfillJob

    Test for https://github.com/airbnb/airflow/pull/1225
    """
    session = settings.Session()
    # pool with a single slot forces the task to be queued before running
    pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
    session.add(pool)
    session.commit()
    dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
    dag.clear()
    job = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE)
    # run with timeout because this creates an infinite loop if not
    # caught
    with timeout(seconds=30):
        job.run()
    ti = TI(
        task=dag.get_task('test_backfill_pooled_task'),
        execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
    """
    Test that backfill respects ignore_first_depends_on_past: without it a
    single-date backfill past the start date deadlocks; with it the run
    succeeds.
    """
    dag = self.dagbag.get_dag('test_depends_on_past')
    dag.clear()
    run_date = DEFAULT_DATE + datetime.timedelta(days=5)
    # backfill should deadlock: depends_on_past can never be satisfied for
    # the first (and only) execution date of this backfill window
    self.assertRaisesRegexp(
        AirflowException,
        'BackfillJob is deadlocked',
        BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
    BackfillJob(
        dag=dag,
        start_date=run_date,
        end_date=run_date,
        ignore_first_depends_on_past=True).run()
    # ti should have succeeded
    ti = TI(dag.tasks[0], run_date)
    ti.refresh_from_db()
    # use assertEqual, not the deprecated assertEquals alias
    self.assertEqual(ti.state, State.SUCCESS)
def test_cli_backfill_depends_on_past(self):
    """
    Test that CLI respects -I argument

    Without -I the backfill deadlocks on depends_on_past; passing -I
    (ignore_first_depends_on_past) lets the first run proceed.
    """
    dag_id = 'test_dagrun_states_deadlock'
    run_date = DEFAULT_DATE + datetime.timedelta(days=1)
    args = [
        'backfill',
        dag_id,
        '-l',
        '-s',
        run_date.isoformat(),
    ]
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    self.assertRaisesRegexp(
        AirflowException,
        'BackfillJob is deadlocked',
        cli.backfill,
        self.parser.parse_args(args))
    cli.backfill(self.parser.parse_args(args + ['-I']))
    ti = TI(dag.get_task('test_depends_on_past'), run_date)
    ti.refresh_from_db()
    # task ran
    self.assertEqual(ti.state, State.SUCCESS)
    dag.clear()
def test_sub_set_subdag(self):
    """Backfilling a sub-set of a DAG replaces the existing DagRun's run_id
    with the backfill-prefixed one and only runs the selected tasks."""
    dag = DAG(
        'test_sub_set_subdag',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='leave1')
        op2 = DummyOperator(task_id='leave2')
        op3 = DummyOperator(task_id='upstream_level_1')
        op4 = DummyOperator(task_id='upstream_level_2')
        op5 = DummyOperator(task_id='upstream_level_3')
        # order randomly
        op2.set_downstream(op3)
        op1.set_downstream(op3)
        op4.set_downstream(op5)
        op3.set_downstream(op4)
    dag.clear()
    dr = dag.create_dagrun(run_id="test",
                           state=State.SUCCESS,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)
    executor = TestExecutor(do_update=True)
    # NOTE(review): "leave*" is a regex ('leav' + zero or more 'e'), not a
    # glob; it happens to match leave1/leave2 here -- confirm intent
    sub_dag = dag.sub_dag(task_regex="leave*",
                          include_downstream=False,
                          include_upstream=False)
    job = BackfillJob(dag=sub_dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    job.run()
    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]
    self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                     dr.run_id)
    for ti in dr.get_task_instances():
        # only the 'leave' tasks were part of the sub-dag backfill
        if ti.task_id == 'leave1' or ti.task_id == 'leave2':
            self.assertEqual(State.SUCCESS, ti.state)
        else:
            self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
    """A backfill over an existing DagRun re-runs only the runnable task
    instances (UP_FOR_RETRY / SCHEDULED / state-less) and leaves terminal
    states (FAILED, SKIPPED, UPSTREAM_FAILED) untouched."""
    dag = DAG(
        'test_backfill_fill_blanks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'},
    )
    with dag:
        op1 = DummyOperator(task_id='op1')
        op2 = DummyOperator(task_id='op2')
        op3 = DummyOperator(task_id='op3')
        op4 = DummyOperator(task_id='op4')
        op5 = DummyOperator(task_id='op5')
        op6 = DummyOperator(task_id='op6')
    dag.clear()
    dr = dag.create_dagrun(run_id='test',
                           state=State.SUCCESS,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)
    executor = TestExecutor(do_update=True)
    session = settings.Session()
    tis = dr.get_task_instances()
    # seed one task instance per interesting pre-backfill state
    for ti in tis:
        if ti.task_id == op1.task_id:
            ti.state = State.UP_FOR_RETRY
            ti.end_date = DEFAULT_DATE
        elif ti.task_id == op2.task_id:
            ti.state = State.FAILED
        elif ti.task_id == op3.task_id:
            ti.state = State.SKIPPED
        elif ti.task_id == op4.task_id:
            ti.state = State.SCHEDULED
        elif ti.task_id == op5.task_id:
            ti.state = State.UPSTREAM_FAILED
        # op6 = None  (left state-less on purpose)
        session.merge(ti)
    session.commit()
    session.close()
    job = BackfillJob(dag=dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    # the pre-seeded FAILED task makes the whole backfill report failure
    self.assertRaisesRegexp(
        AirflowException,
        'Some task instances failed',
        job.run)
    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]
    self.assertEqual(dr.state, State.FAILED)
    tis = dr.get_task_instances()
    for ti in tis:
        if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
            self.assertEqual(ti.state, State.SUCCESS)
        elif ti.task_id == op2.task_id:
            self.assertEqual(ti.state, State.FAILED)
        elif ti.task_id == op3.task_id:
            self.assertEqual(ti.state, State.SKIPPED)
        elif ti.task_id == op5.task_id:
            self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
    """Backfill a SubDagOperator's inner DAG directly and verify all five
    of its task instances are queued in a single executor batch."""
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag_op_task = dag.get_task('section-1')
    subdag = subdag_op_task.subdag
    subdag.schedule_interval = '@daily'
    start_date = datetime.datetime.now()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=start_date,
                      end_date=start_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()
    history = executor.history
    subdag_history = history[0]
    # check that all 5 task instances of the subdag 'section-1' were executed
    self.assertEqual(5, len(subdag_history))
    for sdh in subdag_history:
        # each history entry is (command, priority, queue, ti)
        ti = sdh[3]
        self.assertIn('section-1-task-', ti.task_id)
    subdag.clear()
    dag.clear()
def test_update_counters(self):
    """BackfillJob._update_counters must move a started TI into exactly one
    bucket according to its state: SUCCESS -> succeeded, SKIPPED -> skipped,
    FAILED -> failed, and NONE (reset) back into tasks_to_run."""
    dag = DAG(
        dag_id='test_manage_executor_state',
        start_date=DEFAULT_DATE)
    task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    job = BackfillJob(dag=dag)
    session = settings.Session()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    ti = TI(task1, dr.execution_date)
    ti.refresh_from_db()
    # counter buckets passed to _update_counters
    # (the original code assigned `started = {}` twice; the duplicate dead
    # assignment is removed here)
    started = {}
    tasks_to_run = {}
    failed = set()
    succeeded = set()
    skipped = set()
    # test for success
    ti.set_state(State.SUCCESS, session)
    started[ti.key] = ti
    job._update_counters(started=started, succeeded=succeeded,
                         skipped=skipped, failed=failed,
                         tasks_to_run=tasks_to_run)
    self.assertTrue(len(started) == 0)
    self.assertTrue(len(succeeded) == 1)
    self.assertTrue(len(skipped) == 0)
    self.assertTrue(len(failed) == 0)
    self.assertTrue(len(tasks_to_run) == 0)
    succeeded.clear()
    # test for skipped
    ti.set_state(State.SKIPPED, session)
    started[ti.key] = ti
    job._update_counters(started=started, succeeded=succeeded,
                         skipped=skipped, failed=failed,
                         tasks_to_run=tasks_to_run)
    self.assertTrue(len(started) == 0)
    self.assertTrue(len(succeeded) == 0)
    self.assertTrue(len(skipped) == 1)
    self.assertTrue(len(failed) == 0)
    self.assertTrue(len(tasks_to_run) == 0)
    skipped.clear()
    # test for failed
    ti.set_state(State.FAILED, session)
    started[ti.key] = ti
    job._update_counters(started=started, succeeded=succeeded,
                         skipped=skipped, failed=failed,
                         tasks_to_run=tasks_to_run)
    self.assertTrue(len(started) == 0)
    self.assertTrue(len(succeeded) == 0)
    self.assertTrue(len(skipped) == 0)
    self.assertTrue(len(failed) == 1)
    self.assertTrue(len(tasks_to_run) == 0)
    failed.clear()
    # test for reschedule: a TI whose state was reset to NONE goes back
    # into tasks_to_run
    ti.set_state(State.NONE, session)
    started[ti.key] = ti
    job._update_counters(started=started, succeeded=succeeded,
                         skipped=skipped, failed=failed,
                         tasks_to_run=tasks_to_run)
    self.assertTrue(len(started) == 0)
    self.assertTrue(len(succeeded) == 0)
    self.assertTrue(len(skipped) == 0)
    self.assertTrue(len(failed) == 0)
    self.assertTrue(len(tasks_to_run) == 1)
    session.close()
class LocalTaskJobTest(unittest.TestCase):
    """Tests for LocalTaskJob: heartbeat ownership checks and protection
    against a second job running an already-running task instance."""

    def setUp(self):
        pass

    @patch.object(LocalTaskJob, "_is_descendant_process")
    def test_localtaskjob_heartbeat(self, is_descendant):
        """heartbeat_callback raises AirflowException when the TI's recorded
        hostname/pid do not belong to this job's process, and returns None
        when they do."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        # wrong hostname -> heartbeat must fail
        ti.hostname = "blablabla"
        session.commit()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
        self.assertRaises(AirflowException, job1.heartbeat_callback)
        is_descendant.return_value = True
        ti.state = State.RUNNING
        # matching hostname and a descendant pid -> heartbeat passes
        ti.hostname = socket.getfqdn()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        ret = job1.heartbeat_callback()
        self.assertEqual(ret, None)
        # pid no longer a descendant -> heartbeat must fail again
        is_descendant.return_value = False
        self.assertRaises(AirflowException, job1.heartbeat_callback)

    def test_localtaskjob_double_trigger(self):
        """A second LocalTaskJob for a TI already RUNNING elsewhere must
        refuse to run and leave the original TI untouched."""
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = socket.getfqdn()
        ti.pid = 1
        session.commit()
        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run, ignore_ti_state=True, executor=SequentialExecutor())
        self.assertRaises(AirflowException, job1.run)
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        # the original TI is untouched by the refused second job
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)
        session.close()
class SchedulerJobTest(unittest.TestCase):
    """Tests for SchedulerJob: dag-run creation, concurrency limits,
    state transitions and scheduling decisions."""
    # These defaults make the test faster to run
    default_scheduler_args = {"file_process_interval": 0,
                              "processor_poll_interval": 0.5}
def setUp(self):
    """Load a fresh DagBag and wipe DagRun / ImportError tables."""
    self.dagbag = DagBag()
    db = settings.Session()
    for model in (models.DagRun, models.ImportError):
        db.query(model).delete()
    db.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
    """
    Utility function that runs a single scheduler loop without actually
    changing/scheduling any dags. This is useful to simulate the other side effects of
    running a scheduler loop, e.g. to see what parse errors there are in the
    dags_folder.
    :param dags_folder: the directory to traverse
    :type directory: str
    """
    # dag_id points at a DAG that doesn't exist, so nothing is scheduled
    job = SchedulerJob(
        dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
        num_runs=1,
        subdir=os.path.join(dags_folder))
    job.heartrate = 0
    job.run()
def test_concurrency(self):
    """With dag concurrency=3 and 2 tasks already RUNNING, the scheduler
    may queue only one more task instance from the second dag run."""
    dag_id = 'SchedulerJobTest.test_concurrency'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_nonexistent_queue'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = SimpleDagBag([dag])
    scheduler = SchedulerJob(**self.default_scheduler_args)
    session = settings.Session()
    # create first dag run with 1 running and 1 queued
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti1.state = State.RUNNING
    ti2.state = State.RUNNING
    session.merge(ti1)
    session.merge(ti2)
    session.commit()
    self.assertEqual(State.RUNNING, dr1.state)
    self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
                                                   states=[State.RUNNING], session=session))
    # create second dag run
    dr2 = scheduler.create_dag_run(dag)
    ti3 = TI(task1, dr2.execution_date)
    ti4 = TI(task2, dr2.execution_date)
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # manually set to scheduled so we can pick them up
    ti3.state = State.SCHEDULED
    ti4.state = State.SCHEDULED
    session.merge(ti3)
    session.merge(ti4)
    session.commit()
    self.assertEqual(State.RUNNING, dr2.state)
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    # check that concurrency is respected
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # 2 RUNNING + at most 1 newly QUEUED = 3 (the concurrency limit)
    self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
                                                   states=[State.RUNNING, State.QUEUED], session=session))
    self.assertEqual(State.RUNNING, ti1.state)
    self.assertEqual(State.RUNNING, ti2.state)
    # exactly one of the two new TIs was queued, the other stays scheduled
    six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
    session.close()
def test_change_state_for_tis_without_dagrun(self):
    """_change_state_for_tis_without_dagrun resets TIs to NONE only when
    their dag run is no longer RUNNING, and only for dags in the given
    SimpleDagBag."""
    dag = DAG(
        dag_id='test_change_state_for_tis_without_dagrun',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    # second dag is *not* in the SimpleDagBag below, so its TIs must
    # never be touched
    dag2 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_dont_change',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag2,
        owner='airflow')
    session = settings.Session()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    ti = dr.get_task_instance(task_id='dummy', session=session)
    ti.state = State.SCHEDULED
    session.commit()
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    dagbag = SimpleDagBag([dag])
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    scheduler._change_state_for_tis_without_dagrun(simple_dag_bag=dagbag,
                                                   old_states=[State.SCHEDULED, State.QUEUED],
                                                   new_state=State.NONE,
                                                   session=session)
    # both dag runs are still RUNNING -> no TI may be changed
    ti.refresh_from_db(session=session)
    self.assertEqual(ti.state, State.SCHEDULED)
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
    dr.refresh_from_db(session=session)
    dr.state = State.FAILED
    # why o why
    session.merge(dr)
    session.commit()
    scheduler._change_state_for_tis_without_dagrun(simple_dag_bag=dagbag,
                                                   old_states=[State.SCHEDULED, State.QUEUED],
                                                   new_state=State.NONE,
                                                   session=session)
    # dr is FAILED now, so its TI gets reset to NONE
    ti.refresh_from_db(session=session)
    self.assertEqual(ti.state, State.NONE)
    # don't touch ti2
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
    """The scheduler's _execute_helper resets orphaned SCHEDULED TIs of
    scheduler-owned dag runs to NONE, but leaves backfill-owned runs alone."""
    session = settings.Session()
    dag = DAG(
        'test_execute_helper_reset_orphaned_tasks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='op1')
    dag.clear()
    # scheduler-created run: its orphaned TI should be reset
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    # backfill-created run: must not be touched by the scheduler
    dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                            state=State.RUNNING,
                            execution_date=DEFAULT_DATE + datetime.timedelta(1),
                            start_date=DEFAULT_DATE,
                            session=session)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = State.SCHEDULED
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    processor = mock.MagicMock()
    processor.get_last_finish_time.return_value = None
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    executor = TestExecutor()
    scheduler.executor = executor
    scheduler._execute_helper(processor_manager=processor)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti.state, State.NONE)
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
        self,
        dag_id,
        expected_task_states,  # dict of task_id: state
        dagrun_state,
        run_kwargs=None,
        advance_execution_date=False,
        session=None):
    """
    Helper for testing DagRun states with simple two-task DAGS.
    This is hackish: a dag run is created but its tasks are
    run by a backfill.

    :param dag_id: the dag to create a run for
    :param expected_task_states: dict of task_id -> expected final state
    :param dagrun_state: expected final state of the dag run
    :param run_kwargs: extra kwargs forwarded to dag.run()
    :param advance_execution_date: create a second run so the evaluated
        execution_date is after the dag's start_date
    :param session: injected by @provide_session
    """
    if run_kwargs is None:
        run_kwargs = {}
    scheduler = SchedulerJob(**self.default_scheduler_args)
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    if advance_execution_date:
        # run a second time to schedule a dagrun after the start_date
        dr = scheduler.create_dag_run(dag)
    ex_date = dr.execution_date
    try:
        dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
    except AirflowException:
        # a failing task is expected in several callers; state checks below
        # are what actually decide pass/fail
        pass
    # test tasks
    for task_id, expected_state in expected_task_states.items():
        task = dag.get_task(task_id)
        ti = TI(task, ex_date)
        ti.refresh_from_db()
        self.assertEqual(ti.state, expected_state)
    # load dagrun
    dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
    dr = dr[0]
    dr.dag = dag
    self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
    """
    DagRuns with one failed and one incomplete root task -> FAILED
    """
    expected = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.UPSTREAM_FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_fail',
        expected_task_states=expected,
        dagrun_state=State.FAILED)
def test_dagrun_success(self):
    """
    DagRuns with one failed and one successful root task -> SUCCESS
    """
    expected = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_success',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
    """
    DagRuns with one successful and one failed root task -> FAILED
    """
    expected = {
        'test_dagrun_succeed': State.SUCCESS,
        'test_dagrun_fail': State.FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_root_fail',
        expected_task_states=expected,
        dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
    """
    DagRuns with one unfinished and one failed root task -> RUNNING
    """
    # Run both the failed and successful tasks
    scheduler = SchedulerJob(**self.default_scheduler_args)
    dag_id = 'test_dagrun_states_root_fail_unfinished'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    try:
        dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
    except AirflowException:  # Expect an exception since there is a failed task
        pass
    # Mark the successful task as never having run since we want to see if the
    # dagrun will be in a running state despite having an unfinished task.
    session = settings.Session()
    ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
    ti.state = State.NONE
    session.commit()
    dr_state = dr.update_state()
    self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
    """
    DagRun is marked a success if ignore_first_depends_on_past=True
    Test that an otherwise-deadlocked dagrun is marked as a success
    if ignore_first_depends_on_past=True and the dagrun execution_date
    is after the start_date.
    """
    expected = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS,
        advance_execution_date=True,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_dagrun_deadlock_ignore_depends_on_past(self):
    """
    Test that ignore_first_depends_on_past doesn't affect results
    (this is the same test as
    test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
    that start_date == execution_date so depends_on_past is irrelevant).
    """
    expected = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_scheduler_start_date(self):
    """
    Test that the scheduler respects start_dates, even when DAGS have run
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # precondition: the dag's start_date lies in the future relative to
    # DEFAULT_DATE, so the scheduler must not create any runs
    self.assertTrue(dag.start_date > DEFAULT_DATE)
    scheduler = SchedulerJob(dag_id,
                             num_runs=2,
                             **self.default_scheduler_args)
    scheduler.run()
    # zero tasks ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
    # previously, running this backfill would kick off the Scheduler
    # because it would take the most recent run and start from there
    # That behavior still exists, but now it will only do so if after the
    # start date
    backfill = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE)
    backfill.run()
    # one task ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
    scheduler = SchedulerJob(dag_id,
                             num_runs=2,
                             **self.default_scheduler_args)
    scheduler.run()
    # still one task
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
    """
    Test that the scheduler can successfully queue multiple dags in parallel
    """
    dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
    for dag_id in dag_ids:
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
    scheduler = SchedulerJob(dag_ids=dag_ids,
                             file_process_interval=0,
                             processor_poll_interval=0.5,
                             num_runs=2)
    scheduler.run()
    # zero tasks ran for the future-dated dag, even with a second dag in play
    dag_id = 'test_start_date_scheduling'
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
    """
    Test if the scheduler does not create multiple dagruns
    if a dag is scheduled with @once and a start_date
    """
    dag = DAG(
        'test_scheduler_dagrun_once',
        start_date=datetime.datetime(2015, 1, 1),
        schedule_interval="@once")
    dag.clear()
    scheduler = SchedulerJob()
    # first pass creates the single run; a second pass must create none
    self.assertIsNotNone(scheduler.create_dag_run(dag))
    self.assertIsNone(scheduler.create_dag_run(dag))
def test_scheduler_process_task_instances(self):
    """
    Test if _process_task_instances puts the right task instances into the
    queue.
    """
    dag = DAG(
        dag_id='test_scheduler_process_execute_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # the TI key (dag_id, task_id, execution_date) is appended to the queue
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
    )
def test_scheduler_do_not_schedule_removed_task(self):
    """After a task disappears from the DAG, the scheduler must not queue
    a task instance for it."""
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # re-create the DAG without the task to simulate its removal
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # the scheduler enqueues via queue.append (not queue.put) -- assert on
    # the method actually used; the previous put-based check was vacuous
    queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
    """A dag whose start_date is in the far future must get neither a dag
    run nor queued task instances."""
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_too_early',
        start_date=datetime.datetime(2200, 1, 1))
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # the scheduler enqueues via queue.append (not queue.put) -- assert on
    # the method actually used; the previous put-based check was vacuous
    queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
    """Task instances already in SUCCESS must not be queued again."""
    dag = DAG(
        dag_id='test_scheduler_do_not_run_finished',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # mark every TI finished so there is nothing left to schedule
    tis = dr.get_task_instances(session=session)
    for ti in tis:
        ti.state = State.SUCCESS
    session.commit()
    session.close()
    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)
    # the scheduler enqueues via queue.append (not queue.put) -- assert on
    # the method actually used; the previous put-based check was vacuous
    queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
    """
    Test if a task instance will be added if the dag is updated
    """
    dag = DAG(
        dag_id='test_scheduler_add_new_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    tis = dr.get_task_instances()
    # use assertEqual, not the deprecated assertEquals alias
    self.assertEqual(len(tis), 1)
    # add a second task after the run was created; processing must create
    # a TI for it
    dag_task2 = DummyOperator(
        task_id='dummy2',
        dag=dag,
        owner='airflow')
    queue = mock.Mock()
    scheduler._process_task_instances(dag, queue=queue)
    tis = dr.get_task_instances()
    self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs has been reached
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # max_active_runs == 1 and one run exists -> no second run
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
    """
    Test if a dagrun will be set failed if it exceeds dagrun_timeout
    """
    dag = DAG(
        dag_id='test_scheduler_fail_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.dagrun_timeout = datetime.timedelta(seconds=60)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # backdate the run far past the 60s timeout
    dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    # creating the next run should time out the stale one
    dr2 = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr2)
    dr.refresh_from_db(session=session)
    # use assertEqual, not the deprecated assertEquals alias
    self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs has been reached and dagrun_timeout is not reached
    Test if a dagrun will be scheduled if max_dag_runs has been reached but dagrun_timeout is also reached
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    dag.dagrun_timeout = datetime.timedelta(seconds=60)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNone(new_dr)
    # Should be scheduled as dagrun_timeout has passed
    dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
    """
    Test if _process_task_instances only schedules ti's up to max_active_runs
    (related to issue AIRFLOW-137)
    """
    dag = DAG(
        dag_id='test_scheduler_max_active_runs_respected_after_clear',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 3
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag.clear()
    # First create up to 3 dagruns in RUNNING state.
    # NOTE(review): only one create_dag_run call is made here despite the
    # comment above -- verify whether a loop was intended
    scheduler.create_dag_run(dag)
    # Reduce max_active_runs to 1
    dag.max_active_runs = 1
    queue = mock.Mock()
    # and schedule them in, so we can check how many
    # tasks are put on the queue (should be one, not 3)
    scheduler._process_task_instances(dag, queue=queue)
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
    )
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
    """
    Test task instances not queued when pool is full
    """
    # pretend the pool is never reported full at the TI level so the
    # scheduler-side pool accounting is what gets exercised
    mock_pool_full.return_value = False
    dag = DAG(
        dag_id='test_scheduler_verify_pool_full',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow',
        pool='test_scheduler_verify_pool_full')
    session = settings.Session()
    pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
    session.add(pool)
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    # Create 2 dagruns, which will create 2 task instances.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # use assertEqual, not the deprecated assertEquals alias
    self.assertEqual(dr.execution_date, DEFAULT_DATE)
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    queue = []
    scheduler._process_task_instances(dag, queue=queue)
    self.assertEqual(len(queue), 2)
    dagbag = SimpleDagBag([dag])
    # Recreated part of the scheduler here, to kick off tasks -> executor
    for ti_key in queue:
        task = dag.get_task(ti_key[1])
        ti = TI(task, ti_key[2])
        # Task starts out in the scheduled state. All tasks in the
        # scheduled state will be sent to the executor
        ti.state = State.SCHEDULED
        # Also save this task instance to the DB.
        session.merge(ti)
        session.commit()
    scheduler._execute_task_instances(dagbag,
                                      (State.SCHEDULED,
                                       State.UP_FOR_RETRY))
    # only 1 of the 2 TIs fits in the single pool slot
    self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
    """
    Test if the schedule_interval will be auto aligned with the start_date
    such that if the start_date coincides with the schedule the first
    execution_date will be start_date, otherwise it will be start_date +
    interval.
    """
    # start_date does NOT coincide with the cron schedule -> first run is
    # aligned forward to the next schedule tick
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # use assertEqual, not the deprecated assertEquals alias
    self.assertEqual(dr.execution_date, datetime.datetime(2016, 1, 2, 5, 4))
    # start_date coincides with the schedule -> first run is start_date
    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEqual(dr.execution_date, datetime.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
    """
    Checks if tasks that are not taken up by the executor
    get rescheduled
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor
    dag = DAG(
        dag_id='test_scheduler_reschedule',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    dag.clear()
    dag.is_subdag = False
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    # use assertEqual, not the deprecated assertEquals alias
    self.assertEqual(1, len(executor.queued_tasks))
    # the executor never ran the task; after clearing its queue the
    # scheduler must re-queue it on the next loop
    executor.queued_tasks.clear()
    do_schedule()
    self.assertEqual(2, len(executor.queued_tasks))
def test_retry_still_in_executor(self):
    """
    Checks if the scheduler does not put a task in limbo, when a task is retried
    but is still present in the executor.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor
    dag = DAG(
        dag_id='test_retry_still_in_executor',
        start_date=DEFAULT_DATE,
        schedule_interval="@once")
    # A task that always fails so a retry is guaranteed.
    dag_task1 = BashOperator(
        task_id='test_retry_handling_op',
        bash_command='exit 1',
        retries=1,
        dag=dag,
        owner='airflow')
    dag.clear()
    dag.is_subdag = False
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()
    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # Make the scheduler see only the dagbag built above; the
    # decorator-injected mocks are unused inside the closure.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))

    def run_with_error(task):
        # Run the task instance, swallowing the expected failure.
        try:
            task.run()
        except AirflowException:
            pass

    # Pull the queued task instance out of the executor's queue.
    ti_tuple = six.next(six.itervalues(executor.queued_tasks))
    (command, priority, queue, ti) = ti_tuple
    ti.task = dag_task1
    # fail execution
    run_with_error(ti)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
    self.assertEqual(ti.try_number, 1)
    # Force the TI back to SCHEDULED while it is still known to the executor.
    ti.refresh_from_db(lock_for_update=True, session=session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    # do not schedule
    do_schedule()
    # The executor still holds the task, so the scheduler must not touch it.
    self.assertTrue(executor.has_task(ti))
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SCHEDULED)
    # now the executor has cleared and it should be allowed the re-queue
    executor.queued_tasks.clear()
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
    """
    Integration test of the scheduler not accidentally resetting
    the try_numbers for a task
    """
    dag = self.dagbag.get_dag('test_retry_handling_job')
    dag_task1 = dag.get_task("test_retry_handling_op")
    dag.clear()
    scheduler = SchedulerJob(dag_id=dag.dag_id,
                             num_runs=1)
    scheduler.heartrate = 0
    scheduler.run()
    session = settings.Session()
    ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
                                  TI.task_id == dag_task1.task_id).first()
    # make sure the counter has increased
    self.assertEqual(ti.try_number, 2)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
    """Verifies that the scheduler run duration limit is followed."""
    target_dag = self.dagbag.get_dag('test_start_date_scheduling')
    target_dag.clear()
    self.assertTrue(target_dag.start_date > DEFAULT_DATE)

    duration_limit = 5
    started_at = datetime.datetime.now()
    job = SchedulerJob('test_start_date_scheduling',
                       run_duration=duration_limit,
                       **self.default_scheduler_args)
    job.run()
    finished_at = datetime.datetime.now()

    elapsed = (finished_at - started_at).total_seconds()
    logging.info("Test ran in %.2fs, expected %.2fs",
                 elapsed,
                 duration_limit)
    # Allow up to 5 seconds of overhead on top of the configured limit.
    self.assertLess(elapsed - duration_limit, 5.0)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_ids = [dag_id]
    # The DAG files live in a sibling directory of the configured DAGs folder.
    dag_directory = os.path.join(settings.DAGS_FOLDER,
                                 "..",
                                 "dags_with_system_exit")
    dag_file = os.path.join(dag_directory,
                            'b_test_scheduler_dags.py')
    dagbag = DagBag(dag_folder=dag_file)
    for dag_id in dag_ids:
        dag = dagbag.get_dag(dag_id)
        dag.clear()
    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=dag_directory,
                             num_runs=1,
                             **self.default_scheduler_args)
    scheduler.run()
    session = settings.Session()
    # Despite the sys.exit() in a sibling DAG file, the scheduler must still
    # have created exactly one task instance for this DAG.
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
    """
    Test to check that a DAG returns its active runs.

    Creates a minute-scheduled DAG, lets the scheduler create one DagRun,
    and verifies that DAG.get_active_runs() reports that run's
    execution_date.
    """
    now = datetime.datetime.now()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'get_active_runs_test'
    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )
    # Simple three-task chain: run_this_1 >> run_this_2 >> run_this_3.
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
    run_this_3.set_upstream(run_this_2)
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag1.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag1.clear()
    dr = scheduler.create_dag_run(dag1)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    execution_date = dr.execution_date
    running_dates = dag1.get_active_runs()
    try:
        running_date = running_dates[0]
    except IndexError:
        # No active runs were reported; use a sentinel so the assertion
        # below fails with a readable message instead of raising here.
        # (Narrowed from a bare `except:` which would also have hidden
        # unrelated errors such as SystemExit/KeyboardInterrupt.)
        running_date = 'Except'
    self.assertEqual(execution_date, running_date,
                     'Running Date must match Execution Date')
def test_dag_catchup_option(self):
    """
    Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
    """
    now = datetime.datetime.now()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    three_minutes_ago = now - datetime.timedelta(minutes=3)
    two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)
    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'no_catchup_test1'
    DAG_NAME2 = 'no_catchup_test2'
    DAG_NAME3 = 'no_catchup_test3'
    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    # dag1 only checks the default value of `catchup`.
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )
    default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
    # Test configs have catchup by default ON
    self.assertEqual(default_catchup, True)
    # Correct default?
    self.assertEqual(dag1.catchup, True)
    # dag2: per-minute schedule with catchup explicitly disabled.
    dag2 = DAG(DAG_NAME2,
               schedule_interval='* * * * *',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
    run_this_3.set_upstream(run_this_2)
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag2.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag2.clear()
    dr = scheduler.create_dag_run(dag2)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last 3 minutes, not 6 hours ago
    self.assertGreater(dr.execution_date, three_minutes_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, datetime.datetime.now())
    # dag3: hourly schedule with catchup disabled, so the allowed window
    # is the last ~2 hours instead of the last few minutes.
    dag3 = DAG(DAG_NAME3,
               schedule_interval='@hourly',
               max_active_runs=1,
               catchup=False,
               default_args=default_args
               )
    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
    run_this_3.set_upstream(run_this_2)
    session = settings.Session()
    orm_dag = DagModel(dag_id=dag3.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()
    scheduler = SchedulerJob()
    dag3.clear()
    dr = None
    dr = scheduler.create_dag_run(dag3)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last two hours, not 6 hours ago
    self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, datetime.datetime.now())
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
    """An unparseable DAG file present before the scheduler starts must be
    recorded as exactly one ImportError row with the parser's message.
    """
    # Create the temp dir before entering the try block so the finally
    # clause can never reference an unbound name if mkdtemp() itself fails.
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """An unparseable DAG file added after a scheduler pass must be picked
    up by the next pass and recorded as exactly one ImportError row.
    """
    # Create the temp dir before entering the try block so the finally
    # clause can never reference an unbound name if mkdtemp() itself fails.
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First loop runs against an empty folder; the bad file appears
        # only for the second loop.
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file must not create any ImportError rows."""
    # Create the temp dir before entering the try block so the finally
    # clause can never reference an unbound name if mkdtemp() itself fails.
    dags_folder = mkdtemp()
    try:
        parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(parseable_filename, 'w') as parseable_file:
            parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
    """Re-parsing a file whose error moved must replace the stored
    ImportError rather than accumulate a second row.
    """
    # Create the temp dir before entering the try block so the finally
    # clause can never reference an unbound name if mkdtemp() itself fails.
    dags_folder = mkdtemp()
    try:
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    # Still one row, now pointing at line 2 of the rewritten file.
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing a previously unparseable file must clear its ImportError row
    on the next scheduler pass.
    """
    # Create the temp dir before entering the try block so the finally
    # clause can never reference an unbound name if mkdtemp() itself fails.
    dags_folder = mkdtemp()
    try:
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a previously unparseable file must clear its ImportError
    row on the next scheduler pass.
    """
    # Create the temp dir before entering the try block so the finally
    # clause can never reference an unbound name if mkdtemp() itself fails.
    dags_folder = mkdtemp()
    try:
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)
    # Rerun the scheduler once the dag file has been removed; the folder
    # path intentionally no longer exists on disk.
    self.run_single_scheduler_loop_with_no_dags(dags_folder)
    session = settings.Session()
    import_errors = session.query(models.ImportError).all()
    self.assertEqual(len(import_errors), 0)
| |
# Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import futurist
import mock
from nova.conductor import manager as conductor_manager
from nova import context
from nova.db import api as db
from nova import objects
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit import fake_instance
class ServerMigrationsSampleJsonTest(test_servers.ServersSampleBase):
    # API sample tests for the server-migrations actions at microversion 2.22.
    sample_dir = 'server-migrations'
    scenarios = [('v2_22', {'api_major_version': 'v2.1'})]
    microversion = '2.22'

    def setUp(self):
        """setUp method for server usage."""
        super(ServerMigrationsSampleJsonTest, self).setUp()
        # UUID of a freshly created server used by every test below.
        self.uuid = self._post_server()

    # Decorators are applied bottom-up, so the mock arguments arrive in
    # reverse decorator order.
    @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate')
    @mock.patch.object(db, 'service_get_by_compute_host')
    @mock.patch.object(objects.Migration, 'get_by_id_and_instance')
    @mock.patch('nova.compute.manager.ComputeManager.'
                'live_migration_force_complete')
    def test_live_migrate_force_complete(self, live_migration_pause_instance,
                                         get_by_id_and_instance,
                                         service_get_by_compute_host,
                                         _live_migrate):
        # Force-completing a running live migration returns 202.
        migration = objects.Migration()
        migration.id = 1
        migration.status = 'running'
        migration.source_compute = self.compute.host
        get_by_id_and_instance.return_value = migration
        self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server',
                      {'hostname': self.compute.host})
        response = self._do_post('servers/%s/migrations/%s/action'
                                 % (self.uuid, '3'), 'force_complete', {})
        self.assertEqual(202, response.status_code)

    def test_get_migration(self):
        # Showing a migration for a nonexistent server returns 404.
        response = self._do_get('servers/fake_id/migrations/1234')
        self.assertEqual(404, response.status_code)

    def test_list_migrations(self):
        # Listing migrations for a nonexistent server returns 404.
        response = self._do_get('servers/fake_id/migrations')
        self.assertEqual(404, response.status_code)
class ServerMigrationsSamplesJsonTestV2_23(test_servers.ServersSampleBase):
    """API sample tests for showing and listing server migrations at
    microversion 2.23.
    """
    ADMIN_API = True
    sample_dir = "server-migrations"
    microversion = '2.23'
    scenarios = [('v2_23', {'api_major_version': 'v2.1'})]
    UUID_1 = '4cfba335-03d8-49b2-8c52-e69043d1e8fe'
    UUID_2 = '058fc419-a8a8-4e08-b62c-a9841ef9cd3f'

    # Two migration records created in setUp(): a live-migration for UUID_1
    # and a resize for UUID_2. The month arguments below were previously
    # written as the octal literal ``0o1`` (a 2to3 artifact of legacy
    # ``01``); the value is identical, the plain ``1`` is just readable.
    fake_migrations = [
        {
            'source_node': 'node1',
            'dest_node': 'node2',
            'source_compute': 'compute1',
            'dest_compute': 'compute2',
            'dest_host': '1.2.3.4',
            'status': 'running',
            'instance_uuid': UUID_1,
            'migration_type': 'live-migration',
            'hidden': False,
            'memory_total': 123456,
            'memory_processed': 12345,
            'memory_remaining': 111111,
            'disk_total': 234567,
            'disk_processed': 23456,
            'disk_remaining': 211111,
            'created_at': datetime.datetime(2016, 1, 29, 13, 42, 2),
            'updated_at': datetime.datetime(2016, 1, 29, 13, 42, 2),
            'deleted_at': None,
            'deleted': False
        },
        {
            'source_node': 'node10',
            'dest_node': 'node20',
            'source_compute': 'compute10',
            'dest_compute': 'compute20',
            'dest_host': '5.6.7.8',
            'status': 'migrating',
            'instance_uuid': UUID_2,
            'migration_type': 'resize',
            'hidden': False,
            'memory_total': 456789,
            'memory_processed': 56789,
            'memory_remaining': 400000,
            'disk_total': 96789,
            'disk_processed': 6789,
            'disk_remaining': 90000,
            'created_at': datetime.datetime(2016, 1, 22, 13, 42, 2),
            'updated_at': datetime.datetime(2016, 1, 22, 13, 42, 2),
            'deleted_at': None,
            'deleted': False
        }
    ]

    def setUp(self):
        """Create the two fake migrations and a DB instance for UUID_1."""
        super(ServerMigrationsSamplesJsonTestV2_23, self).setUp()
        fake_context = context.RequestContext('fake', 'fake')
        self.mig1 = objects.Migration(
            context=fake_context, **self.fake_migrations[0])
        self.mig1.create()
        self.mig2 = objects.Migration(
            context=fake_context, **self.fake_migrations[1])
        self.mig2.create()
        # Strip joined/auto-generated fields so objects.Instance accepts
        # the remaining keys as plain attributes.
        fake_ins = fake_instance.fake_db_instance(uuid=self.UUID_1)
        fake_ins.pop("pci_devices")
        fake_ins.pop("security_groups")
        fake_ins.pop("services")
        fake_ins.pop("tags")
        fake_ins.pop("info_cache")
        fake_ins.pop("id")
        self.instance = objects.Instance(
            context=fake_context,
            **fake_ins)
        self.instance.create()

    def test_get_migration(self):
        # Showing mig1 for its instance succeeds and matches the sample.
        response = self._do_get('servers/%s/migrations/%s' %
                                (self.fake_migrations[0]["instance_uuid"],
                                 self.mig1.id))
        self.assertEqual(200, response.status_code)
        self._verify_response('migrations-get',
                              {"server_uuid": self.UUID_1},
                              response, 200)

    def test_list_migrations(self):
        # Listing migrations for UUID_1 succeeds and matches the sample.
        response = self._do_get('servers/%s/migrations' %
                                self.fake_migrations[0]["instance_uuid"])
        self.assertEqual(200, response.status_code)
        self._verify_response('migrations-index',
                              {"server_uuid_1": self.UUID_1},
                              response, 200)
class ServerMigrationsSampleJsonTestV2_24(test_servers.ServersSampleBase):
    # API sample tests for aborting a live migration at microversion 2.24.
    ADMIN_API = True
    microversion = '2.24'
    sample_dir = "server-migrations"
    scenarios = [('v2_24', {'api_major_version': 'v2.1'})]

    def setUp(self):
        """setUp method for server usage."""
        super(ServerMigrationsSampleJsonTestV2_24, self).setUp()
        self.uuid = self._post_server()
        self.context = context.RequestContext('fake', 'fake')
        # Seed a 'running' live migration tied to the server created above.
        fake_migration = {
            'source_node': self.compute.host,
            'dest_node': 'node10',
            'source_compute': 'compute1',
            'dest_compute': 'compute12',
            'migration_type': 'live-migration',
            'instance_uuid': self.uuid,
            'status': 'running'}
        self.migration = objects.Migration(context=self.context,
                                           **fake_migration)
        self.migration.create()

    @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate')
    def test_live_migrate_abort(self, _live_migrate):
        # DELETE on a running migration aborts it and returns 202.
        self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server',
                      {'hostname': self.compute.host})
        uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id)
        response = self._do_delete(uri)
        self.assertEqual(202, response.status_code)

    @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate')
    def test_live_migrate_abort_migration_not_found(self, _live_migrate):
        # DELETE with a migration id that does not exist returns 404.
        self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server',
                      {'hostname': self.compute.host})
        uri = 'servers/%s/migrations/%s' % (self.uuid, '45')
        response = self._do_delete(uri)
        self.assertEqual(404, response.status_code)

    @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate')
    def test_live_migrate_abort_migration_not_running(self, _live_migrate):
        # DELETE on a migration that already completed returns 400.
        self.migration.status = 'completed'
        self.migration.save()
        self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server',
                      {'hostname': self.compute.host})
        uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id)
        response = self._do_delete(uri)
        self.assertEqual(400, response.status_code)
class ServerMigrationsSamplesJsonTestV2_59(
        ServerMigrationsSamplesJsonTestV2_23):
    """Re-runs the 2.23 migration sample tests at microversion 2.59."""
    ADMIN_API = True
    microversion = '2.59'
    scenarios = [('v2_59', {'api_major_version': 'v2.1'})]

    def setUp(self):
        # The 2.59 samples carry migration UUIDs, so attach one to each
        # fake migration before the parent setUp() creates the records.
        migration_uuids = ('12341d4b-346a-40d0-83c6-5f4f6892b650',
                           '22341d4b-346a-40d0-83c6-5f4f6892b650')
        for fake_migration, mig_uuid in zip(self.fake_migrations,
                                            migration_uuids):
            fake_migration['uuid'] = mig_uuid
        super(ServerMigrationsSamplesJsonTestV2_59, self).setUp()
class ServerMigrationsSampleJsonTestV2_65(ServerMigrationsSampleJsonTestV2_24):
    # Re-runs the 2.24 abort tests and adds aborting a queued migration
    # at microversion 2.65.
    ADMIN_API = True
    microversion = '2.65'
    scenarios = [('v2_65', {'api_major_version': 'v2.1'})]

    @mock.patch.object(conductor_manager.ComputeTaskManager, '_live_migrate')
    def test_live_migrate_abort_migration_queued(self, _live_migrate):
        # Aborting a migration that is still 'queued' returns 202.
        self.migration.status = 'queued'
        self.migration.save()
        self._do_post('servers/%s/action' % self.uuid, 'live-migrate-server',
                      {'hostname': self.compute.host})
        # Register the migration as waiting in the compute manager so the
        # abort path can find it in _waiting_live_migrations.
        self.compute._waiting_live_migrations[self.uuid] = (
            self.migration, futurist.Future())
        uri = 'servers/%s/migrations/%s' % (self.uuid, self.migration.id)
        response = self._do_delete(uri)
        self.assertEqual(202, response.status_code)
| |
# Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Quobyte volume driver module."""
import mock
import os
from oslo_concurrency import processutils
from oslo_utils import fileutils
from nova import exception
from nova import test
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import quobyte
class QuobyteTestCase(test.NoDBTestCase):
    """Tests the nova.virt.libvirt.volume.quobyte module utilities."""

    @mock.patch.object(fileutils, "ensure_tree")
    @mock.patch.object(utils, "execute")
    def test_quobyte_mount_volume(self, mock_execute, mock_ensure_tree):
        # mount_volume must create the mount point and invoke mount.quobyte;
        # exit code 4 is accepted in addition to 0.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        quobyte.mount_volume(quobyte_volume, export_mnt_base)
        mock_ensure_tree.assert_called_once_with(export_mnt_base)
        expected_commands = [mock.call('mount.quobyte',
                                       quobyte_volume,
                                       export_mnt_base,
                                       check_exit_code=[0, 4])
                             ]
        mock_execute.assert_has_calls(expected_commands)

    @mock.patch.object(fileutils, "ensure_tree")
    @mock.patch.object(utils, "execute")
    def test_quobyte_mount_volume_with_config(self,
                                              mock_execute,
                                              mock_ensure_tree):
        # When a config file is passed, mount.quobyte is called with '-c'.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        config_file_dummy = "/etc/quobyte/dummy.conf"
        quobyte.mount_volume(quobyte_volume,
                             export_mnt_base,
                             config_file_dummy)
        mock_ensure_tree.assert_called_once_with(export_mnt_base)
        expected_commands = [mock.call('mount.quobyte',
                                       quobyte_volume,
                                       export_mnt_base,
                                       '-c',
                                       config_file_dummy,
                                       check_exit_code=[0, 4])
                             ]
        mock_execute.assert_has_calls(expected_commands)

    @mock.patch.object(fileutils, "ensure_tree")
    @mock.patch.object(utils, "execute",
                       side_effect=(processutils.
                                    ProcessExecutionError))
    def test_quobyte_mount_volume_fails(self, mock_execute, mock_ensure_tree):
        # A failing mount command propagates ProcessExecutionError.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        self.assertRaises(processutils.ProcessExecutionError,
                          quobyte.mount_volume,
                          quobyte_volume,
                          export_mnt_base)

    @mock.patch.object(utils, "execute")
    def test_quobyte_umount_volume(self, mock_execute):
        # umount_volume simply shells out to umount.quobyte.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        quobyte.umount_volume(export_mnt_base)
        mock_execute.assert_called_once_with('umount.quobyte',
                                             export_mnt_base)

    @mock.patch.object(quobyte.LOG, "error")
    @mock.patch.object(utils, "execute")
    def test_quobyte_umount_volume_warns(self,
                                         mock_execute,
                                         mock_debug):
        # A "Device or resource busy" failure is logged via LOG.error
        # (patched here as mock_debug) instead of being raised.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))

        def exec_side_effect(*cmd, **kwargs):
            exerror = processutils.ProcessExecutionError()
            exerror.message = "Device or resource busy"
            raise exerror
        mock_execute.side_effect = exec_side_effect
        quobyte.umount_volume(export_mnt_base)
        (mock_debug.
         assert_called_once_with("The Quobyte volume at %s is still in use.",
                                 export_mnt_base))

    @mock.patch.object(quobyte.LOG, "exception")
    @mock.patch.object(utils, "execute",
                       side_effect=(processutils.ProcessExecutionError))
    def test_quobyte_umount_volume_fails(self,
                                         mock_execute,
                                         mock_exception):
        # Any other unmount failure is logged via LOG.exception, not raised.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        quobyte.umount_volume(export_mnt_base)
        (mock_exception.
         assert_called_once_with("Couldn't unmount "
                                 "the Quobyte Volume at %s",
                                 export_mnt_base))

    @mock.patch.object(os, "access", return_value=True)
    @mock.patch.object(utils, "execute")
    def test_quobyte_is_valid_volume(self, mock_execute, mock_access):
        # validate_volume probes the mount via getfattr on quobyte.info.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        quobyte.validate_volume(export_mnt_base)
        mock_execute.assert_called_once_with('getfattr',
                                             '-n',
                                             'quobyte.info',
                                             export_mnt_base)

    @mock.patch.object(utils, "execute",
                       side_effect=(processutils.
                                    ProcessExecutionError))
    def test_quobyte_is_valid_volume_vol_not_valid_volume(self, mock_execute):
        # A failing getfattr means the mount is not a Quobyte volume.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        self.assertRaises(exception.NovaException,
                          quobyte.validate_volume,
                          export_mnt_base)

    @mock.patch.object(os, "access", return_value=False)
    @mock.patch.object(utils, "execute",
                       side_effect=(processutils.
                                    ProcessExecutionError))
    def test_quobyte_is_valid_volume_vol_no_valid_access(self,
                                                         mock_execute,
                                                         mock_access):
        # An inaccessible mount point also fails validation.
        mnt_base = '/mnt'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        self.assertRaises(exception.NovaException,
                          quobyte.validate_volume,
                          export_mnt_base)
class LibvirtQuobyteVolumeDriverTestCase(
        test_volume.LibvirtVolumeBaseTestCase):
    """Tests the LibvirtQuobyteVolumeDriver class."""

    @mock.patch.object(quobyte, 'validate_volume')
    @mock.patch.object(quobyte, 'mount_volume')
    @mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
    def test_libvirt_quobyte_driver_mount(self,
                                          mock_is_mounted,
                                          mock_mount_volume,
                                          mock_validate_volume
                                          ):
        # connect_volume on an unmounted export must mount and validate it.
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = 'quobyte://192.168.1.1/volume-00001'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        file_path = os.path.join(export_mnt_base, self.name)
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        mock_mount_volume.assert_called_once_with(quobyte_volume,
                                                  export_mnt_base,
                                                  mock.ANY)
        mock_validate_volume.assert_called_with(export_mnt_base)

    @mock.patch.object(quobyte, 'validate_volume')
    @mock.patch.object(quobyte, 'umount_volume')
    @mock.patch.object(libvirt_utils, 'is_mounted', return_value=True)
    def test_libvirt_quobyte_driver_umount(self, mock_is_mounted,
                                           mock_umount_volume,
                                           mock_validate_volume):
        # disconnect_volume on a mounted export must validate then unmount.
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = 'quobyte://192.168.1.1/volume-00001'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        file_path = os.path.join(export_mnt_base, self.name)
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        mock_validate_volume.assert_called_once_with(export_mnt_base)
        mock_umount_volume.assert_called_once_with(export_mnt_base)

    @mock.patch.object(quobyte, 'validate_volume')
    @mock.patch.object(quobyte, 'umount_volume')
    def test_libvirt_quobyte_driver_already_mounted(self,
                                                    mock_umount_volume,
                                                    mock_validate_volume
                                                    ):
        # Without patching is_mounted, the driver probes the mount with
        # findmnt on both connect and disconnect (two expected invocations).
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = 'quobyte://192.168.1.1/volume-00001'
        quobyte_volume = '192.168.1.1/volume-00001'
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        file_path = os.path.join(export_mnt_base, self.name)
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self._assertFileTypeEquals(tree, file_path)
        libvirt_driver.disconnect_volume(connection_info, "vde")
        expected_commands = [
            ('findmnt', '--target', export_mnt_base,
             '--source', "quobyte@" + quobyte_volume),
            ('findmnt', '--target', export_mnt_base,
             '--source', "quobyte@" + quobyte_volume),
        ]
        self.assertEqual(expected_commands, self.executes)
        mock_umount_volume.assert_called_once_with(export_mnt_base)
        mock_validate_volume.assert_called_once_with(export_mnt_base)

    @mock.patch.object(quobyte, 'validate_volume')
    @mock.patch.object(quobyte, 'mount_volume')
    @mock.patch.object(libvirt_utils, 'is_mounted', return_value=False)
    def test_libvirt_quobyte_driver_qcow2(self, mock_is_mounted,
                                          mock_mount_volume,
                                          mock_validate_volume
                                          ):
        # A qcow2-format volume must yield a 'file' disk with driver type
        # 'qcow2' in the generated libvirt XML.
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = 'quobyte://192.168.1.1/volume-00001'
        name = 'volume-00001'
        image_format = 'qcow2'
        quobyte_volume = '192.168.1.1/volume-00001'
        connection_info = {'data': {'export': export_string,
                                    'name': name,
                                    'format': image_format}}
        export_mnt_base = os.path.join(mnt_base,
                                       utils.get_hash_str(quobyte_volume))
        libvirt_driver.connect_volume(connection_info, self.disk_info)
        conf = libvirt_driver.get_config(connection_info, self.disk_info)
        tree = conf.format_dom()
        self.assertEqual(tree.get('type'), 'file')
        self.assertEqual(tree.find('./driver').get('type'), 'qcow2')
        (mock_mount_volume.
         assert_called_once_with('192.168.1.1/volume-00001',
                                 export_mnt_base,
                                 mock.ANY))
        mock_validate_volume.assert_called_with(export_mnt_base)
        libvirt_driver.disconnect_volume(connection_info, "vde")

    def test_libvirt_quobyte_driver_mount_non_quobyte_volume(self):
        # A validate_volume failure must propagate out of connect_volume.
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = 'quobyte://192.168.1.1/volume-00001'
        connection_info = {'data': {'export': export_string,
                                    'name': self.name}}

        def exe_side_effect(*cmd, **kwargs):
            # NOTE: mock.ANY compares equal to anything, so this side
            # effect raises unconditionally for every call.
            if cmd == mock.ANY:
                raise exception.NovaException()
        with mock.patch.object(quobyte,
                               'validate_volume') as mock_execute:
            mock_execute.side_effect = exe_side_effect
            self.assertRaises(exception.NovaException,
                              libvirt_driver.connect_volume,
                              connection_info,
                              self.disk_info)

    def test_libvirt_quobyte_driver_normalize_url_with_protocol(self):
        # _normalize_url strips the quobyte:// scheme prefix.
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = 'quobyte://192.168.1.1/volume-00001'
        self.assertEqual(libvirt_driver._normalize_url(export_string),
                         "192.168.1.1/volume-00001")

    def test_libvirt_quobyte_driver_normalize_url_without_protocol(self):
        # A URL without a scheme is returned unchanged.
        mnt_base = '/mnt'
        self.flags(quobyte_mount_point_base=mnt_base, group='libvirt')
        libvirt_driver = quobyte.LibvirtQuobyteVolumeDriver(self.fake_conn)
        export_string = '192.168.1.1/volume-00001'
        self.assertEqual(libvirt_driver._normalize_url(export_string),
                         "192.168.1.1/volume-00001")
| |
#!/usr/bin/env python
"""Example of display interactive flood-filling "inference" results.
shift+mousedown0 triggers the simulated flood filling to start with an initial
seed at the mouse position. The computed mask values are displayed as an image,
while the seed points chosen are displayed as point annotations.
keyt causes the simulated flood filling to stop.
In this example, the mask values are actually just computed as a distance
transform of the ground truth segmentation, and the seed points are restricted
to the ground truth segment and assign random priorities. In actual use, this
same visualization approach can be used to display the actual mask and seed
points computed by a flood filling TensorFlow model.
The cloudvolume library (https://github.com/seung-lab/cloud-volume) is used to
retrieve patches of the ground truth volume.
The zarr library is used to represent the sparse in-memory array containing the
computed inference results that are displayed in neuroglancer.
"""
import random
import time
import threading
import neuroglancer
import cloudvolume
import zarr
import numpy as np
import scipy.ndimage
class InteractiveInference(object):
    """Serves a neuroglancer viewer that displays a simulated flood fill.

    shift+mousedown0 starts a simulated flood fill seeded at the mouse
    position; keyt stops it.  Computed mask values are shown as an image
    layer and the chosen seed points as point annotations.
    """

    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        # Ground-truth segmentation volume, fetched lazily in patches.
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        viewer.actions.add('start-fill', self._start_fill_action)
        viewer.actions.add('stop-fill', self._stop_fill_action)
        with viewer.config_state.txn() as s:
            # Bind input events to the two actions registered above.
            s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
            s.input_event_bindings.data_view['keyt'] = 'stop-fill'
        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source='precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
        # threading.Event used to cancel the currently running fill, if any.
        self.flood_fill_event = None

    def _do_flood_fill(self, initial_pos, inf_results, inf_volume, event):
        """Worker-thread body: grow a fill from initial_pos, writing mask
        values into inf_results until the frontier empties or event is set."""
        initial_pos = (int(initial_pos[0]), int(initial_pos[1]), int(initial_pos[2]))
        # In-memory cache of ground-truth data, indexed z,y,x (reversed axes).
        gt_vol_zarr = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint64)
        gt_blocks_seen = set()
        block_size = np.array((64, 64, 64), np.int64)

        def fetch_gt_block(block):
            # Download one 64^3 ground-truth block into the zarr cache.
            spos = block * block_size
            epos = spos + block_size
            slice_expr = np.s_[int(spos[0]):int(epos[0]),
                               int(spos[1]):int(epos[1]),
                               int(spos[2]):int(epos[2])]
            rev_slice_expr = np.s_[int(spos[2]):int(epos[2]),
                                   int(spos[1]):int(epos[1]),
                                   int(spos[0]):int(epos[0])]
            # cloudvolume indexes x,y,z(,channel); transpose into the cache's
            # z,y,x order and drop the channel axis.
            gt_data = np.transpose(self.gt_vol[slice_expr][..., 0], (2, 1, 0))
            gt_vol_zarr[rev_slice_expr] = gt_data

        def get_patch(spos, epos):
            # Return ground truth for [spos, epos), fetching any blocks that
            # have not been seen yet exactly once.
            spos = np.array(spos)
            epos = np.array(epos)
            sblock = spos // block_size
            eblock = (epos - 1) // block_size
            for blockoff in np.ndindex(tuple(eblock - sblock + 1)):
                block = np.array(blockoff) + sblock
                block_tuple = tuple(block)
                if block_tuple in gt_blocks_seen: continue
                gt_blocks_seen.add(block_tuple)
                fetch_gt_block(block)
            rev_slice_expr = np.s_[int(spos[2]):int(epos[2]),
                                   int(spos[1]):int(epos[1]),
                                   int(spos[0]):int(epos[0])]
            result = gt_vol_zarr[rev_slice_expr]
            return result

        # The fill is restricted to the segment under the initial seed.
        segment_id = self.gt_vol[initial_pos][0]

        patch_size = np.array((33, ) * 3, np.int64)
        lower_bound = patch_size // 2
        upper_bound = np.array(self.gt_vol.bounds.to_list()[3:]) - patch_size + patch_size // 2
        d = 8  # spacing (in voxels) between successive seed points

        seen = set()  # every enqueued position; also shown as annotations
        q = []  # frontier of positions still to be processed

        last_invalidate = [time.time()]
        invalidate_interval = 3  # seconds between viewer refreshes

        def enqueue(pos):
            # Add pos to the frontier unless out of bounds or already seen.
            if np.any(pos < lower_bound) or np.any(pos >= upper_bound): return
            if pos in seen: return
            seen.add(pos)
            q.append(pos)

        def update_view():
            # Rate-limited refresh of the inference layer and seed annotations.
            if event.is_set():
                return
            cur_time = time.time()
            if cur_time < last_invalidate[0] + invalidate_interval:
                return
            last_invalidate[0] = cur_time
            inf_volume.invalidate()
            with self.viewer.txn() as s:
                s.layers['points'].annotations = [
                    neuroglancer.PointAnnotation(id=repr(pos), point=pos) for pos in list(seen)
                ]

        def process_pos(pos):
            # Compute the simulated mask for the patch centered on pos and
            # enqueue in-segment neighbors d voxels away along each axis.
            spos = pos - patch_size // 2
            epos = spos + patch_size
            rev_slice_expr = np.s_[int(spos[2]):int(epos[2]),
                                   int(spos[1]):int(epos[1]),
                                   int(spos[0]):int(epos[0])]
            gt_data = get_patch(spos, epos)
            mask = gt_data == segment_id
            for offset in ((0, 0, d), (0, 0, -d), (0, d, 0), (0, -d, 0), (d, 0, 0), (-d, 0, 0)):
                # Patch data is z,y,x, hence the [::-1] on the x,y,z index.
                if not mask[tuple(patch_size // 2 + offset)[::-1]]: continue
                new_pos = np.array(pos) + np.array(offset)
                enqueue(tuple(new_pos))
            # Simulated "inference": distance transform of the segment mask,
            # clipped at 5 voxels and scaled into [1, 255] (0 = unwritten).
            # NOTE(review): np.cast was removed in NumPy 1.25+ and
            # scipy.ndimage.morphology is a deprecated alias; newer stacks
            # need .astype(np.uint8) and scipy.ndimage.distance_transform_edt.
            dist_transform = scipy.ndimage.morphology.distance_transform_edt(~mask)
            inf_results[rev_slice_expr] = 1 + np.cast[np.uint8](
                np.minimum(dist_transform, 5) / 5.0 * 254)
            self.viewer.defer_callback(update_view)

        enqueue(initial_pos)

        # Process the frontier in random order until exhausted or cancelled;
        # swap-with-last gives O(1) removal from the list.
        while len(q) > 0 and not event.is_set():
            i = random.randint(0, len(q) - 1)
            pos = q[i]
            q[i] = q[-1]
            del q[-1]
            process_pos(pos)
        # Final refresh once the fill terminates.
        self.viewer.defer_callback(update_view)

    def _stop_flood_fill(self):
        # Signal the worker thread (if any) to stop and forget it.
        if self.flood_fill_event is not None:
            self.flood_fill_event.set()
        self.flood_fill_event = None

    def _start_flood_fill(self, pos):
        # Cancel any in-progress fill, then launch a fresh worker at pos.
        self._stop_flood_fill()
        # Sparse in-memory array holding the displayed inference results.
        inf_results = zarr.zeros(
            self.gt_vol.bounds.to_list()[3:][::-1], chunks=(64, 64, 64), dtype=np.uint8)
        inf_volume = neuroglancer.LocalVolume(
            data=inf_results, voxel_size=list(self.gt_vol.resolution))
        with self.viewer.txn() as s:
            s.layers['points'] = neuroglancer.AnnotationLayer()
            s.layers['inference'] = neuroglancer.ImageLayer(
                source=inf_volume,
                shader='''
void main() {
  float v = toNormalized(getDataValue(0));
  vec4 rgba = vec4(0,0,0,0);
  if (v != 0.0) {
    rgba = vec4(colormapJet(v), 1.0);
  }
  emitRGBA(rgba);
}
''',
            )
        self.flood_fill_event = threading.Event()
        t = threading.Thread(
            target=self._do_flood_fill,
            kwargs=dict(
                initial_pos=pos,
                inf_results=inf_results,
                inf_volume=inf_volume,
                event=self.flood_fill_event,
            ))
        t.daemon = True  # don't keep the interpreter alive on exit
        t.start()

    def _start_fill_action(self, action_state):
        # Viewer action callback: start a fill at the voxel under the mouse.
        pos = action_state.mouse_voxel_coordinates
        if pos is None:
            return
        self._start_flood_fill(pos)

    def _stop_fill_action(self, action_state):
        # Viewer action callback: stop the current fill.
        self._stop_flood_fill()
if __name__ == '__main__':
    inf = InteractiveInference()
    # Print the URL at which the interactive viewer can be opened.
    print(inf.viewer)
    # Keep the main thread alive; all work happens in viewer callbacks and
    # the daemonized flood-fill worker threads.
    while True:
        time.sleep(1000)
| |
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
Module for the creation of composite quantum objects via the tensor product.
"""
__all__ = ['tensor', 'super_tensor', 'composite', 'tensor_contract']
import numpy as np
import scipy.sparse as sp
from qutip.qobj import Qobj
from qutip.permute import reshuffle
from qutip.superoperator import operator_to_vector
import qutip.settings
import qutip.superop_reps # Avoid circular dependency here.
def tensor(*args):
    """Calculates the tensor product of input operators.

    Parameters
    ----------
    args : array_like
        ``list`` or ``array`` of quantum objects for tensor product.

    Returns
    -------
    obj : qobj
        A composite quantum object.

    Raises
    ------
    TypeError
        If no arguments are given, if any input is not a Qobj, or if
        superoperator inputs have mismatched representations.

    Examples
    --------
    >>> tensor([sigmax(), sigmax()])
    Quantum object: dims = [[2, 2], [2, 2]], \
shape = [4, 4], type = oper, isHerm = True
    Qobj data =
    [[ 0.+0.j  0.+0.j  0.+0.j  1.+0.j]
     [ 0.+0.j  0.+0.j  1.+0.j  0.+0.j]
     [ 0.+0.j  1.+0.j  0.+0.j  0.+0.j]
     [ 1.+0.j  0.+0.j  0.+0.j  0.+0.j]]
    """
    if not args:
        raise TypeError("Requires at least one input argument")

    if len(args) == 1 and isinstance(args[0], (list, np.ndarray)):
        # this is the case when tensor is called on the form:
        # tensor([q1, q2, q3, ...])
        qlist = args[0]
    elif len(args) == 1 and isinstance(args[0], Qobj):
        # tensor is called with a single Qobj as an argument, do nothing
        return args[0]
    else:
        # this is the case when tensor is called on the form:
        # tensor(q1, q2, q3, ...)
        qlist = args

    if not all(isinstance(q, Qobj) for q in qlist):
        # raise error if one of the inputs is not a quantum object
        raise TypeError("One of inputs is not a quantum object")

    out = Qobj()

    if qlist[0].issuper:
        out.superrep = qlist[0].superrep
        # BUGFIX: the original message concatenated "...must" + "have..."
        # without a space ("musthave") and misspelled "superoperators".
        if not all(q.superrep == out.superrep for q in qlist):
            raise TypeError("In tensor products of superoperators, all must "
                            "have the same representation")

    out.isherm = True
    for n, q in enumerate(qlist):
        if n == 0:
            out.data = q.data
            out.dims = q.dims
        else:
            # Accumulate the Kronecker product left-to-right.
            out.data = sp.kron(out.data, q.data, format='csr')
            out.dims = [out.dims[0] + q.dims[0], out.dims[1] + q.dims[1]]
        out.isherm = out.isherm and q.isherm

    if not out.isherm:
        # Reset the cached hermiticity flag so it is recomputed on demand.
        out._isherm = None

    return out.tidyup() if qutip.settings.auto_tidyup else out
def super_tensor(*args):
    """Calculates the tensor product of input superoperators, by tensoring
    together the underlying Hilbert spaces on which each vectorized operator
    acts.

    Parameters
    ----------
    args : array_like
        ``list`` or ``array`` of quantum objects with ``type="super"``.

    Returns
    -------
    obj : qobj
        A composite quantum object.
    """
    if isinstance(args[0], list):
        args = args[0]

    # Dispatch on what is being tensored: superoperators, operator-kets, or
    # operator-bras (the last is reduced to the operator-ket case by daggers).
    if all(arg.issuper for arg in args):
        if not all(arg.superrep == "super" for arg in args):
            raise TypeError(
                "super_tensor on type='super' is only implemented for "
                "superrep='super'."
            )
        # Reshuffle each superoperator, tensor them, then unshuffle back.
        out = reshuffle(tensor([reshuffle(arg) for arg in args]))
        out.superrep = args[0].superrep
        return out

    if all(arg.isoperket for arg in args):
        # Same reshuffle / tensor / unshuffle dance for vectorized operators.
        return reshuffle(tensor([reshuffle(arg) for arg in args]))

    if all(arg.isoperbra for arg in args):
        # Dagger down to operator-kets, recurse, and dagger back.
        return super_tensor(*(arg.dag() for arg in args)).dag()

    raise TypeError(
        "All arguments must be the same type, "
        "either super, operator-ket or operator-bra."
    )
def _isoperlike(q):
    # Oper-like: an ordinary operator or a superoperator.
    return q.isoper or q.issuper
def _isketlike(q):
    # Ket-like: an ordinary ket or a vectorized operator (operator-ket).
    return q.isket or q.isoperket
def _isbralike(q):
    # Bra-like: an ordinary bra or an operator-bra.
    return q.isbra or q.isoperbra
def composite(*args):
    """
    Given two or more operators, kets or bras, returns the Qobj
    corresponding to a composite system over each argument.
    For ordinary operators and vectors, this is the tensor product,
    while for superoperators and vectorized operators, this is
    the column-reshuffled tensor product.

    If a mix of Qobjs supported on Hilbert and Liouville spaces
    are passed in, the former are promoted. Ordinary operators
    are assumed to be unitaries, and are promoted using ``to_super``,
    while kets and bras are promoted by taking their projectors and
    using ``operator_to_vector(ket2dm(arg))``.
    """
    # First step will be to ensure everything is a Qobj at all.
    if not all(isinstance(arg, Qobj) for arg in args):
        raise TypeError("All arguments must be Qobjs.")

    # Next, figure out if we have something oper-like (isoper or issuper),
    # or something ket-like (isket or isoperket). Bra-like we'll deal with
    # by turning things into ket-likes and back.
    if all(map(_isoperlike, args)):
        # OK, we have oper/supers.
        if any(arg.issuper for arg in args):
            # Note that to_super does nothing to things
            # that are already type=super, while it will
            # promote unitaries to superunitaries.
            return super_tensor(*map(qutip.superop_reps.to_super, args))
        else:
            # Everything's just an oper, so ordinary tensor products work.
            return tensor(*args)
    elif all(map(_isketlike, args)):
        # Ket-likes.
        if any(arg.isoperket for arg in args):
            # We have a vectorized operator, so we may need to promote
            # something.
            return super_tensor(*(
                arg if arg.isoperket
                else operator_to_vector(qutip.states.ket2dm(arg))
                for arg in args
            ))
        else:
            # Everything's ordinary, so we can use the tensor product here.
            return tensor(*args)
    elif all(map(_isbralike, args)):
        # Turn into ket-likes and recurse.
        return composite(*(arg.dag() for arg in args)).dag()
    else:
        raise TypeError("Unsupported Qobj types [{}].".format(
            ", ".join(arg.type for arg in args)
        ))
def flatten(l):
    """Flattens a list of lists to the first level.

    Given a list containing a mix of scalars and lists,
    flattens down to a list of the scalars within the original
    list.

    Examples
    --------
    >>> print(flatten([[[0], 1], 2]))
    [0, 1, 2]
    """
    # A scalar flattens to a singleton list so recursion composes uniformly.
    if not isinstance(l, list):
        return [l]
    flat = []
    for item in l:
        flat.extend(flatten(item))
    return flat
def _enumerate_flat(l, idx=0):
if not isinstance(l, list):
# Found a scalar, so return and increment.
return idx, idx + 1
else:
# Found a list, so append all the scalars
# from it and recurse to keep the increment
# correct.
acc = []
for elem in l:
labels, idx = _enumerate_flat(elem, idx)
acc.append(labels)
return acc, idx
def enumerate_flat(l):
    """Labels the indices at which scalars occur in a flattened list.

    Given a list containing a mix of scalars and lists,
    returns a list of the same structure, where each scalar
    has been replaced by an index into the flattened list.

    Examples
    --------
    >>> print(enumerate_flat([[[10], [20, 30]], 40]))
    [[[0], [1, 2]], 3]
    """
    # Discard the running counter returned by the recursive worker.
    return _enumerate_flat(l)[0]
def deep_remove(l, *what):
    """Removes scalars from all levels of a nested list.

    Given a list containing a mix of scalars and lists,
    returns a list of the same structure, but where one or
    more scalars have been removed.

    Examples
    --------
    >>> print(deep_remove([[[[0, 1, 2]], [3, 4], [5], [6, 7]]], 0, 5))
    [[[[1, 2]], [3, 4], [], [6, 7]]]
    """
    # Non-lists pass through untouched.
    if not isinstance(l, list):
        return l
    # Work on a shallow copy so the caller's list is never mutated.
    result = l[:]
    for target in what:
        if target in result:
            # Present at this level: drop its first occurrence only.
            result.remove(target)
        else:
            # Not at this level: recurse into every element.
            result = [deep_remove(item, target) for item in result]
    return result
def unflatten(l, idxs):
    """Unflattens a list by a given structure.

    Given a list of scalars and a deep list of indices
    as produced by `enumerate_flat`, returns an "unflattened"
    form of the list. This perfectly inverts `flatten`.

    Examples
    --------
    >>> l = [[[10, 20, 30], [40, 50, 60]], [[70, 80, 90], [100, 110, 120]]]
    >>> idxs = enumerate_flat(l)
    >>> unflatten(flatten(l), idxs) == l
    True
    """
    # Each index is either a nested structure (recurse) or a position into
    # the flat list (look up directly).
    return [unflatten(l, idx) if isinstance(idx, list) else l[idx]
            for idx in idxs]
def _tensor_contract_single(arr, i, j):
"""
Contracts a dense tensor along a single index pair.
"""
if arr.shape[i] != arr.shape[j]:
raise ValueError("Cannot contract over indices of different length.")
idxs = np.arange(arr.shape[i])
sl = tuple(slice(None, None, None)
if idx not in (i, j) else idxs for idx in range(arr.ndim))
return np.sum(arr[sl], axis=0)
def _tensor_contract_dense(arr, *pairs):
    """
    Contracts a dense tensor along one or more index pairs,
    keeping track of how the indices are relabeled by the removal
    of other indices.
    """
    # remaining maps original axis labels to current axis positions:
    # remaining.index(label) is the axis's position after prior contractions.
    remaining = list(range(arr.ndim))
    for pair in pairs:
        i, j = (remaining.index(label) for label in pair)
        arr = _tensor_contract_single(arr, i, j)
        # The contracted labels no longer exist; drop them from the map.
        for label in pair:
            remaining.remove(label)
    return arr
def tensor_contract(qobj, *pairs):
    """Contracts a qobj along one or more index pairs.

    Note that this uses dense representations and thus
    should *not* be used for very large Qobjs.

    Parameters
    ----------
    qobj : Qobj
        Operator whose index pairs are to be contracted.
    pairs : tuple
        One or more tuples ``(i, j)`` indicating that the
        ``i`` and ``j`` dimensions of the original qobj
        should be contracted.

    Returns
    -------
    cqobj : Qobj
        The original Qobj with all named index pairs contracted
        away.
    """
    # Record and label the original dims.
    dims = qobj.dims
    dims_idxs = enumerate_flat(dims)
    flat_dims = flatten(dims)
    # Convert to dense first, since sparse won't support the reshaping we need.
    qtens = qobj.data.toarray()
    # Reshape by the flattened dims.
    qtens = qtens.reshape(flat_dims)
    # Contract out the indices from the flattened object.
    qtens = _tensor_contract_dense(qtens, *pairs)
    # Remove the contracted indexes from dims so we know how to
    # reshape back.
    contracted_idxs = deep_remove(dims_idxs, *flatten(list(map(list, pairs))))
    contracted_dims = unflatten(flat_dims, contracted_idxs)
    # BUGFIX: np.product was deprecated in NumPy 1.25 and removed in 2.0;
    # np.prod is the long-standing equivalent.
    l_mtx_dims, r_mtx_dims = map(np.prod, contracted_dims)
    # Reshape back into a 2D matrix.
    qmtx = qtens.reshape((l_mtx_dims, r_mtx_dims))
    # Return back as a qobj.
    return Qobj(qmtx, dims=contracted_dims, superrep=qobj.superrep)
import qutip.states
| |
import os
import re
import struct
import sys
import threading
from pi3d.event import EventHandler
from pi3d.event import Format
from pi3d.event import Keys
from pi3d.event.FindDevices import find_devices
from pi3d.event.Constants import *
from pi3d.event.EventStream import EventStream
# NOTE: _KEYS is a generator expression; it is consumed entirely while
# building KEY_CODE below and is exhausted afterwards.
_KEYS = (k for k in vars(Keys) if not k.startswith('_'))
# Key name -> numeric key code.
KEY_CODE = dict((k, getattr(Keys, k)) for k in _KEYS)
# Reverse mapping: numeric key code -> key name (Python 2: iteritems).
CODE_KEY = dict((v, k) for k, v in KEY_CODE.iteritems())
def key_to_code(key):
    # Accepts either a key-name string or an already-numeric code; unknown
    # names map to -1.  (Python 2 module: uses basestring.)
    return KEY_CODE.get(str(key), -1) if isinstance(key, basestring) else key
def code_to_key(code):
    # Reverse lookup; unknown codes map to the empty string.
    return CODE_KEY.get(code, '')
class InputEvents(object):
    """
    Encapsulates the entire InputEvents subsystem.

    This is generally all you need to import. For efficiency reasons you may
    want to make use of CodeOf[ ], but everything else is hidden behind this class.

    On instantiation, we open all devices that are keyboards, mice or joysticks.
    That means we might have two of one sort of another, and that might be a problem,
    but it would be rather rare.

    There are several ABS (joystick, touch) events that we do not handle. In
    particular: THROTTLE, RUDDER, WHEEL, GAS, BRAKE, HAT1, HAT2, HAT3, PRESSURE,
    DISTANCE, TILT, TOOL_WIDTH. Implementing these is left as an exercise
    for the interested reader. Similarly, we make no attempt to handle multi-touch.

    Handlers can be supplied, in which case they are called for each event, but
    it isn't necessary; API exists for all the events.

    The handler signatures are:
      def mouse_handler_func(sourceType, SourceIndex, x, y, v, h):
      def joystick_handler_func(sourceType, SourceIndex, x1, y1, z1, x2, y2, z2, hatx, haty):
      def key_handler_func(sourceType, SourceIndex, key, value):
      def syn_handler_func(sourceType, SourceIndex, code, value):
      def unhandled_handler_func(event):

    Where "sourceType" is the device type string (keyboard, mouse, joystick),
    sourceIndex is an incrementing number for each device of that type, starting
    at zero, and event is an EventStruct object. Key is the key code, not its
    ASCII value or anything simple. Use key_to_code() to convert from the name of a
    key to its code, and code_to_key() to convert a code to a name. The keys are
    listed in pi3d.event.Constants.py or /usr/include/linux/input.h Note that the key
    names refer to a US keyboard.

    NOTE: this is a Python 2 module (print statements, list-returning map()).
    """

    def __init__(self, keyboardHandler=None, mouseHandler=None, joystickHandler=None, synHandler=None, unhandledHandler=None, wantKeyboard=True, wantMouse=True, wantJoystick=True):
        # Open one EventStream per requested device; remember the handler for
        # events no specific handler claims.
        self.unhandledHandler = unhandledHandler
        self.streams = [ ]

        if wantKeyboard:
            keyboards = find_devices("kbd")
            self.streams += map(lambda x: EventStream(x, "keyboard"),keyboards)
        else:
            keyboards = [ ]
        print "keyboards =", keyboards
        if wantMouse:
            # Exclude devices already claimed as keyboards (combo devices).
            mice = find_devices("mouse", butNot=keyboards)
            print "mice = ", mice
            self.streams += map(lambda x: EventStream(x, "mouse"), mice)
        else:
            mice = [ ]
        if wantJoystick:
            joysticks = find_devices("js", butNot=keyboards+mice)
            print "joysticks =", joysticks
            js_streams = map(lambda x: EventStream(x, "joystick"), joysticks)
            self.streams += js_streams
        # Read each device's absolute-axis calibration up front.
        for x in self.streams:
            x.acquire_abs_info()

        self.handler = EventHandler.EventHandler(
            keyboardHandler, mouseHandler, joystickHandler, synHandler)

    def do_input_events(self):
        """
        Handle all events that have been triggered since the last call.
        """
        for event in EventStream.allNext(self.streams):
            # handler.event() returns truthy when it did NOT consume the event.
            if self.handler.event(event) and self.unhandledHandler:
                self.unhandledHandler(event)

    def key_state(self,key):
        """
        Returns the state of the given key.

        The returned value will be 0 for key-up, or 1 for key-down. This method
        returns a key-held(2) as 1 to aid in using the returned value as a
        movement distance.

        This function accepts either the key code or the string name of the key.
        It would be more efficient to look-up and store the code of
        the key with KEY_CODE[ ], rather than using the string every time. (Which
        involves a dict look-up keyed with a string for every key_state call, every
        time around the loop.)

        Gamepad keys are:
        Select = BTN_BASE3, Start = BTN_BASE4
        L1 = BTN_TOP         R1 = BTN_BASE
        L2 = BTN_PINKIE      R2 = BTN_BASE2
        The action buttons are:
        BTN_THUMB
        BTN_TRIGGER    BTN_TOP
        BTN_THUMB2
        Analogue Left Button = BTN_BASE5
        Analogue Right Button = BTN_BASE6

        Some of those may clash with extended mouse buttons, so if you are using
        both at once, you'll see some overlap.

        The direction pad is hat0 (see get_hat)
        """
        return self.handler.key_state(key_to_code(key))

    def clear_key(self,key):
        """
        Clears the state of the given key.

        Emulates a key-up, but does not call any handlers.
        """
        return self.handler.clear_key(key_to_code(key))

    def get_joystick(self, index=0):
        """
        Returns the x,y coordinates for a joystick or left gamepad analogue stick.

        index is the device index and defaults to 0 -- the first joystick device

        The values are returned as a tuple. All values are -1.0 to +1.0 with
        0.0 being centred.
        """
        return (self.handler.absx[index], self.handler.absy[index])

    def get_joystick3d(self, index=0):
        """
        Returns the x,y,z coordinates for a joystick or left gamepad analogue stick

        index is the device index and defaults to 0 -- the first joystick device

        The values are returned as a tuple. All values are -1.0 to +1.0 with
        0.0 being centred.
        """
        return (self.handler.absx[index], self.handler.absy[index], self.handler.absz[index])

    def get_joystickR(self,index=0):
        """
        Returns the x,y coordinates for a right gamepad analogue stick.

        index is the device index and defaults to 0 -- the first joystick device

        The values are returned as a tuple. For some odd reason, the gamepad
        returns values in the Z axes of both joysticks, with y being the first.
        All values are -1.0 to +1.0 with 0.0 being centred.
        """
        return (self.handler.absz2[index], self.handler.absz[index])

    def get_joystickB3d(self, index=0):
        """
        Returns the x,y,z coordinates for a 2nd joystick control

        index is the device index and defaults to 0 -- the first joystick device

        The values are returned as a tuple. All values are -1.0 to +1.0 with
        0.0 being centred.
        """
        return (self.handler.absx2[index], self.handler.absy2[index], self.handler.absz2[index])

    def get_hat(self, index=0):
        """
        Returns the x,y coordinates for a joystick hat or gamepad direction pad

        index is the device index and defaults to 0 -- the first joystick device

        The values are returned as a tuple. All values are -1.0 to +1.0 with
        0.0 being centred.
        """
        return (self.handler.abshatx[index], self.handler.abshaty[index])

    def get_mouse_movement(self, index=0):
        """
        Returns the accumulated mouse movements since the last call.

        index is the device index and defaults to 0 -- the first mouse device

        The returned value is a tuple: (X, Y, WHEEL, H-WHEEL)
        """
        return self.handler.get_rel_movement(index)

    def grab_by_type(self, deviceType, deviceIndex=None, grab=True):
        """
        Grab (or release) exclusive access to all devices of the given type.

        The devices are grabbed if grab is True and released if grab is False.
        If the deviceIndex is given, only that device is grabbed, otherwise all
        the devices of the same type are grabbed.

        All devices are grabbed to begin with. We might want to ungrab the
        keyboard for example to use it for text entry. While not grabbed, all key-down
        and key-hold events are filtered out, but that only works if the events
        are received and handled while the keyboard is still grabbed, and the loop
        may not have been running. So if we are grabbing a device, we call the
        handling loop first, so there are no outstanding events.

        Note that the filtering means that if you trigger the ungrab from a
        key-down event, the corresponding key-up will be actioned before the
        subsequent grab, and you won't end up looping continuously. However it
        also means that you will see key-up events for all the text entry. Since
        it only affects a user-supplied key-handler, and key-ups do not usually
        trigger actions anyway, this is not likely to be a problem. If it is,
        you will have to filter them yourself.
        """
        if grab:
            # Drain pending events first so nothing is lost across the grab.
            self.do_input_events()
        EventStream.grab_by_type(deviceType, deviceIndex, grab, self.streams)

    def release(self):
        """
        Ungrabs all streams and closes all files.

        Only do this when you're finished with this object. You can't use it again.
        """
        for s in self.streams:
            s.release()
| |
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to transform and merge sancov files into human readable json-format.
The script supports three actions:
all: Writes a json file with all instrumented lines of all executables.
merge: Merges sancov files with coverage output into an existing json file.
split: Split json file into separate files per covered source file.
The json data is structured as follows:
{
"version": 1,
"tests": ["executable1", "executable2", ...],
"files": {
"file1": [[<instr line 1>, <bit_mask>], [<instr line 2>, <bit_mask>], ...],
"file2": [...],
...
}
}
The executables are sorted and determine the test bit mask. Their index+1 is
the bit, e.g. executable1 = 1, executable3 = 4, etc. Hence, a line covered by
executable1 and executable3 will have bit_mask == 5 == 0b101. The number of
tests is restricted to 52 in version 1, to allow javascript JSON parsing of
the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
The line-number-bit_mask pairs are sorted by line number and don't contain
duplicates.
Split json data preserves the same format, but only contains one file per
json file.
The sancov tool is expected to be in the llvm compiler-rt third-party
directory. It's not checked out by default and must be added as a custom deps:
'v8/third_party/llvm/projects/compiler-rt':
'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
"""
import argparse
import json
import logging
import os
import re
import subprocess
import sys
from multiprocessing import Pool, cpu_count
# Configure the root logger once at import; all progress below uses logging.
logging.basicConfig(level=logging.INFO)

# Files to exclude from coverage. Dropping their data early adds more speed.
# The contained cc files are already excluded from instrumentation, but inlined
# data is referenced through v8's object files.
EXCLUSIONS = [
  'buildtools',
  'src/third_party',
  'third_party',
  'test',
  'testing',
]

# Executables found in the build output for which no coverage is generated.
# Exclude them from the coverage data file.
EXE_BLACKLIST = [
  'generate-bytecode-expectations',
  'hello-world',
  'mksnapshot',
  'parser-shell',
  'process',
  'shell',
]

# V8 checkout directory.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.abspath(__file__))))

# Executable location. TODO(machenbach): Only release is supported for now.
BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')

# Path prefix added by the llvm symbolizer including trailing slash.
OUTPUT_PATH_PREFIX = os.path.join(BUILD_DIR, '..', '..', '')

# The sancov tool location.
SANCOV_TOOL = os.path.join(
    BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
    'lib', 'sanitizer_common', 'scripts', 'sancov.py')

# Simple script to sanitize the PCs from objdump.
SANITIZE_PCS = os.path.join(BASE_DIR, 'tools', 'sanitizers', 'sanitize_pcs.py')

# The llvm symbolizer location.
SYMBOLIZER = os.path.join(
    BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
    'llvm-symbolizer')

# Number of cpus.
CPUS = cpu_count()

# Regexp to find sancov files as output by sancov_merger.py. Also grabs the
# executable name in group 1.
SANCOV_FILE_RE = re.compile(r'^(.*)\.result.sancov$')
def executables():
  """Iterates over executable files in the build directory."""
  for entry in os.listdir(BUILD_DIR):
    # Skip tools known not to produce coverage data.
    if entry in EXE_BLACKLIST:
      continue
    candidate = os.path.join(BUILD_DIR, entry)
    # Only yield regular files with the executable bit set.
    if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
      yield candidate
def process_symbolizer_output(output):
  """Post-process llvm symbolizer output.

  Excludes files outside the v8 checkout or given in exclusion list above
  from further processing. Drops the character index in each line.

  Returns: A mapping of file names to lists of line numbers. The file names
           have relative paths to the v8 base directory. The lists of line
           numbers don't contain duplicate lines and are sorted.
  """
  prefix_len = len(OUTPUT_PATH_PREFIX)
  # Map file names to sets of instrumented line numbers. Lines outside the
  # build-output prefix (e.g. c++ library headers) are dropped here.
  file_map = {}
  for raw in output.strip().splitlines():
    if not raw.startswith(OUTPUT_PATH_PREFIX):
      continue
    # Each line has the form: <file name>:<line number>:<character number>;
    # only the file and line number are kept.
    file_name, number, _ = raw[prefix_len:].split(':')
    file_map.setdefault(file_name, set()).add(int(number))

  def keep(file_name):
    # Cheaper to filter whole files here than per symbolizer line above.
    return not any(file_name.startswith(e) for e in EXCLUSIONS)

  # Return in serializable form and filter.
  return {k: sorted(v) for k, v in file_map.items() if keep(k)}
def get_instrumented_lines(executable):
  """Return the instrumented lines of an executable.

  Called through multiprocessing pool.

  Returns: Post-processed llvm output as returned by process_symbolizer_output.
  """
  # The first two pipes are from llvm's tool sancov.py with 0x added to the hex
  # numbers. The results are piped into the llvm symbolizer, which outputs for
  # each PC: <file name with abs path>:<line number>:<character number>.
  # We don't call the sancov tool to get more speed.
  process = subprocess.Popen(
    'objdump -d %s | '
    'grep \'^\s\+[0-9a-f]\+:.*\scall\(q\|\)\s\+[0-9a-f]\+ '
    '<__sanitizer_cov\(_with_check\|\)\(@plt\|\)>\' | '
    'grep \'^\s\+[0-9a-f]\+\' -o | '
    '%s | '
    '%s --obj %s -functions=none' %
    (executable, SANITIZE_PCS, SYMBOLIZER, executable),
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    stdin=subprocess.PIPE,
    cwd=BASE_DIR,
    shell=True,
  )
  output, _ = process.communicate()
  # The whole pipeline must succeed; a failure here means a broken toolchain.
  assert process.returncode == 0
  return process_symbolizer_output(output)
def merge_instrumented_line_results(exe_list, results):
  """Merge multiprocessing results for all instrumented lines.

  Args:
    exe_list: List of all executable names with absolute paths.
    results: List of results as returned by get_instrumented_lines.

  Returns: Dict to be used as json data as specified on the top of this page.
           The dictionary contains all instrumented lines of all files
           referenced by all executables.
  """
  # BUGFIX: the original used py2-only dict.iteritems() and the builtin
  # reduce(), and wrapped values in map() objects that json can't serialize
  # on python 3. The rewrite below is valid on both python 2.7 and 3.
  merged = {}
  for result in results:
    for file_name, lines in result.items():
      merged.setdefault(file_name, set()).update(lines)

  # Return data as file->lines mapping. The lines are saved as lists
  # with (line number, test bits (as int)). The test bits are initialized with
  # 0, meaning instrumented, but no coverage.
  # The order of the test bits is given with key 'tests'. For now, these are
  # the executable names. We use a _list_ with two items instead of a tuple to
  # ease merging by allowing mutation of the second item.
  return {
    'version': 1,
    'tests': sorted(os.path.basename(exe) for exe in exe_list),
    'files': {f: [[line, 0] for line in sorted(lines)]
              for f, lines in merged.items()},
  }
def write_instrumented(options):
  """Implements the 'all' action of this tool."""
  exe_list = list(executables())
  logging.info('Reading instrumented lines from %d executables.',
               len(exe_list))
  pool = Pool(CPUS)
  try:
    results = pool.imap_unordered(get_instrumented_lines, exe_list)
  finally:
    # close() only forbids new task submission; imap_unordered results are
    # lazily consumed below.
    pool.close()

  # Merge multiprocessing results and prepare output data.
  data = merge_instrumented_line_results(exe_list, results)

  logging.info('Read data from %d executables, which covers %d files.',
               len(data['tests']), len(data['files']))
  logging.info('Writing results to %s', options.json_output)

  # Write json output.
  with open(options.json_output, 'w') as f:
    json.dump(data, f, sort_keys=True)
def get_covered_lines(args):
  """Return the covered lines of an executable.

  Called through multiprocessing pool. The args are expected to unpack to:
    cov_dir: Folder with sancov files merged by sancov_merger.py.
    executable: The executable that was called to produce the given coverage
                data.
    sancov_file: The merged sancov file with coverage data.

  Returns: A tuple of post-processed llvm output as returned by
           process_symbolizer_output and the executable name.
  """
  cov_dir, executable, sancov_file = args

  # Let the sancov tool print the covered PCs and pipe them through the llvm
  # symbolizer.
  cmd = '%s print %s 2> /dev/null | %s --obj %s -functions=none' % (
      SANCOV_TOOL,
      os.path.join(cov_dir, sancov_file),
      SYMBOLIZER,
      os.path.join(BUILD_DIR, executable),
  )
  process = subprocess.Popen(
      cmd,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      stdin=subprocess.PIPE,
      cwd=BASE_DIR,
      shell=True,
  )
  output, _ = process.communicate()
  assert process.returncode == 0
  return process_symbolizer_output(output), executable
def merge_covered_line_results(data, results):
  """Merge multiprocessing results for covered lines.

  The data is mutated, the results are merged into it in place.

  Args:
    data: Existing coverage data from json file containing all instrumented
          lines.
    results: List (or lazy iterable) of results as returned by
             get_covered_lines.
  """
  # List of executables and mapping to the test bit mask. The number of
  # tests is restricted to 52, to allow javascript JSON parsing of
  # the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
  exe_list = data['tests']
  assert len(exe_list) <= 52, 'Max 52 different tests are supported.'
  test_bit_masks = {exe:1<<i for i, exe in enumerate(exe_list)}

  def merge_lines(old_lines, new_lines, mask):
    """Merge the coverage data of a list of lines.

    Args:
      old_lines: Lines as list of pairs with line number and test bit mask.
                 The new lines will be merged into the list in place.
      new_lines: List of new (covered) lines (sorted).
      mask: The bit to be set for covered lines. The bit index is the test
            index of the executable that covered the line.
    """
    i = 0
    # Iterate over old and new lines, both are sorted; advance through the
    # instrumented lines once (linear merge) instead of searching per line.
    for l in new_lines:
      while old_lines[i][0] < l:
        # Forward instrumented lines not present in this coverage data.
        i += 1
        # TODO: Add more context to the assert message.
        assert i < len(old_lines), 'Covered line %d not in input file.' % l
      assert old_lines[i][0] == l, 'Covered line %d not in input file.' % l

      # Add coverage information to the line (set this executable's bit).
      old_lines[i][1] |= mask

  def merge_files(data, result):
    """Merge result into data.

    The data is mutated in place.

    Args:
      data: Merged coverage data from the previous reduce step.
      result: New result to be merged in. The type is as returned by
              get_covered_lines.
    """
    file_map, executable = result
    files = data['files']
    for file_name, lines in file_map.iteritems():
      merge_lines(files[file_name], lines, test_bit_masks[executable])
    return data

  # Fold every per-executable result into the shared data dict.
  reduce(merge_files, results, data)
def merge(options):
  """Implements the 'merge' action of this tool."""
  # Check if folder with coverage output exists.
  assert (os.path.exists(options.coverage_dir) and
          os.path.isdir(options.coverage_dir))

  # Inputs for multiprocessing. List of tuples of:
  # Coverage dir, executable name, sancov file name.
  inputs = []
  for f in os.listdir(options.coverage_dir):
    match = SANCOV_FILE_RE.match(f)
    if match:
      # group(1) is the executable name encoded in the sancov file name.
      inputs.append((options.coverage_dir, match.group(1), f))

  logging.info('Merging %d sancov files into %s',
               len(inputs), options.json_input)

  # Post-process covered lines in parallel.
  pool = Pool(CPUS)
  try:
    results = pool.imap_unordered(get_covered_lines, inputs)
  finally:
    pool.close()

  # Load existing json data file for merging the results.
  with open(options.json_input, 'r') as f:
    data = json.load(f)

  # Merge multiprocessing results. Mutates data.
  merge_covered_line_results(data, results)

  logging.info('Merged data from %d executables, which covers %d files.',
               len(data['tests']), len(data['files']))
  logging.info('Writing results to %s', options.json_output)

  # Write merged results to file.
  with open(options.json_output, 'w') as f:
    json.dump(data, f, sort_keys=True)
def split(options):
  """Implements the 'split' action of this tool."""
  # Load existing json data file for splitting.
  with open(options.json_input, 'r') as f:
    data = json.load(f)

  logging.info('Splitting off %d coverage files from %s',
               len(data['files']), options.json_input)

  for file_name, coverage in data['files'].items():
    # Preserve relative directories that are part of the file name.
    file_path = os.path.join(options.output_dir, file_name + '.json')
    try:
      os.makedirs(os.path.dirname(file_path))
    except OSError:
      # Ignore existing directories.
      pass

    # Shallow-copy the full data set, then narrow it down to this one file.
    single_file_data = dict(data)
    single_file_data['files'] = {file_name: coverage}

    # Write json data.
    with open(file_path, 'w') as out_file:
      json.dump(single_file_data, out_file, sort_keys=True)
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--coverage-dir',
help='Path to the sancov output files.')
parser.add_argument('--json-input',
help='Path to an existing json file with coverage data.')
parser.add_argument('--json-output',
help='Path to a file to write json output to.')
parser.add_argument('--output-dir',
help='Directory where to put split output files to.')
parser.add_argument('action', choices=['all', 'merge', 'split'],
help='Action to perform.')
options = parser.parse_args(args)
if options.action.lower() == 'all':
if not options.json_output:
print '--json-output is required'
return 1
write_instrumented(options)
elif options.action.lower() == 'merge':
if not options.coverage_dir:
print '--coverage-dir is required'
return 1
if not options.json_input:
print '--json-input is required'
return 1
if not options.json_output:
print '--json-output is required'
return 1
merge(options)
elif options.action.lower() == 'split':
if not options.json_input:
print '--json-input is required'
return 1
if not options.output_dir:
print '--output-dir is required'
return 1
split(options)
return 0
# Script entry point: propagate main()'s return code as the exit status.
if __name__ == '__main__':
  sys.exit(main())
| |
#!/usr/bin/env python
# build_docs.py
# Copyright (c) 2013-2016 Pablo Acosta-Serafini
# See LICENSE for details
# pylint: disable=C0111,F0401,R0914,W0141
# Standard library imports
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import multiprocessing
import os
import re
import shutil
import sys
# PyPI imports
from cogapp import Cog
# Putil imports
import sbin.refresh_moddb
import sbin.build_moddb
import sbin.functions
###
# Global variables
###
# putil.plot submodules that carry auto-generated exception documentation.
PLOT_SUBMODULES = [
    'basic_source', 'csv_source', 'figure', 'functions', 'panel', 'series'
]
# putil.pcsv submodules that carry auto-generated exception documentation.
PCSV_SUBMODULES = [
    'concatenate', 'csv_file', 'dsort', 'merge', 'replace', 'write'
]
# Top-level modules accepted by the module_name command-line argument.
VALID_MODULES = ['eng', 'pcsv', 'plot', 'tree']
###
# Functions
###
def build_pkg_docs(args):
    """
    Build package documentation: optionally regenerate the cog-traced
    exception documentation, run module-specific helpers, re-insert source
    files into docstrings and reST files, regenerate the top-level README
    and finally build the HTML output with Sphinx.

    Returns 0 on success; in test mode, 1 when any regenerated file differs
    from the checked-in original.
    """
    # pylint: disable=R0912,R0915
    debug = False
    retcode = 0
    pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    src_dir = args.directory
    cog_exe = which('cog.py')
    if not cog_exe:
        raise RuntimeError('cog binary could not be found')
    # NOTE(review): NOPTION/TRACER_DIR are exported for the cog snippets in
    # the sources -- presumably to parallelize tracing; confirm against the
    # scripts under docs/support.
    os.environ['NOPTION'] = (
        '-n {0}'.format(args.num_cpus) if args.num_cpus > 1 else ''
    )
    rebuild = args.rebuild
    test = args.test
    modules = args.module_name
    tracer_dir = os.path.join(pkg_dir, 'docs', 'support')
    os.environ['TRACER_DIR'] = tracer_dir
    # Processing
    del_pkl_files(test, tracer_dir)
    print('Rebuilding documentation')
    if debug:
        print('Python: {0}'.format(sys.executable))
        print('PATH: {0}'.format(os.environ['PATH']))
        print('PYTHONPATH: {0}'.format(os.environ['PYTHONPATH']))
        print('Cog: {0}'.format(cog_exe))
        print('sbin.functions: {0}'.format(sbin.functions.__file__))
        print(
            'sbin.functions.subprocess: {0}'.format(
                sbin.functions.subprocess.__file__
            )
        )
    if rebuild or test:
        sbin.refresh_moddb.refresh_moddb()
        print_cyan(
            'Rebuilding exceptions documentation{0}'.format(
                ' (test mode)' if test else ''
            ),
        )
        start_time = datetime.datetime.today()
        for module in modules:
            tmp_retcode = rebuild_module_doc(
                test, src_dir, tracer_dir, cog_exe, module, debug
            )
            # Keep the first non-zero return code seen.
            retcode = tmp_retcode if not retcode else retcode
        stop_time = datetime.datetime.today()
        print(
            'Elapsed time: {0}'.format(
                elapsed_time_string(start_time, stop_time)
            )
        )
        sbin.build_moddb.build_moddb()
    print('Performing module-specific actions')
    for module in modules:
        process_module(tracer_dir, module)
    print('Inserting files into docstrings')
    insert_files_in_docstrings(src_dir, cog_exe)
    insert_files_in_rsts(pkg_dir, cog_exe)
    generate_top_level_readme(pkg_dir)
    print('Generating HTML output')
    # Always start from a clean Sphinx build tree.
    shutil.rmtree(os.path.join(pkg_dir, 'docs', '_build'), ignore_errors=True)
    cwd = os.getcwd()
    os.chdir(os.path.join(pkg_dir, 'docs'))
    sbin.functions.shcmd(
        [
            'sphinx-build',
            '-b', 'html',
            '-d', os.path.join('_build', 'doctrees'),
            '-W', '.',
            os.path.join('_build', 'html')
        ],
        'Error building Sphinx documentation',
        async_stdout=True
    )
    # Copy built documentation to its own directory
    # dest_dir = os.path.join(pkg_dir, 'docs', 'html')
    # src_dir = os.path.join(pkg_dir, 'docs', '_build', 'html')
    # shutil.rmtree(dest_dir, ignore_errors=True)
    # shutil.copytree(src_dir, dest_dir)
    os.chdir(cwd)
    return retcode
def copy_file(src, dest):
    """ Copy file (potentially overwriting existing file) """
    # Clear out any pre-existing destination (missing file is fine), then
    # copy the source over.
    del_file(dest)
    shutil.copy(src, dest)
def del_file(fname):
    """ Delete file """
    try:
        os.remove(fname)
    except OSError:
        # Best effort: a file that is already gone (or cannot be removed)
        # is silently ignored.
        pass
def del_pkl_files(test, tracer_dir):
    """ Delete all old pickle files """
    # Stale pickle caches are only purged in test mode, so normal builds
    # can reuse them.
    if not test:
        return
    for subdir in ('', 'plot', 'pcsv'):
        pattern = os.path.join(tracer_dir, subdir, '*.pkl')
        for pkl_file in glob.glob(pattern):
            os.remove(pkl_file)
def diff(file1, file2):
    """ Diff two files """
    def read_stripped(fname):
        # Compare without trailing whitespace so line-ending noise does not
        # show up as differences.
        with open(fname, 'r') as fobj:
            return [item.rstrip() for item in fobj.readlines()]
    return list(
        difflib.unified_diff(
            read_stripped(file1),
            read_stripped(file2),
            fromfile=file1,
            tofile=file2
        )
    )
def elapsed_time_string(start_time, stop_time):
    """
    Returns a formatted string with the elapsed time between two time points
    """
    delta = stop_time-start_time
    remainder = int(
        (delta.microseconds+(delta.seconds+delta.days*24*3600)*10**6)/10**6
    )
    # Break the total seconds down unit by unit; months are approximated as
    # 30 days and years as 365 days.
    parts = []
    for unit_name, unit_seconds in (
            ('year', 365*24*60*60),
            ('month', 30*24*60*60),
            ('day', 24*60*60),
            ('hour', 60*60),
            ('minute', 60),
            ('second', 1)):
        amount, remainder = divmod(remainder, unit_seconds)
        if amount > 0:
            parts.append(
                '{0} {1}{2}'.format(
                    amount, unit_name, 's' if amount > 1 else ''
                )
            )
    if not parts:
        return 'None'
    if len(parts) == 1:
        return parts[0]
    return ', '.join(parts[:-1])+' and '+parts[-1]
def insert_files_in_docstrings(src_dir, cog_exe):
    """
    Cog-insert source files in docstrings.

    For each target module the old insertions are first deleted (cog -x)
    and then regenerated, so the result is idempotent.

    :param src_dir: Package source directory
    :param cog_exe: Absolute path to the cog executable
    :raises: RuntimeError when either cog pass fails
    """
    modules = ['misc', 'pcontracts', 'plot', 'tree']
    for module in modules:
        if module == 'plot':
            module_dir = os.path.join(src_dir, 'plot')
            # Work on a *copy* that excludes 'series'. The original code
            # popped from PLOT_SUBMODULES itself, permanently removing
            # 'series' from the module-level list and corrupting any later
            # user of PLOT_SUBMODULES (e.g. rebuild_module_doc).
            submodules = [item for item in PLOT_SUBMODULES if item != 'series']
        else:
            module_dir = src_dir
            submodules = [module]
        for submodule in submodules:
            smf = os.path.join(module_dir, submodule+'.py')
            print(' Processing module {0}'.format(smf))
            # First pass (-x): delete previous insertions.
            retcode = Cog().main(
                [
                    cog_exe,
                    "--markers==[=cog =]= =[=end=]=",
                    '-e', '-x', '-o', smf+'.tmp', smf
                ],
            )
            if retcode:
                raise RuntimeError(
                    'Error deleting insertion of source files in '
                    'docstrings in module {0}'.format(submodule)
                )
            # Second pass: regenerate the insertions.
            retcode = Cog().main(
                [
                    cog_exe,
                    "--markers==[=cog =]= =[=end=]=",
                    '-e', '-o', smf+'.tmp', smf
                ]
            )
            if retcode:
                raise RuntimeError(
                    'Error inserting source files in '
                    'docstrings in module {0}'.format(submodule)
                )
            move_file(smf+'.tmp', smf)
def insert_files_in_rsts(pkg_dir, cog_exe):
    """
    Cog-insert source files in Sphinx files.

    Mirrors insert_files_in_docstrings: delete old insertions (cog -x),
    then regenerate them in place.

    :param pkg_dir: Package root directory
    :param cog_exe: Absolute path to the cog executable
    :raises: RuntimeError when either cog pass fails
    """
    fnames = [
        os.path.join(pkg_dir, 'docs', 'README.rst'),
        os.path.join(pkg_dir, 'docs', 'pcontracts.rst')
    ]
    print('Inserting source files in documentation files')
    for fname in fnames:
        print(' Processing file {0}'.format(fname))
        retcode = Cog().main(
            [
                cog_exe,
                '-e', '-x', '-o', fname+'.tmp', fname
            ]
        )
        if retcode:
            raise RuntimeError(
                'Error deleting insertion of source files in '
                'documentation file {0}'.format(fname)
            )
        retcode = Cog().main(
            [
                cog_exe,
                '-e', '-o', fname+'.tmp', fname
            ]
        )
        if retcode:
            # Fixed copy-pasted message: this processes documentation
            # files, not module docstrings.
            raise RuntimeError(
                'Error inserting source files in '
                'documentation file {0}'.format(fname)
            )
        move_file(fname+'.tmp', fname)
def move_file(src, dest):
    """ Move file (potentially overwriting existing file) """
    # Docstring fixed: this moves (it called shutil.move), it does not copy.
    # Pre-delete the destination so the move behaves like an overwrite on
    # all platforms; a missing destination is not an error.
    try:
        os.remove(dest)
    except OSError:
        pass
    shutil.move(src, dest)
def pcolor(text, color, indent=0):
    r"""
    Returns a string that once printed is colorized (copied from putil.misc)

    :param text: Text to colorize
    :type  text: string

    :param color: Color to use, one of :code:`'black'`, :code:`'red'`,
                  :code:`'green'`, :code:`'yellow'`, :code:`'blue'`,
                  :code:`'magenta'`, :code:`'cyan'`, :code:`'white'` or
                  :code:`'none'` (case insensitive)
    :type  color: string

    :param indent: Number of spaces to prefix the output with
    :type  indent: integer

    :rtype: string

    :raises:
     * RuntimeError (Argument \`color\` is not valid)

     * RuntimeError (Argument \`indent\` is not valid)

     * RuntimeError (Argument \`text\` is not valid)

     * ValueError (Unknown color *[color]*)
    """
    esc_dict = {
        'black':30, 'red':31, 'green':32, 'yellow':33, 'blue':34, 'magenta':35,
        'cyan':36, 'white':37, 'none':-1
    }
    # Validate arguments in the same order as documented above.
    for arg, arg_name in ((text, 'text'), (color, 'color')):
        if not isinstance(arg, str):
            raise RuntimeError('Argument `{0}` is not valid'.format(arg_name))
    if not isinstance(indent, int):
        raise RuntimeError('Argument `indent` is not valid')
    color = color.lower()
    if color not in esc_dict:
        raise ValueError('Unknown color {color}'.format(color=color))
    prefix = ' '*indent
    if esc_dict[color] == -1:
        # 'none' produces plain (uncolored) indented text.
        return prefix+text
    return '\033[{0}m{1}{2}\033[0m'.format(esc_dict[color], prefix, text)
def print_diff(tlist, indent=3):
    """ Pretty prints file differences """
    pad = indent*' '
    # The first two entries are the unified-diff file headers; drop their
    # leading marker character and the trailing two characters.
    out_lines = [pad+tlist[0][1:-2], pad+tlist[1][1:-2]]
    for entry in tlist[2:]:
        out_lines.append(pad+str(entry.rstrip()))
    return '\n'.join(out_lines)
# Thin convenience wrappers around pcolor() for the few colors used by this
# script's progress/status output.
def print_cyan(text):
    """ Print text to STDOUT in cyan color """
    print(pcolor(text, 'cyan'))


def print_green(text):
    """ Print text to STDOUT in green color """
    print(pcolor(text, 'green'))


def print_red(text):
    """ Print text to STDOUT in red color """
    print(pcolor(text, 'red'))
def process_module(tracer_dir, module):
    """ Perform process-specific actions """
    # Only the plot module needs extra work: regenerate the example image
    # referenced by its documentation.
    if module != 'plot':
        return
    print(' Processing module {0}'.format(module))
    cmd = [
        'python',
        os.path.join(tracer_dir, 'plot_example_1.py'),
        'plot_example_1.png', '1'
    ]
    sbin.functions.shcmd(cmd, 'Error processing module {0}'.format(module))
def rebuild_module_doc(test, src_dir, tracer_dir, cog_exe, module, debug):
    """
    Re-run cog on a module (or its submodules) to regenerate the
    auto-generated exceptions documentation embedded in its docstrings.

    In test mode each regenerated file is diffed against a saved copy of
    the original and the original is then restored; a non-zero return code
    (1) flags that at least one file differed.
    """
    # pylint: disable=R0913
    retcode = 0
    pkl_dir = tracer_dir
    if module == 'plot':
        module_dir = os.path.join(src_dir, 'plot')
        submodules = PLOT_SUBMODULES
    elif module == 'pcsv':
        module_dir = os.path.join(src_dir, 'pcsv')
        submodules = PCSV_SUBMODULES
    else:
        module_dir = src_dir
        submodules = [module]
    for submodule in submodules:
        smf = os.path.join(module_dir, submodule+'.py')
        pkl_file = os.path.join(pkl_dir, submodule+'.pkl')
        print_cyan('Processing module {0}'.format(submodule))
        orig_file = smf+'.orig'
        if test:
            # Keep a pristine copy to diff against (and restore) later.
            shutil.copy(smf, orig_file)
        stdout = sbin.functions.shcmd(
            [sys.executable, cog_exe, '-e', '-o', smf+'.tmp', smf],
            (
                'Error generating exceptions documentation '
                'in module {0}'.format(smf)
            ),
            async_stdout=debug
        )
        # Replace the source with the cog-regenerated version.
        move_file(smf+'.tmp', smf)
        if test:
            diff_list = diff(smf, orig_file)
            if not diff_list:
                print_green(' File {0} identical from original'.format(smf))
                del_file(pkl_file)
            else:
                print_red(' File {0} differs from original'.format(smf))
                print(' Differences:')
                print(print_diff(diff_list))
                # Keep the differing output for inspection before restoring.
                copy_file(smf, smf+'.error')
                retcode = 1
            # Test mode never leaves regenerated content in place.
            move_file(orig_file, smf)
        else:
            print(stdout)
            del_file(pkl_file)
    return retcode
def generate_top_level_readme(pkg_dir):
    """
    Remove Sphinx-specific cross-references from top-level README.rst file,
    they are not rendered by either Bitbucket or GitHub
    """
    # pylint: disable=W0212
    docs_dir = os.path.abspath(os.path.join(pkg_dir, 'docs'))
    fname = os.path.join(docs_dir, 'README.rst')
    print('Generating top-level README.rst file')
    with open(fname, 'r') as fobj:
        lines = [item.rstrip() for item in fobj.readlines()]
    # Matches Sphinx roles of the form :py:mod:`label <putil.module>`.
    ref_regexp = re.compile('.*:py:mod:`(.+) <putil.(.+)>`.*')
    # Matches reST directives such as '.. some-directive:: args'.
    rst_cmd_regexp = re.compile('^\\s*.. \\S+::.*')
    indent_regexp = re.compile('^(\\s*)\\S+')
    ret = []
    autofunction = False
    for line in lines:
        match = ref_regexp.match(line)
        if autofunction:
            # Swallow the indented body of a '.. autofunction::' directive;
            # the first non-indented, non-blank line ends the directive.
            match = indent_regexp.match(line)
            if (not match) or (match and len(match.group(1)) == 0):
                autofunction = False
                ret.append(line)
        elif match:
            # Remove cross-references
            label = match.group(1)
            mname = match.group(2)
            line = line.replace(
                ':py:mod:`{label} <putil.{mname}>`'.format(
                    label=label, mname=mname
                ),
                label
            )
            ret.append(line)
        elif line.lstrip().startswith('.. include::'):
            # Include files
            base_fname = line.split()[-1].strip()
            fname = os.path.basename(base_fname)
            # Do not include the change log, PyPI adds it at the end
            # of the README.rst file by default and in a hosted Git
            # repository there is a much more detailed built-in change
            # log in the commit message history
            if fname != 'CHANGELOG.rst':
                fname = os.path.join(docs_dir, base_fname)
                # Inline the included file, keeping plain text and reST
                # directives but dropping reST comments.
                for inc_line in sbin.functions._readlines(fname):
                    comment = inc_line.lstrip().startswith('.. ')
                    if ((not comment)
                       or (comment and rst_cmd_regexp.match(inc_line))):
                        ret.append(inc_line.rstrip())
        elif line.lstrip().startswith('.. autofunction::'):
            # Remove auto-functions, PyPI reStructuredText parser
            # does not appear to like it
            autofunction = True
        else:
            ret.append(line)
    # Note: fname is deliberately re-bound here to the output location.
    fname = os.path.join(pkg_dir, 'README.rst')
    with open(fname, 'w') as fobj:
        fobj.write('\n'.join(ret))
    # Check that generated file produces HTML version without errors
    sbin.functions.shcmd(
        ['rst2html.py', '--exit-status=3', fname],
        'Error validating top-level README.rst HTML conversion',
    )
def valid_dir(value):
    """ Argparse checker for directory argument """
    # Accept only existing directories and normalize them to absolute paths.
    if os.path.isdir(value):
        return os.path.abspath(value)
    raise argparse.ArgumentTypeError(
        'directory {0} does not exist'.format(value)
    )
def valid_module_name(value):
    """ Argparse checker for module_name argument """
    ret = []
    for item in value:
        # Validate the lower-cased name: it is what gets reported in the
        # error message and what gets returned, so it must also be what is
        # checked. (The original checked the raw item, so e.g. 'Plot' was
        # rejected with a message claiming 'plot' is not in a list that
        # contains it.)
        item = item.lower()
        if item not in VALID_MODULES:
            raise argparse.ArgumentTypeError(
                'module {0} not one of {1}'.format(
                    item, ', '.join(VALID_MODULES)
                )
            )
        ret.append(item)
    return ret
def valid_num_cpus(value):
    """ Argparse checker for num_cpus argument """
    # pylint: disable=E1101
    try:
        value = int(value)
    except (TypeError, ValueError):
        # Only conversion failures are expected here; the original bare
        # 'except:' would also have swallowed KeyboardInterrupt/SystemExit.
        raise argparse.ArgumentTypeError(
            'invalid positive integer value: {0}'.format(value)
        )
    if value < 1:
        raise argparse.ArgumentTypeError(
            'invalid positive integer value: {0}'.format(value)
        )
    max_cpus = multiprocessing.cpu_count()
    if value > max_cpus:
        raise argparse.ArgumentTypeError(
            'requested CPUs ({0}) greater than '
            'available CPUs ({1})'.format(value, max_cpus)
        )
    return value
def which(name):
    """ Search PATH for executable files with the given name """
    # Inspired by https://twistedmatrix.com/trac/browser/tags/releases/
    # twisted-8.2.0/twisted/python/procutils.py
    path = os.environ.get('PATH')
    if path is None:
        # Keep the historical [] return for a missing PATH; callers only
        # test truthiness.
        return []
    for pdir in path.split(os.pathsep):
        fname = os.path.join(pdir, name)
        if os.path.isfile(fname) and os.access(fname, os.X_OK):
            # First hit wins, mirroring shell lookup order; no need to
            # collect every match (the original looked PATH up twice and
            # accumulated all matches only to return the first).
            return fname
    return None
if __name__ == "__main__":
    # pylint: disable=E0602
    # Command-line entry point: build the argument parser, normalize the
    # arguments, confirm destructive rebuilds interactively, then run the
    # documentation build and exit with its return code.
    PKG_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    PARSER = argparse.ArgumentParser(
        description='Build putil package documentation'
    )
    PARSER.add_argument(
        '-d', '--directory',
        help='specify source file directory (default ../putil)',
        type=valid_dir,
        nargs=1,
        default=[os.path.join(PKG_DIR, 'putil')]
    )
    PARSER.add_argument(
        '-r', '--rebuild',
        help=(
            'rebuild exceptions documentation. If no module name '
            'is given all modules with auto-generated exceptions '
            'documentation are rebuilt'
        ),
        action="store_true"
    )
    PARSER.add_argument(
        '-n', '--num-cpus',
        help='number of CPUs to use (default: 1)',
        type=valid_num_cpus,
        default=1
    )
    PARSER.add_argument(
        '-t', '--test',
        help=(
            'diff original and rebuilt file(s) (exit code 0 '
            'indicates file(s) are identical, exit code 1 '
            'indicates file(s) are different)'
        ),
        action="store_true"
    )
    PARSER.add_argument(
        'module_name',
        help='Module name for which to build documentation for',
        nargs='*',
        default=VALID_MODULES
    )
    ARGS = PARSER.parse_args()
    ARGS.module_name = valid_module_name(ARGS.module_name)
    # -d is declared with nargs=1, so unwrap the single-item list.
    ARGS.directory = ARGS.directory[0]
    if ARGS.rebuild and (not ARGS.test):
        # A real (non-test) rebuild rewrites source files; ask first.
        # raw_input only exists on Python 2.
        if sys.hexversion < 0x03000000: # pragma: no cover
            VAR = raw_input('Are you sure [Y/N]? ')
        else:
            VAR = input('Are you sure [Y/N]? ')
        if VAR.lower() != 'y':
            sys.exit(0)
    sys.exit(build_pkg_docs(ARGS))
| |
"""The tests for the sun automation."""
from datetime import datetime
from unittest.mock import patch
import pytest
from homeassistant.components import sun
import homeassistant.components.automation as automation
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SUN_EVENT_SUNRISE,
SUN_EVENT_SUNSET,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
# Remember the process-wide default time zone so fixtures can restore it
# after tests change it.
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Every invocation of the fake "test.automation" service is appended to
    # the returned list, so tests can assert on len(calls) and call data.
    return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components and restore the default time zone afterwards."""
    mock_component(hass, "group")
    dt_util.set_default_time_zone(hass.config.time_zone)
    hass.loop.run_until_complete(
        async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
    )
    # Yield so the restore below actually runs as fixture finalization.
    # The original defined a local `def teardown()` that pytest never
    # called, so the global default time zone was never reset.
    yield
    dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_sunset_trigger(hass, calls, legacy_patchable_time):
    """Test the sunset trigger."""
    # Freeze "now" before sunset; the trigger should fire at trigger_time.
    now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
    trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)

    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET},
                    "action": {"service": "test.automation"},
                }
            },
        )

        # With the automation turned off, reaching sunset must not run
        # the action.
        await hass.services.async_call(
            automation.DOMAIN,
            SERVICE_TURN_OFF,
            {ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
            blocking=True,
        )

        async_fire_time_changed(hass, trigger_time)
        await hass.async_block_till_done()
        assert len(calls) == 0

    with patch("homeassistant.util.dt.utcnow", return_value=now):
        # Turning the automation back on re-arms the trigger; it fires at
        # the next sunset.
        await hass.services.async_call(
            automation.DOMAIN,
            SERVICE_TURN_ON,
            {ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
            blocking=True,
        )

        async_fire_time_changed(hass, trigger_time)
        await hass.async_block_till_done()
        assert len(calls) == 1
async def test_sunrise_trigger(hass, calls, legacy_patchable_time):
    """Test the sunrise trigger."""
    now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
    trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)

    # A minimal automation that runs the mock service at sunrise.
    automation_config = {
        automation.DOMAIN: {
            "trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE},
            "action": {"service": "test.automation"},
        }
    }
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(hass, automation.DOMAIN, automation_config)

    async_fire_time_changed(hass, trigger_time)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time):
    """Test the sunset trigger with offset."""
    now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
    # Sunset plus the configured 30 minute offset.
    trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)

    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "sun",
                        "event": SUN_EVENT_SUNSET,
                        "offset": "0:30:00",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # The join below expands to
                            # "{{ trigger.platform }} - {{ trigger.event }} - {{ trigger.offset }}"
                            "some": "{{ trigger.%s }}"
                            % "}} - {{ trigger.".join(("platform", "event", "offset"))
                        },
                    },
                }
            },
        )

    async_fire_time_changed(hass, trigger_time)
    await hass.async_block_till_done()
    assert len(calls) == 1
    # The rendered template exposes the trigger metadata.
    assert calls[0].data["some"] == "sun - sunset - 0:30:00"
async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time):
    """Test the sunrise trigger with offset."""
    now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
    # Sunrise minus the configured 30 minute offset.
    trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)

    automation_config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "sun",
                "event": SUN_EVENT_SUNRISE,
                "offset": "-0:30:00",
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(hass, automation.DOMAIN, automation_config)

    async_fire_time_changed(hass, trigger_time)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_action_before_sunrise_no_offset(hass, calls):
    """
    Test if action was before sunrise.

    Before sunrise is true from midnight until sunrise, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise + 1s -> 'before sunrise' not true
    now = datetime(2015, 9, 16, 13, 32, 44, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0

    # now = sunrise -> 'before sunrise' true
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1

    # now = local midnight -> 'before sunrise' true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2

    # now = local midnight - 1s -> 'before sunrise' not true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset(hass, calls):
    """
    Test if action was after sunrise.

    After sunrise is true from sunrise until midnight, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise - 1s -> 'after sunrise' not true
    now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0

    # now = sunrise -> 'after sunrise' true
    # (13:32:43 is the sunrise instant quoted above, not sunrise + 1s.)
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1

    # now = local midnight -> 'after sunrise' not true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1

    # now = local midnight - 1s -> 'after sunrise' true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_sunrise_with_offset(hass, calls):
    """
    Test if action was before sunrise with offset.

    With a +1h offset, 'before sunrise' is true from midnight until
    sunrise + 1h, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "before": SUN_EVENT_SUNRISE,
                    "before_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise + 1s + 1h -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 14, 32, 44, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0

    # now = sunrise + 1h -> 'before sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1

    # now = UTC midnight -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1

    # now = UTC midnight - 1s -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1

    # now = local midnight -> 'before sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2

    # now = local midnight - 1s -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2

    # now = sunset -> 'before sunrise' with offset +1h not true
    # NOTE(review): 1:56:48 disagrees with the 1:55:24 sunset quoted above
    # -- verify which timestamp is the actual sunset for this fixture.
    now = datetime(2015, 9, 17, 1, 56, 48, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2

    # now = sunset -1s -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 1, 56, 45, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_sunset_with_offset(hass, calls):
    """
    Test if action was before sunset with offset.

    With a +1h offset, 'before sunset' is true from local midnight until
    one hour after sunset, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "before": "sunset",
                    "before_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = local midnight -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunset + 1s + 1h -> 'before sunset' with offset +1h not true
    now = datetime(2015, 9, 17, 2, 55, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunset + 1h -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 17, 2, 55, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = UTC midnight -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 3
    # now = UTC midnight - 1s -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 4
    # now = sunrise -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 5
    # now = sunrise -1s -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
    # now = local midnight-1s -> 'before sunset' with offset +1h not true
    # (this is the end of the *previous* local day, so the window has closed)
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
async def test_if_action_after_sunrise_with_offset(hass, calls):
    """
    Test if action was after sunrise with offset.

    With a +1h offset, 'after sunrise' is true from one hour after sunrise
    until local midnight.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "after": SUN_EVENT_SUNRISE,
                    "after_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise - 1s + 1h -> 'after sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 14, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise + 1h -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = UTC noon -> 'after sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 12, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = UTC noon - 1s -> 'after sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 11, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local noon -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 19, 1, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local noon - 1s -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 18, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 3
    # now = sunset -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 4
    # now = sunset + 1s -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 5
    # now = local midnight-1s -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
    # now = local midnight -> 'after sunrise' with offset +1h not true
    # (a new local day has begun, so the condition resets)
    now = datetime(2015, 9, 17, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
async def test_if_action_after_sunset_with_offset(hass, calls):
    """
    Test if action was after sunset with offset.

    After sunset is true from sunset until midnight, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "after": "sunset",
                    "after_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-15 06:32:05 local, sunset: 2015-09-15 18:56:46 local
    # sunrise: 2015-09-15 13:32:05 UTC, sunset: 2015-09-16 01:56:46 UTC
    scenarios = (
        # sunset - 1s + 1h -> 'after sunset' with offset +1h not true
        (datetime(2015, 9, 16, 2, 56, 45, tzinfo=dt_util.UTC), 0),
        # sunset + 1h -> 'after sunset' with offset +1h true
        (datetime(2015, 9, 16, 2, 56, 46, tzinfo=dt_util.UTC), 1),
        # midnight - 1s -> 'after sunset' with offset +1h true
        (datetime(2015, 9, 16, 6, 59, 59, tzinfo=dt_util.UTC), 2),
        # midnight -> 'after sunset' with offset +1h not true
        (datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC), 2),
    )
    for mocked_now, expected_count in scenarios:
        with patch("homeassistant.util.dt.utcnow", return_value=mocked_now):
            hass.bus.async_fire("test_event")
            await hass.async_block_till_done()
            assert len(calls) == expected_count
async def test_if_action_before_and_after_during(hass, calls):
    """
    Test if action was after sunrise and before sunset.

    This is true from sunrise until sunset.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "after": SUN_EVENT_SUNRISE,
                    "before": SUN_EVENT_SUNSET,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    scenarios = (
        # sunrise - 1s -> 'after sunrise' + 'before sunset' not true
        (datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC), 0),
        # sunset + 1s -> 'after sunrise' + 'before sunset' not true
        (datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC), 0),
        # exactly sunrise -> 'after sunrise' + 'before sunset' true
        (datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC), 1),
        # exactly sunset -> 'after sunrise' + 'before sunset' true
        (datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC), 2),
        # 9 AM local -> 'after sunrise' + 'before sunset' true
        (datetime(2015, 9, 16, 16, tzinfo=dt_util.UTC), 3),
    )
    for mocked_now, expected_count in scenarios:
        with patch("homeassistant.util.dt.utcnow", return_value=mocked_now):
            hass.bus.async_fire("test_event")
            await hass.async_block_till_done()
            assert len(calls) == expected_count
async def test_if_action_before_sunrise_no_offset_kotzebue(hass, calls):
    """
    Test if action was before sunrise.

    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    Before sunrise is true from midnight until sunrise, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # now = sunrise + 1s -> 'before sunrise' not true
    now = datetime(2015, 7, 24, 15, 17, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise -> 'before sunrise' true
    now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight (08:00 UTC, AKDT is UTC-8) -> 'before sunrise' true
    now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight - 1s -> 'before sunrise' not true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset_kotzebue(hass, calls):
    """
    Test if action was after sunrise.

    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    After sunrise is true from sunrise until midnight, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # now = sunrise -> 'after sunrise' true
    now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunrise - 1s -> 'after sunrise' not true
    now = datetime(2015, 7, 24, 15, 17, 23, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = just after local midnight -> 'after sunrise' not true
    now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight - 1s -> 'after sunrise' true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_sunset_no_offset_kotzebue(hass, calls):
    """
    Test if action was before sunset.

    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    Before sunset is true from midnight until sunset, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "before": SUN_EVENT_SUNSET},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # now = sunset + 1s -> 'before sunset' not true
    now = datetime(2015, 7, 25, 11, 16, 28, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunset -> 'before sunset' true
    now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight -> 'before sunset' true
    now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight - 1s -> 'before sunset' not true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_after_sunset_no_offset_kotzebue(hass, calls):
    """
    Test if action was after sunset.

    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    After sunset is true from sunset until midnight, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "after": SUN_EVENT_SUNSET},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # now = sunset -> 'after sunset' true
    now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunset - 1s -> 'after sunset' not true
    now = datetime(2015, 7, 25, 11, 16, 26, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = just after local midnight -> 'after sunset' not true
    now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight - 1s -> 'after sunset' true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
| |
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q, Value, Case, When, F
from django.db.models.functions import Concat
from django.utils.translation import gettext_lazy as _, gettext
from django.utils import timezone
from cms.models.pluginmodel import CMSPlugin
from intervaltree import IntervalTree
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from danceschool.core.models import (
PublicEventCategory, SeriesCategory, EventStaffCategory, EventSession,
Event, StaffMember, Registration
)
from danceschool.core.utils.timezone import ensure_localtime
from .constants import GUESTLIST_ADMISSION_CHOICES, GUESTLIST_SORT_CHOICES
class GuestList(models.Model):
    '''
    A named guest list, along with the rules that determine which events it
    applies to and which staff members and registrants it includes.
    '''
    # Display name; must be unique across guest lists.
    name = models.CharField(_('Name'), max_length=200, unique=True)
    # How the assembled list is sorted (choices defined in .constants).
    sortOrder = models.CharField(_('Sort order'), choices=GUESTLIST_SORT_CHOICES, default='Last', max_length=5)
    # Rules for which events a guest list applies to
    seriesCategories = models.ManyToManyField(
        SeriesCategory, verbose_name=_('Series categories'), blank=True
    )
    eventCategories = models.ManyToManyField(
        PublicEventCategory, verbose_name=_('Public event categories'), blank=True
    )
    eventSessions = models.ManyToManyField(
        EventSession, verbose_name=_('Event sessions'), blank=True
    )
    individualEvents = models.ManyToManyField(
        Event, verbose_name=_('Individual events'), blank=True,
        related_name='specifiedGuestLists'
    )
    # Toggles for the two automatic sources of names (see getStaffForEvent
    # and getListForEvent below).
    includeStaff = models.BooleanField(_('Include all scheduled event staff'), default=True, blank=True)
    includeRegistrants = models.BooleanField(_('Include event registrants'), default=True, blank=True)

    @property
    def recentEvents(self):
        '''
        Get the set of recent and upcoming events to which this list applies.
        '''
        # An event qualifies if it matches any of the four applicability
        # rules AND its run overlaps a window of +/- 60 days around now.
        return Event.objects.filter(
            Q(pk__in=self.individualEvents.values_list('pk', flat=True)) |
            Q(session__in=self.eventSessions.all()) |
            Q(publicevent__category__in=self.eventCategories.all()) |
            Q(series__category__in=self.seriesCategories.all())
        ).filter(
            Q(startTime__lte=timezone.now() + timedelta(days=60)) &
            Q(endTime__gte=timezone.now() - timedelta(days=60))
        )

    @property
    def currentEvent(self):
        '''
        Return the first event that hasn't ended yet, or if there are no
        future events, the last one to end.

        May return None if recentEvents is empty.
        '''
        currentEvent = self.recentEvents.filter(endTime__gte=timezone.now()).order_by('startTime').first()
        if not currentEvent:
            # Fall back to the most recently ended event.
            currentEvent = self.recentEvents.filter(
                endTime__lte=timezone.now()
            ).order_by('-endTime').first()
        return currentEvent

    def appliesToEvent(self, event):
        ''' Check whether this guest list is applicable to an event. '''
        # NOTE(review): event.category is tested against both the series and
        # the public-event category rules; assumes Event exposes a
        # polymorphic `category` attribute — confirm against the Event model.
        return (
            event in self.individualEvents.all() or
            event.session in self.eventSessions.all() or
            event.category in self.seriesCategories.all() or
            event.category in self.eventCategories.all()
        )

    def getDayStart(self, dateTime):
        ''' Ensure local time and get the beginning of the day '''
        return ensure_localtime(dateTime).replace(hour=0, minute=0, second=0, microsecond=0)

    def getComponentFilters(self, component, event=None, dateTime=None):
        '''
        Get a parsimonious set of intervals and the associated Q() objects
        based on the occurrences of a specified event, and the rule that
        implicitly defines the start and end of each interval.

        Raises ValueError if neither an event nor a dateTime is given and
        the component's rule requires interval analysis.
        '''
        # Limit to the staff member or staff category specified by the rule.
        if component.staffMember:
            filters = Q(pk=component.staffMember.pk)
        else:
            filters = Q(eventstaffmember__category=component.staffCategory)
        # Handle 'Always' and 'EventOnly' rules first, because they do not
        # require an analysis of intervals.
        if component.admissionRule == 'EventOnly' and event:
            # Skip the analysis of intervals and include only those who are
            # staffed for the event.
            return Q(filters & Q(eventstaffmember__event=event))
        elif component.admissionRule in ['Always', 'EventOnly']:
            # If 'Always' or no event is specified, include all associated staff
            return Q(filters)
        # Start with the event occurrence intervals, or with the specified time.
        if event:
            intervals = [(x.startTime, x.endTime) for x in event.eventoccurrence_set.all()]
        elif dateTime:
            # A degenerate zero-length interval at the specified time.
            intervals = [(dateTime, dateTime)]
        else:
            raise ValueError(_(
                'Must provide either an event or a datetime to get interval queries.'
            ))
        if component.admissionRule == 'Day':
            # The complete days of each event occurrence
            intervals = [
                (self.getDayStart(x[0]), self.getDayStart(x[1]) + timedelta(days=1))
                for x in intervals
            ]
        elif component.admissionRule == 'Week':
            # The complete weeks of each event occurrence (Monday through the
            # following Monday; weekday() is 0 for Monday).
            intervals = [
                (
                    self.getDayStart(x[0]) - timedelta(days=x[0].weekday()),
                    self.getDayStart(x[1]) - timedelta(days=x[1].weekday() - 7)
                ) for x in intervals
            ]
        elif component.admissionRule == 'Month':
            # The complete month of each event occurrence
            intervals = [
                (
                    self.getDayStart(x[0]).replace(day=1),
                    self.getDayStart(x[1]).replace(day=1) + relativedelta(months=1)
                ) for x in intervals
            ]
        elif component.admissionRule == 'Year':
            # The complete years of each event occurrence
            intervals = [
                (
                    self.getDayStart(x[0]).replace(month=1, day=1),
                    self.getDayStart(x[1]).replace(year=x[1].year + 1, month=1, day=1)
                ) for x in intervals
            ]
        else:
            # This is a failsafe that will always evaluate as False.
            return Q(pk__isnull=True)
        # Use intervaltree to create the most parsimonious set of intervals for this interval
        # and then filter on those intervals
        intervals = [sorted(x) for x in intervals]
        tree = IntervalTree.from_tuples(intervals)
        tree.merge_overlaps()
        # Since we are OR appending, start with something that is always False.
        intervalFilters = Q(pk__isnull=True)
        for item in tree.items():
            # Match staff whose event occurrences overlap this interval.
            intervalFilters = intervalFilters | Q(
                Q(eventstaffmember__event__eventoccurrence__endTime__gte=item[0]) &
                Q(eventstaffmember__event__eventoccurrence__startTime__lte=item[1])
            )
        return Q(filters & intervalFilters)

    def getStaffForEvent(self, event=None, filters=Q()):
        '''
        Get all StaffMembers associated with a specified event.

        `filters` is an optional extra Q() restriction applied at the end.
        '''
        # Component-by-component, OR append filters to an initial filter that always
        # evaluates to False.
        components = self.guestlistcomponent_set.all()
        component_filters = Q(pk__isnull=True)
        # Add prior staff based on the component rule.
        for component in components:
            if event and self.appliesToEvent(event):
                component_filters = component_filters | self.getComponentFilters(component, event=event)
            else:
                # No applicable event: evaluate the rule relative to now.
                component_filters = component_filters | self.getComponentFilters(component, dateTime=timezone.now())
        # Add all event staff if that box is checked (no need for separate components)
        if self.includeStaff and event and self.appliesToEvent(event):
            component_filters = component_filters | Q(eventstaffmember__event=event)
        return StaffMember.objects.filter(component_filters).filter(filters)

    def getDescriptionForGuest(self, guest, event=None):
        '''
        Return a string that indicates the type of guest, depending on the rule
        that was used to add the guest to the list.

        Returns None implicitly if `guest` is not one of the recognized types.
        '''
        if isinstance(guest, GuestListName):
            return guest.notes or gettext('Manually Added')
        elif isinstance(guest, Registration):
            return gettext('Registered')
        elif isinstance(guest, StaffMember):
            if event:
                staff_for = guest.eventstaffmember_set.filter(event=event).first()
                if staff_for:
                    return gettext(
                        'Event Staff: {category}'.format(category=staff_for.category.name)
                    )
            return gettext('Other Staff')

    def getListForEvent(self, event=None, filters=Q(), includeRegistrants=True):
        '''
        Get a union-ed queryset with a list of names associated with a particular event.

        Each row carries ('id', 'modelType', 'guestListId', 'firstName',
        'lastName', 'guestType') so the three sources can be combined with
        union().
        '''
        # Manually-added names; guestType comes from the notes field if set.
        names = self.guestlistname_set.annotate(
            modelType=Value('GuestListName', output_field=models.CharField()),
            guestListId=Value(self.id, output_field=models.IntegerField()),
            guestType=Case(
                When(notes__isnull=False, then=F('notes')),
                default=Value(gettext('Manually Added')),
                output_field=models.CharField()
            ),
        ).filter(filters).values(
            'id', 'modelType', 'guestListId', 'firstName', 'lastName',
            'guestType'
        ).order_by()
        # Execute the constructed query and add the names of staff
        names = names.union(self.getStaffForEvent(event, filters).annotate(
            modelType=Value('StaffMember', output_field=models.CharField()),
            guestListId=Value(self.id, output_field=models.IntegerField()),
            guestType=Case(
                When(
                    eventstaffmember__event=event,
                    then=Concat(
                        Value('Event Staff: '), 'eventstaffmember__category__name'
                    )
                ),
                default=Value(gettext('Other Staff')),
                output_field=models.CharField()
            ),
        ).distinct().values(
            'id', 'modelType', 'guestListId', 'firstName', 'lastName',
            'guestType',
        ).order_by())
        # Finally add registrants, but only when the list applies to the
        # event and both the caller and the model flag permit it.
        if includeRegistrants and self.includeRegistrants and event and self.appliesToEvent(event):
            names = names.union(
                Registration.objects.filter(
                    filters & Q(final=True) & Q(eventregistration__event=event)
                ).annotate(
                    modelType=Value('Registration', output_field=models.CharField()),
                    guestListId=Value(self.id, output_field=models.IntegerField()),
                    guestType=Value(_('Registered'), output_field=models.CharField()),
                ).values(
                    'id', 'modelType', 'guestListId', 'firstName', 'lastName',
                    'guestType',
                ).order_by()
            )
        return names.order_by('lastName', 'firstName')

    def __str__(self):
        return '%s: %s' % (_('Guest list'), self.name)

    class Meta:
        ordering = ('name', )
        verbose_name = _('Guest list')
        verbose_name_plural = _('Guest lists')
class GuestListName(models.Model):
    ''' Additional names to be manually added to a particular guest list '''
    guestList = models.ForeignKey(GuestList, on_delete=models.CASCADE)
    firstName = models.CharField(_('First name'), max_length=50)
    lastName = models.CharField(_('Last name'), max_length=50)
    # Optional free-form note; also used as the guest's description on the
    # rendered list (see GuestList.getDescriptionForGuest).
    notes = models.CharField(
        _('Notes (optional)'),
        help_text=_('These will be included on the list for reference.'),
        null=True, blank=True, max_length=200
    )

    @property
    def fullName(self):
        # The `or ''` guards keep the join from raising if either name is None.
        return ' '.join([self.firstName or '', self.lastName or ''])
    # Give the property a human-readable label when displayed in the admin.
    fullName.fget.short_description = _('Name')

    def __str__(self):
        return '%s: %s' % (_('Guest'), self.fullName)

    class Meta:
        ordering = ('guestList', 'lastName', 'firstName')
        verbose_name = _('Manually-added guest')
        verbose_name_plural = _('Manually added guests')
        # Custom permissions used by the guest-list views.
        permissions = (
            ('view_guestlist', _('Can view guest lists')),
            (
                'checkin_guests',
                _('Can check in guests using the JSON check-in view (used by door plugins)')
            )
        )
class GuestListComponent(models.Model):
    '''
    One rule of a guest list: admit either a whole staff category or a single
    staff member, according to an admission rule (Always/EventOnly/Day/...).
    '''
    guestList = models.ForeignKey(GuestList, on_delete=models.CASCADE)
    # Exactly one of staffCategory/staffMember must be set (enforced in clean()).
    staffCategory = models.ForeignKey(
        EventStaffCategory, verbose_name=_('Category of staff members'),
        null=True, blank=True, on_delete=models.CASCADE
    )
    staffMember = models.ForeignKey(
        StaffMember, verbose_name=_('Individual staff member'),
        null=True, blank=True, on_delete=models.CASCADE
    )
    # Interpreted by GuestList.getComponentFilters().
    admissionRule = models.CharField(_('Event admission rule'), choices=GUESTLIST_ADMISSION_CHOICES, max_length=10)

    def clean(self):
        ''' Either staffCategory or staffMember must be filled in, but not both. '''
        if not self.staffCategory and not self.staffMember:
            raise ValidationError(_('Either staff category or staff member must be specified.'))
        if self.staffCategory and self.staffMember:
            raise ValidationError(_('Specify either a staff category or a staff member, not both.'))

    class Meta:
        ordering = ('guestList', 'admissionRule')
        verbose_name = _('Guest list component')
        verbose_name_plural = _('Guest list components')
        unique_together = ('guestList', 'staffCategory', 'staffMember')
| |
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
    """
    Issued by `polyfit` when the Vandermonde matrix is rank deficient.

    For more information, a way to suppress the warning, and an example of
    `RankWarning` being issued, see `polyfit`.
    """
    # The class adds no behavior; it exists only as a distinct warning
    # category so callers can filter it separately from other UserWarnings.
    pass
def poly(seq_of_zeros):
    """
    Find the coefficients of a polynomial with the given sequence of roots.

    Returns the coefficients of the monic polynomial whose zeros are the
    entries of `seq_of_zeros` (repeat a root once per multiplicity). A
    square 2-D array is interpreted as a matrix, and the coefficients of
    its characteristic polynomial are returned instead.

    Parameters
    ----------
    seq_of_zeros : array_like, shape (N,) or (N, N)
        A sequence of polynomial roots, or a square array or matrix object.

    Returns
    -------
    c : ndarray
        1-D array of coefficients from highest to lowest degree, with
        ``c[0] == 1``.

    Raises
    ------
    ValueError
        If the input is neither 1-D nor a non-empty square 2-D array.

    See Also
    --------
    polyval, roots, polyfit, poly1d

    Examples
    --------
    >>> np.poly((0, 0, 0))
    array([1, 0, 0, 0])
    >>> np.poly((-1./2, 0, 1./2))
    array([ 1.  ,  0.  , -0.25,  0.  ])
    """
    seq_of_zeros = atleast_1d(seq_of_zeros)
    shape = seq_of_zeros.shape
    if len(shape) == 2 and shape[0] == shape[1] and shape[0] != 0:
        # Square matrix: its eigenvalues are the roots of the
        # characteristic polynomial.
        seq_of_zeros = eigvals(seq_of_zeros)
    elif len(shape) == 1:
        dtype = seq_of_zeros.dtype
        # Let object arrays slip through, e.g. for arbitrary precision.
        if dtype != object:
            seq_of_zeros = seq_of_zeros.astype(mintypecode(dtype.char))
    else:
        raise ValueError("input must be 1d or non-empty square 2d array.")

    # Empty root sequence: the monic polynomial of degree zero.
    if len(seq_of_zeros) == 0:
        return 1.0

    # Multiply out the linear factors (x - z) one at a time via convolution.
    dtype = seq_of_zeros.dtype
    coeffs = ones((1,), dtype=dtype)
    for zero in seq_of_zeros:
        coeffs = NX.convolve(coeffs, array([1, -zero], dtype=dtype),
                             mode='full')

    if issubclass(coeffs.dtype.type, NX.complexfloating):
        # If the complex roots all occur in conjugate pairs, the
        # coefficients are actually real — drop the zero imaginary parts.
        zk = NX.asarray(seq_of_zeros, complex)
        upper = sort_complex(NX.compress(zk.imag > 0, zk))
        lower = NX.conjugate(sort_complex(NX.compress(zk.imag < 0, zk)))
        if len(upper) == len(lower) and NX.all(lower == upper):
            coeffs = coeffs.real.copy()

    return coeffs
def roots(p):
    """
    Return the roots of a polynomial with coefficients given in p.

    The values in the rank-1 array `p` are coefficients of a polynomial.
    If the length of `p` is n+1 then the polynomial is described by::

        p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]

    Parameters
    ----------
    p : array_like
        Rank-1 array of polynomial coefficients, highest power first.

    Returns
    -------
    out : ndarray
        An array containing the complex roots of the polynomial.

    Raises
    ------
    ValueError
        When `p` cannot be converted to a rank-1 array.

    See also
    --------
    poly : Find the coefficients of a polynomial with a given sequence
           of roots.
    polyval : Evaluate a polynomial at a point.
    polyfit : Least squares polynomial fit.
    poly1d : A one-dimensional polynomial class.

    Notes
    -----
    The roots are computed as the eigenvalues of the companion matrix of
    the polynomial [1]_.

    References
    ----------
    .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*.  Cambridge, UK:
        Cambridge University Press, 1999, pp. 146-7.

    Examples
    --------
    >>> coeff = [3.2, 2, 1]
    >>> np.roots(coeff)
    array([-0.3125+0.46351241j, -0.3125-0.46351241j])

    """
    # A scalar input becomes a 1-element array here.
    p = atleast_1d(p)
    if p.ndim != 1:
        raise ValueError("Input must be a rank-1 array.")

    # Indices of the non-zero coefficients.
    nz = NX.nonzero(NX.ravel(p))[0]

    # The zero polynomial has no roots.
    if len(nz) == 0:
        return NX.array([])

    # Each trailing zero coefficient contributes one root at the origin.
    n_zero_roots = len(p) - nz[-1] - 1

    # Drop leading and trailing zero coefficients.
    p = p[int(nz[0]):int(nz[-1]) + 1]

    # Work in floating point (or complex) for the eigenvalue computation.
    if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
        p = p.astype(float)

    if len(p) > 1:
        # Companion matrix: ones on the first subdiagonal, normalized
        # coefficients along the first row; its eigenvalues are the roots.
        companion = diag(NX.ones((len(p) - 2,), p.dtype), -1)
        companion[0, :] = -p[1:] / p[0]
        found = eigvals(companion)
    else:
        found = NX.array([])

    # Append the roots at zero that were stripped above.
    return hstack((found, NX.zeros(n_zero_roots, found.dtype)))
def polyint(p, m=1, k=None):
    """
    Return an antiderivative (indefinite integral) of a polynomial.

    The returned order `m` antiderivative `P` of polynomial `p` satisfies
    :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
    integration constants `k`. The constants determine the low-order
    polynomial part

    .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}

    of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.

    Parameters
    ----------
    p : {array_like, poly1d}
        Polynomial to integrate.  A sequence is interpreted as polynomial
        coefficients, see `poly1d`.
    m : int, optional
        Order of the antiderivative. (Default: 1)
    k : {None, list of `m` scalars, scalar}, optional
        Integration constants, given in the order of integration: those
        corresponding to highest-order terms come first.  If ``None``
        (default), all constants are assumed to be zero.  If `m = 1`, a
        single scalar can be given instead of a list.

    See Also
    --------
    polyder : derivative of a polynomial
    poly1d.integ : equivalent method

    Examples
    --------
    >>> p = np.poly1d([1,1,1])
    >>> P = np.polyint(p)
    >>> P
    poly1d([ 0.33333333,  0.5       ,  1.        ,  0.        ])
    >>> np.polyder(P) == p
    True

    """
    order = int(m)
    if order < 0:
        raise ValueError("Order of integral must be positive (see polyder)")
    if k is None:
        k = NX.zeros(order, float)
    k = atleast_1d(k)
    # A single scalar constant is broadcast to all integration passes.
    if order > 1 and len(k) == 1:
        k = k[0] * NX.ones(order, float)
    if len(k) < order:
        raise ValueError(
            "k must be a scalar or a rank-1 array of length 1 or >m.")

    was_poly1d = isinstance(p, poly1d)
    p = NX.asarray(p)
    if order == 0:
        return poly1d(p) if was_poly1d else p

    # One integration pass: divide each coefficient by its new exponent
    # and append the integration constant.  __truediv__ keeps this working
    # for object and integer arrays alike.
    integrated = NX.concatenate(
        (p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
    result = polyint(integrated, order - 1, k=k[1:])
    return poly1d(result) if was_poly1d else result
def polyder(p, m=1):
    """
    Return the derivative of the specified order of a polynomial.

    Parameters
    ----------
    p : poly1d or sequence
        Polynomial to differentiate.  A sequence is interpreted as
        polynomial coefficients, see `poly1d`.
    m : int, optional
        Order of differentiation (default: 1)

    Returns
    -------
    der : poly1d or ndarray
        A new polynomial (or coefficient array) representing the
        derivative; the result is a `poly1d` exactly when `p` is.

    See Also
    --------
    polyint : Anti-derivative of a polynomial.
    poly1d : Class for one-dimensional polynomials.

    Examples
    --------
    >>> p = np.poly1d([1,1,1,1])
    >>> np.polyder(p)
    poly1d([3, 2, 1])

    """
    order = int(m)
    if order < 0:
        raise ValueError("Order of derivative must be positive (see polyint)")

    was_poly1d = isinstance(p, poly1d)
    p = NX.asarray(p)

    # One differentiation pass: drop the constant term and multiply each
    # remaining coefficient by its exponent.
    dp = p[:-1] * NX.arange(len(p) - 1, 0, -1)

    if order == 0:
        result = p
    else:
        result = polyder(dp, order - 1)
    return poly1d(result) if was_poly1d else result
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
    to points `(x, y)`. Returns a vector of coefficients `p` that minimises
    the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int
        Degree of the fitting polynomial
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is len(x)*eps, where eps is the relative precision of
        the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (M,), optional
        weights to apply to the y-coordinates of the sample points.
    cov : bool, optional
        Return the estimate and the covariance matrix of the estimate
        If full is True, then cov is not returned.

    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first.  If `y` was 2-D, the
        coefficients for `k`-th data set are in ``p[:,k]``.
    residuals, rank, singular_values, rcond :
        Present only if `full` = True.  Residuals of the least-squares fit,
        the effective rank of the scaled Vandermonde coefficient matrix,
        its singular values, and the specified value of `rcond`. For more
        details, see `linalg.lstsq`.
    V : ndarray, shape (M,M) or (M,M,K)
        Present only if `full` = False and `cov`=True.  The covariance
        matrix of the polynomial coefficient estimates.  The diagonal of
        this matrix are the variance estimates for each coefficient.  If y
        is a 2-D array, then the covariance matrix for the `k`-th data set
        are in ``V[:,:,k]``

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if `full` = False.
        The warnings can be turned off by

        >>> import warnings
        >>> warnings.simplefilter('ignore', np.RankWarning)

    See Also
    --------
    polyval : Computes polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the squared error

    .. math ::
        E = \\sum_{j=0}^k |p(x_j) - y_j|^2

    in the equations::

        x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
        x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
        ...
        x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]

    The coefficient matrix of the coefficients `p` is a Vandermonde matrix.

    `polyfit` issues a `RankWarning` when the least-squares fit is badly
    conditioned. This implies that the best fit is not well-defined due
    to numerical error. The results may be improved by lowering the polynomial
    degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
    can also be set to a value smaller than its default, but the resulting
    fit may be spurious: including contributions from the small singular
    values can add numerical noise to the result.

    Note that fitting polynomial coefficients is inherently badly conditioned
    when the degree of the polynomial is large or the interval of sample points
    is badly centered. The quality of the fit should always be checked in these
    cases. When polynomial fits are not satisfactory, splines may be a good
    alternative.

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting
    .. [2] Wikipedia, "Polynomial interpolation",
           http://en.wikipedia.org/wiki/Polynomial_interpolation

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0,  4.0,  5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254])

    """
    order = int(deg) + 1  # number of coefficients being fit
    # '+ 0.0' promotes integer input to floating point without copying
    # floating-point input unnecessarily.
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0

    # check arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")

    # set rcond
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps

    # set up least squares equation for powers of x
    lhs = vander(x, order)
    rhs = y

    # apply weighting
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        # Weight each equation (row) by w; for 2-D y every data set gets
        # the same weights.
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w

    # scale lhs to improve condition number and solve
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T  # broadcast scale coefficients

    # warn on rank reduction, which indicates an ill conditioned matrix
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning)

    if full:
        return c, resids, rank, s, rcond
    elif cov:
        # Covariance of the coefficients is inv(A^T A); undo the column
        # scaling applied to lhs above.
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        # Some literature ignores the extra -2.0 factor in the denominator, but
        #  it is included here because the covariance of Multivariate Student-T
        #  (which is implied by a Bayesian uncertainty analysis) includes it.
        #  Plus, it gives a slightly more conservative estimate of uncertainty.
        # NOTE(review): this divides by zero when len(x) <= order + 2 —
        # confirm callers guard against tiny sample sizes.
        fac = resids / (len(x) - order - 2.0)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    If `p` is of length N, this function returns the value:

        ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``

    If `x` is a sequence, then `p(x)` is returned for each element of `x`.
    If `x` is another polynomial then the composite polynomial `p(x(t))`
    is returned.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients (including coefficients equal
        to zero) from highest degree to the constant term, or an
        instance of poly1d.
    x : array_like or poly1d object
        A number, a 1D array of numbers, or an instance of poly1d, "at"
        which to evaluate `p`.

    Returns
    -------
    values : ndarray or poly1d
        If `x` is a poly1d instance, the result is the composition of the
        two polynomials; otherwise the type mirrors `x`.

    See Also
    --------
    poly1d: A polynomial class.

    Notes
    -----
    Horner's scheme [1]_ is used to evaluate the polynomial.  Even so,
    for polynomials of high degree the values may be inaccurate due to
    rounding errors.  Use carefully.

    References
    ----------
    .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
       trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
       Reinhold Co., 1985, pg. 720.

    Examples
    --------
    >>> np.polyval([3,0,1], 5)  # 3 * 5**2 + 0 * 5**1 + 1
    76

    """
    p = NX.asarray(p)
    if isinstance(x, poly1d):
        # Composing with a poly1d: the Horner accumulator starts as the
        # scalar 0 so that poly1d arithmetic builds the composition.
        acc = 0
    else:
        x = NX.asarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme: fold in one coefficient per step.
    for coeff in p:
        acc = acc * x + coeff
    return acc
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    Each input must be either a poly1d object or a 1D sequence of
    polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs.  If either input is a poly1d object the
        output is one too; otherwise it is a 1D coefficient array.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])

    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so that equal
    # powers line up before adding.
    pad = len(a2) - len(a1)
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    total = a1 + a2
    return poly1d(total) if wrap else total
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.  Each input
    can be either an array_like sequence of coefficients (including
    coefficients equal to zero) or a `poly1d` object.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Array or `poly1d` object of the difference polynomial's
        coefficients.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # Left-pad the shorter coefficient array with zeros so that equal
    # powers line up before subtracting.
    pad = len(a2) - len(a1)
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    difference = a1 - a2
    return poly1d(difference) if wrap else difference
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    Each input must be either a poly1d object or a 1D sequence of
    polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The product of the inputs.  If either input is a poly1d object
        the output is one too; otherwise it is a 1D coefficient array.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
    polyval
    convolve : Array convolution. Same output as polymul, but has parameter
               for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])

    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # poly1d() normalizes each input to a coefficient array; polynomial
    # multiplication is the convolution of the coefficient sequences.
    product = NX.convolve(poly1d(a1), poly1d(a2))
    return poly1d(product) if wrap else product
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.
    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray or poly1d
        Coefficients, including those equal to zero, of the quotient.
        A poly1d is returned if either input is a poly1d.
    r : ndarray or poly1d
        Coefficients, including those equal to zero, of the remainder.
        A poly1d is returned if either input is a poly1d.

    See Also
    --------
    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
    polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([ 1.5 ,  1.75]), array([ 0.25]))

    """
    # BUG FIX: the second operand used to test `u` again instead of `v`,
    # so a poly1d divisor never produced poly1d results.
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1
    n = len(v) - 1
    scale = 1. / v[0]
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.copy()
    # Synthetic long division: peel off one quotient coefficient per step
    # and subtract its contribution from the running remainder.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Strip (nearly) zero leading coefficients from the remainder, always
    # keeping at least one coefficient.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
    """Rewrite '**N' exponents in *astr* as superscripts on a line above.

    Returns the text laid out as alternating exponent/base line pairs,
    wrapping to a new pair whenever either line would exceed *wrap*
    characters.
    """
    pos = 0
    exp_line = ''   # accumulates the superscript digits
    base_line = ''  # accumulates the base text under them
    output = ' '
    while True:
        match = _poly_mat.search(astr, pos)
        if match is None:
            break
        start, end = match.span()
        power = match.group(1)
        segment = astr[pos:start]
        pos = end
        # Pad each piece so exponent digits sit just past their base term.
        base_piece = segment + ' ' * (len(power) - 1)
        exp_piece = ' ' * (len(segment) - 1) + power
        if (len(base_line) + len(base_piece) > wrap or
                len(exp_line) + len(exp_piece) > wrap):
            # Flush the filled pair of lines and start a fresh pair.
            output += exp_line + "\n" + base_line + "\n "
            exp_line = exp_piece
            base_line = base_piece
        else:
            exp_line += exp_piece
            base_line += base_piece
    output += exp_line + "\n" + base_line
    return output + astr[pos:]
class poly1d(object):
    """
    A one-dimensional polynomial class.

    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0).  For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).

    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:

    >>> p = np.poly1d([1, 2, 3])
    >>> print np.poly1d(p)
       2
    1 x + 2 x + 3

    Evaluate the polynomial at :math:`x = 0.5`:

    >>> p(0.5)
    4.25

    Find the roots:

    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j])

    These numbers in the previous line represent (0, 0) to machine precision

    Show the coefficients:

    >>> p.c
    array([1, 2, 3])

    Display the order (the leading zero-coefficients are removed):

    >>> p.order
    2

    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):

    >>> p[1]
    2

    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):

    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])
    >>> (p**3 + 4) / p
    (poly1d([  1.,   4.,  10.,  12.,   9.]), poly1d([ 4.]))

    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:

    >>> p**2 # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])
    >>> np.square(p) # square of individual coefficients
    array([1, 4, 9])

    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:

    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print p
       2
    1 z + 2 z + 3

    Construct a polynomial from its roots:

    >>> np.poly1d([1, 2], True)
    poly1d([ 1, -3,  2])

    This is the same polynomial as obtained by:

    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])

    """
    # State is filled in by __init__ through self.__dict__ because
    # __setattr__ is blocked (see below).
    coeffs = None    # ndarray of coefficients, highest power first
    order = None     # degree of the polynomial: len(coeffs) - 1
    variable = None  # variable name used when printing
    # Instances are mutable and define __eq__, so they are unhashable.
    __hash__ = None

    def __init__(self, c_or_r, r=0, variable=None):
        # Copy construction: clone another poly1d's state wholesale.
        if isinstance(c_or_r, poly1d):
            for key in c_or_r.__dict__.keys():
                self.__dict__[key] = c_or_r.__dict__[key]
            if variable is not None:
                self.__dict__['variable'] = variable
            return
        if r:
            # Input gives roots rather than coefficients: convert.
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if len(c_or_r.shape) > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading (highest-power) zero coefficients.
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            # The zero polynomial keeps a single 0.0 coefficient.
            c_or_r = NX.array([0.])
        # Assignments go through __dict__ because __setattr__ raises.
        self.__dict__['coeffs'] = c_or_r
        self.__dict__['order'] = len(c_or_r) - 1
        if variable is None:
            variable = 'x'
        self.__dict__['variable'] = variable

    def __array__(self, t=None):
        """Return the coefficient array, optionally cast to dtype `t`."""
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)

    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the 'array(' prefix and ')' suffix from the repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals

    def __len__(self):
        # Note: len(p) is the polynomial's order (degree), not the number
        # of coefficients.
        return self.order

    def __str__(self):
        """Render the polynomial on two text lines, exponents raised."""
        thestr = "0"
        var = self.variable

        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1

        def fmt_float(q):
            # Short float format; drop a trailing '.0000' produced by %.4g.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s

        for k in range(len(coeffs)):
            if not iscomplex(coeffs[k]):
                coefstr = fmt_float(real(coeffs[k]))
            elif real(coeffs[k]) == 0:
                coefstr = '%sj' % fmt_float(imag(coeffs[k]))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
                                          fmt_float(imag(coeffs[k])))

            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): fmt_float never seems to produce 'b';
                    # this branch looks vestigial — confirm before removal.
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    # NOTE(review): see the 'b' note above.
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        # _raise_power lifts '**N' exponents onto the line above.
        return _raise_power(thestr)

    def __call__(self, val):
        """Evaluate the polynomial at `val` (number, array, or poly1d)."""
        return polyval(self.coeffs, val)

    def __neg__(self):
        return poly1d(-self.coeffs)

    def __pos__(self):
        return self

    def __mul__(self, other):
        if isscalar(other):
            # Scalar multiplication just scales the coefficients.
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __pow__(self, val):
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        # Repeated polynomial multiplication; val == 0 yields poly1d([1]).
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)

    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))

    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))

    def __div__(self, other):
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            # Polynomial division returns a (quotient, remainder) pair.
            other = poly1d(other)
            return polydiv(self, other)

    __truediv__ = __div__

    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)

    __rtruediv__ = __rdiv__

    def __eq__(self, other):
        # NOTE(review): assumes `other` is a poly1d; comparing against an
        # unrelated type raises AttributeError — confirm callers expect that.
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __setattr__(self, key, val):
        # Instances are (nominally) immutable through attribute syntax;
        # internal code mutates state via self.__dict__ instead.
        raise ValueError("Attributes cannot be changed this way.")

    def __getattr__(self, key):
        """Resolve the short aliases r/c/o and fall back to __dict__."""
        if key in ['r', 'roots']:
            return roots(self.coeffs)
        elif key in ['c', 'coef', 'coefficients']:
            return self.coeffs
        elif key in ['o']:
            return self.order
        else:
            try:
                return self.__dict__[key]
            except KeyError:
                raise AttributeError(
                    "'%s' has no attribute '%s'" % (self.__class__, key))

    def __getitem__(self, val):
        """Return the coefficient of x**val; 0 for out-of-range powers."""
        ind = self.order - val
        if val > self.order:
            return 0
        if val < 0:
            return 0
        return self.coeffs[ind]

    def __setitem__(self, key, val):
        """Set the coefficient of x**key, growing the polynomial if needed."""
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            # Extend with zero coefficients up to the requested power.
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
            self.__dict__['order'] = key
            ind = 0
        self.__dict__['coeffs'][ind] = val
        return

    def __iter__(self):
        # Iterates over coefficients, highest power first.
        return iter(self.coeffs)

    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function

        """
        return poly1d(polyint(self.coeffs, m=m, k=k))

    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function

        """
        return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
# Always surface RankWarning (emitted by polyfit for ill-conditioned fits),
# even when the user's global warning filters would otherwise suppress it.
warnings.simplefilter('always', RankWarning)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A ContainerSandbox manages the application and devappserver containers.
This includes their creation, termination, and destruction.
ContainerSandbox is intended to be used inside a "with" statement. Inside
the interior of the "with" statement, the user interacts with the containers
via the docker api. It may also be beneficial for the user to perform
system tests in this manner.
"""
# This file conforms to the external style guide
# pylint: disable=bad-indentation, g-bad-import-order
import io
import os
import sys
import time
import docker
import configuration
import container
from .. import utils
from .. import constants
from ..utils import get_logger
# Maximum attempts to health check application container.
MAX_ATTEMPTS = 30
# Default port that the application is expected to listen on inside
# the application container.
DEFAULT_APPLICATION_PORT = 8080
# Time format for naming images/containers
TIME_FMT = '%Y.%m.%d_%H.%M.%S'
# Java offset for the xml file's location, relative to the root
# directory of the WAR archive
JAVA_OFFSET = 'WEB-INF/'
class ContainerSandbox(object):
"""Sandbox to manage the user application & devappserver containers.
This sandbox aims to leave the docker container space untouched.
Proper usage ensures that application & devappserver containers will
be created, started, stopped, and destroyed. For proper usage, the
ContainerSandbox should be used as a context manager (inside a "with"
statement), or the start and stop functions should be invoked from
within a try-finally context.
"""
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
def __init__(self,
config_file=None,
image_name=None,
application_id=None,
application_port=8080,
admin_port=8000,
clear_datastore=False,
internal_admin_port=32768,
internal_api_port=32769,
internal_proxy_port=32770,
log_path=None,
run_api_server=True,
storage_path='/tmp/app_engine/storage',
nocache=False,
timeout=MAX_ATTEMPTS,
force_version=False):
"""Get the sandbox ready to construct and run the containers.
Args:
config_file: (basestring or None) The relative or full path
to the config_file of the application. At least one of
image_name and config_file must be specified. If image_name is
not specified, this path will be used to help find the
Dockerfile and build the application container.
Therefore, if image_name is not specified, there should
be a Dockerfile in the correct location:
Non-java apps (apps that use .yaml files)
1) The .yaml file must be in the root of the app
directory.
2) The Dockerfile must be in the root of the app
directory.
Java apps (apps that are built off java-compat):
1) The appengine-web.xml file must be in
<root>/WEB-INF/ (where <root> is the root
directory of the WAR archive.)
2) The Dockerfile must be in the root of the WAR
archive.
3) There must be a web.xml file in the same
directory as the appengine-web.xml file.
image_name: (basestring or None) If specified, the sandbox
will run the image associated with image_name instead of
building an image from the specified application_directory.
application_id: (basestring) The application ID is
the unique "appengine application ID" that the app is
identified by, and can be found in the developer's
console. While for deployment purposes, this ID is
important, it's not as important in development. This
ID only controls which datastore, blobstore, etc the
sandbox will use. If the sandbox is run consecutively
with the same application_id, (and of course, the same
storage_path) the datastore, blobstore, taskqueue, etc
will persist assuming their data has not been deleted.
application_port: (int) The port on the docker host that should be
mapped to the application. The application will be
accessible through this port.
admin_port: (int) The port on the docker server host that
should be mapped to the admin server, which runs inside
the devappserver container. The admin panel will be
accessible through this port.
clear_datastore: (bool) Whether or not to clear the datastore.
If True, this eliminates all of the data from the datastore
before running the api server.
internal_admin_port: (int) The port INSIDE the devappserver
container that the admin panel binds to. Because this
is internal to the container, it can be defaulted.
In fact, you shouldn't change it from the default unless
you have a reason to.
internal_api_port: (int) The port INSIDE the devappserver
container that the api server should bind to.
~Same disclaimer as the one for internal_admin_port.~
internal_proxy_port: (int) The port INSIDE the devappserver
container that the proxy should bind to.
~Same disclaimer as the one for internal_admin_port.~
log_path: (basestring or None) The path where the application's
logs should be collected. Note that the application's logs
will be collected EXTERNALLY (ie they will collect in the
docker host's file system) and log_path specifies where
these logs should go. If log_path is None, a timestamped
name will be generated for the log directory.
run_api_server: (bool) Whether or not to run the api server.
If this argument is set to false, the sandbox won't start
a devappserver.
storage_path: (basestring) The path (external to the
containers) where the data associated with the api
server's services - datastore, blobstore, etc - should
collect. Note that this path defaults to
/tmp/appengine/storage, so it should be changed if the data
is intended to persist.
nocache: (bool) Whether or not to use the cache when building
images.
timeout: (int) How many seconds to wait for the application
container to start.
force_version: (bool) Whether or not to continue in the case
of mismatched docker versions.
"""
self.cur_time = time.strftime(TIME_FMT)
self.app_id = (application_id or
time.strftime('%s'))
self.internal_api_port = internal_api_port
self.internal_proxy_port = internal_proxy_port
self.internal_admin_port = internal_admin_port
self.clear_datastore = clear_datastore
self.port = application_port
self.storage_path = storage_path
self.log_path = (
log_path or self.make_timestamped_name(
'/tmp/log/app_engine/app_logs',
self.cur_time))
self.image_name = image_name
self.admin_port = admin_port
self.dclient = utils.get_docker_client()
self.devappserver_container = None
self.app_container = None
self.pinger_container = None
self.nocache = nocache
self.run_devappserver = run_api_server
self.timeout = timeout
if config_file:
self.conf_path = os.path.abspath(config_file)
else:
if not image_name:
raise utils.AppstartAbort('At least one of config_file and '
'image_name must be specified.')
self.conf_path = os.path.join(os.path.dirname(__file__),
'app.yaml')
self.application_configuration = (
configuration.ApplicationConfiguration(self.conf_path))
self.app_dir = self.app_directory_from_config(self.conf_path)
# For Java apps, the xml file must be offset by WEB-INF.
# Otherwise, devappserver will think that it's a non-java app.
self.das_offset = (JAVA_OFFSET if
self.application_configuration.is_java else '')
if not force_version:
utils.check_docker_version(self.dclient)
    def __enter__(self):
        """Context-manager entry point: start the sandbox and return it."""
        self.start()
        return self
    def start(self):
        """Start the sandbox.

        If anything fails while the containers are being created and
        started, tear down whatever was already started before
        re-raising the original exception.
        """
        try:
            self.create_and_run_containers()
        except: # pylint: disable=bare-except
            # Clean up partially-created containers, then propagate.
            self.stop()
            raise
    def create_and_run_containers(self):
        """Creates and runs app and (optionally) devappserver containers.

        This includes the creation of a new devappserver image, unless
        self.run_devappserver is False. If image_name isn't specified, an
        image is created for the application as well. Newly made containers
        are cleaned up, but newly made images are not.

        Raises:
            utils.AppstartAbort: If a container stops prematurely or a
                required image (e.g. the pinger image) is missing.
        """
        if self.run_devappserver:
            # Devappserver must know APP_ID to properly interface with
            # services like datastore, blobstore, etc. It also needs
            # to know where to find the config file, which port to
            # run the proxy on, and which port to run the api server on.
            das_env = {'APP_ID': self.app_id,
                       'CLEAR_DATASTORE': self.clear_datastore,
                       'PROXY_PORT': self.internal_proxy_port,
                       'API_PORT': self.internal_api_port,
                       'ADMIN_PORT': self.internal_admin_port,
                       'CONFIG_FILE': os.path.join(
                           self.das_offset,
                           os.path.basename(self.conf_path))}
            devappserver_image = self.build_devappserver_image()
            devappserver_container_name = (
                self.make_timestamped_name('devappserver',
                                           self.cur_time))
            # The host_config specifies port bindings and volume bindings.
            # /storage is bound to the storage_path. Internally, the
            # devappserver writes all the db files to /storage. The mapping
            # thus allows these files to appear on the host machine. As for
            # port mappings, we only want to expose the application (via the
            # proxy), and the admin panel.
            devappserver_hconf = docker.utils.create_host_config(
                port_bindings={
                    DEFAULT_APPLICATION_PORT: self.port,
                    self.internal_admin_port: self.admin_port,
                },
                binds={
                    self.storage_path: {'bind': '/storage'},
                }
            )
            self.devappserver_container = container.Container(self.dclient)
            self.devappserver_container.create(
                name=devappserver_container_name,
                image=devappserver_image,
                ports=[DEFAULT_APPLICATION_PORT, self.internal_admin_port],
                volumes=['/storage'],
                host_config=devappserver_hconf,
                environment=das_env)
            self.devappserver_container.start()
            get_logger().info('Starting container: %s',
                              devappserver_container_name)
        # The application container needs several environment variables
        # in order to start up the application properly, as well as
        # look for the api server in the correct place. Notes:
        #
        # GAE_PARTITION is always dev for development modules.
        # GAE_LONG_APP_ID is the "application ID". When devappserver
        # is invoked, it can be passed a "--application" flag. This
        # application must be consistent with GAE_LONG_APP_ID.
        # API_HOST is 0.0.0.0 because application container runs on the
        # same network stack as devappserver.
        # MODULE_YAML_PATH specifies the path to the app from the
        # app directory
        # TODO (find in g3 and link to here via comment)
        app_env = {'API_HOST': '0.0.0.0',
                   'API_PORT': self.internal_api_port,
                   'GAE_LONG_APP_ID': self.app_id,
                   'GAE_PARTITION': 'dev',
                   'GAE_MODULE_INSTANCE': '0',
                   'MODULE_YAML_PATH': os.path.basename(self.conf_path),
                   'GAE_MODULE_NAME': 'default', # TODO(gouzenko) parse app.yaml
                   'GAE_MODULE_VERSION': '1',
                   'GAE_SERVER_PORT': '8080',
                   'USE_MVM_AGENT': 'true'}
        # Build from the application directory iff image_name is not
        # specified.
        app_image = self.image_name or self.build_app_image()
        app_container_name = self.make_timestamped_name('test_app',
                                                        self.cur_time)
        # If devappserver is running, hook up the app to it.
        if self.run_devappserver:
            network_mode = ('container:%s' %
                            self.devappserver_container.get_id())
            ports = port_bindings = None
        else:
            port_bindings = {DEFAULT_APPLICATION_PORT: self.port}
            ports = [DEFAULT_APPLICATION_PORT]
            network_mode = None
        app_hconf = docker.utils.create_host_config(
            port_bindings=port_bindings,
            binds={
                self.log_path: {'bind': '/var/log/app_engine'}
            },
        )
        self.app_container = container.ApplicationContainer(
            self.application_configuration,
            self.dclient)
        self.app_container.create(
            name=app_container_name,
            image=app_image,
            ports=ports,
            volumes=['/var/log/app_engine'],
            host_config=app_hconf,
            environment=app_env)
        # Start as a shared network container, putting the application
        # on devappserver's network stack. (If devappserver is not
        # running, network_mode is None).
        try:
            self.app_container.start(network_mode=network_mode)
        except utils.AppstartAbort:
            # Surface the more useful "devappserver died" error if that is
            # the real cause of the failure.
            if self.run_devappserver:
                self.abort_if_not_running(self.devappserver_container)
            raise
        # Construct a pinger container and bind it to the application's network
        # stack. This will allow the pinger to attempt to connect to the
        # application's ports.
        pinger_name = self.make_timestamped_name('pinger', self.cur_time)
        self.pinger_container = container.PingerContainer(self.dclient)
        try:
            self.pinger_container.create(name=pinger_name,
                                         image=constants.PINGER_IMAGE)
        except utils.AppstartAbort:
            # A create failure here usually means the pinger image was
            # never pulled/built by "appstart init".
            if not utils.find_image(constants.PINGER_IMAGE):
                raise utils.AppstartAbort('No pinger image found. '
                                          'Did you forget to run "appstart '
                                          'init"? ')
            raise
        try:
            self.pinger_container.start(
                network_mode='container:{}'.format(self.app_container.get_id()))
        except utils.AppstartAbort:
            self.abort_if_not_running(self.app_container)
            raise
        self.wait_for_start()
        self.app_container.stream_logs()
    def stop(self):
        """Remove containers to clean up the environment.

        Images created along the way are deliberately kept (see
        create_and_run_containers).
        """
        self.stop_and_remove_containers()
    @staticmethod
    def abort_if_not_running(cont):
        """Abort (after dumping the container's logs) if cont has stopped.

        Args:
            cont: (container.Container) The container to check.

        Raises:
            utils.AppstartAbort: If the container is no longer running.
        """
        if not cont.running():
            # Dump the container's logs so the user can see why it died.
            cont.stream_logs(stream=False)
            raise utils.AppstartAbort('{0} stopped '
                                      'prematurely'.format(cont.name))
    def __exit__(self, etype, value, traceback):
        """Context-manager exit point: always stop and remove containers."""
        self.stop()
def stop_and_remove_containers(self):
"""Stop and remove application containers."""
containers_to_remove = [self.app_container,
self.devappserver_container,
self.pinger_container]
for cont in containers_to_remove:
if cont and cont.running():
cont_id = cont.get_id()
get_logger().info('Stopping %s', cont_id)
cont.kill()
get_logger().info('Removing %s', cont_id)
cont.remove()
    def wait_for_start(self):
        """Wait for the app container to start.

        Polls the application container (through the pinger container,
        which shares its network stack) once per second until it accepts
        connections on port 8080, printing progress dots when stdout is
        a terminal.

        Raises:
            utils.AppstartAbort: If the application isn't reachable on
                port 8080 within self.timeout attempts, or if the app (or
                devappserver) container stops prematurely.
        """
        host = self.app_container.host
        get_logger().info('Waiting for application to listen on port 8080')
        attempt = 1
        # Only draw the progress animation on an interactive terminal.
        graphical = sys.stdout.isatty()
        def print_if_graphical(message):
            if graphical:
                sys.stdout.write(message)
                sys.stdout.flush()
        def exit_loop_with_error(error):
            # Terminate the progress line before raising.
            print_if_graphical('\n')
            raise utils.AppstartAbort(error)
        print_if_graphical('Waiting ')
        while True:
            if attempt > self.timeout:
                exit_loop_with_error('The application server timed out.')
            # Fail fast with a useful message if a container died.
            if self.run_devappserver:
                self.abort_if_not_running(self.devappserver_container)
            self.abort_if_not_running(self.app_container)
            if attempt % 4 == 0:
                # \033[3D moves the cursor left 3 times. \033[K clears to the
                # end of the line. So, every 4th ping, clear the dots.
                print_if_graphical('\033[3D\033[K')
            else:
                print_if_graphical('.')
            if self.pinger_container.ping_application_container():
                print_if_graphical('\n')
                break
            attempt += 1
            time.sleep(1)
        get_logger().info('Your application is live. '
                          'Access it at: {0}:{1}'.format(host, str(self.port)))
@staticmethod
def app_directory_from_config(full_config_file_path):
"""Get the application root directory based on the config file.
Args:
full_config_file_path: (basestring) The absolute path to a
config file.
Returns:
(basestring): The application's root directory.
"""
conf_file_dir = os.path.dirname(full_config_file_path)
if full_config_file_path.endswith('.yaml'):
return conf_file_dir
else:
return os.path.dirname(conf_file_dir)
def build_app_image(self):
"""Build the app image from the Dockerfile in the root directory.
Returns:
(basestring) The name of the new app image.
"""
name = self.make_timestamped_name('app_image', self.cur_time)
utils.build_from_directory(self.app_dir, name)
return name
    def build_devappserver_image(self):
        """Build a layer over devappserver to include application files.

        The new image contains the user's config files.

        Returns:
            (basestring) The name of the new devappserver image.

        Raises:
            utils.AppstartAbort: If the build fails, or if the devappserver
                base image is missing entirely.
        """
        # Collect the files that should be added to the docker build
        # context.
        files_to_add = {self.conf_path: None}
        if self.application_configuration.is_java:
            # Java apps also need their web.xml alongside the config.
            files_to_add[self.get_web_xml(self.conf_path)] = None
        # The Dockerfile should add the config files to
        # the /app folder in devappserver's container.
        dockerfile = """
        FROM %(das_repo)s
        ADD %(path)s/* %(dest)s
        """ %{'das_repo': constants.DEVAPPSERVER_IMAGE,
              'path': os.path.dirname(self.conf_path),
              'dest': os.path.join('/app', self.das_offset)}
        # Construct a file-like object from the Dockerfile.
        dockerfile_obj = io.BytesIO(dockerfile.encode('utf-8'))
        build_context = utils.make_tar_build_context(dockerfile_obj,
                                                     files_to_add)
        image_name = self.make_timestamped_name('devappserver_image',
                                                self.cur_time)
        # Build the devappserver image.
        res = self.dclient.build(fileobj=build_context,
                                 custom_context=True,
                                 rm=True,
                                 nocache=self.nocache,
                                 tag=image_name)
        # Log the output of the build.
        try:
            utils.log_and_check_build_results(res, image_name)
        except utils.AppstartAbort:
            # Distinguish "base image missing" from a genuine build failure.
            if not utils.find_image(constants.DEVAPPSERVER_IMAGE):
                raise utils.AppstartAbort('No devappserver base image found. '
                                          'Did you forget to run "appstart '
                                          'init"?')
            raise
        return image_name
@staticmethod
def get_web_xml(full_config_file_path):
"""Get (what should be) the path of the web.xml file.
Args:
full_config_file_path: (basestring) The absolute path to a
.xml config file.
Returns:
(basestring) The full path to the web.xml file.
"""
return os.path.join(os.path.dirname(full_config_file_path),
'web.xml')
@staticmethod
def make_timestamped_name(base, time_str):
"""Construct a name for an image or container.
Note that naming is functionally unimportant and
serves only to make the output of 'docker images'
and 'docker ps' look cleaner.
Args:
base: (basestring) The prefix of the name.
time_str: (basestring) The name's timestamp.
Returns:
(basestring) The name of the image or container.
"""
return '%s.%s' % (base, time_str)
| |
#!/usr/bin/env python
#
# ROS node to control Nao's footsteps (testing for NaoQI 1.12)
#
# Copyright 2012 Armin Hornung and Johannes Garimort, University of Freiburg
# http://www.ros.org/wiki/nao_driver
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the University of Freiburg nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import rospy
import time
from nao_driver import NaoNode
import math
from math import fabs
import actionlib
from humanoid_nav_msgs.msg import *
from humanoid_nav_msgs.srv import StepTargetService, StepTargetServiceResponse
from humanoid_nav_msgs.srv import ClipFootstep, ClipFootstepResponse
from nao_driver.util import ( startWalkPose, clip_footstep_tuple )
LEG_LEFT = "LLeg"
LEG_RIGHT = "RLeg"
FLOAT_CMP_THR = 0.000001
STEP_TIME = 0.5


def float_equ(a, b):
    """Return True when a and b differ by no more than FLOAT_CMP_THR."""
    delta = a - b
    return abs(delta) <= FLOAT_CMP_THR
# redefine StepTarget by inheritance: shadow the imported message class with
# a subclass that rounds its pose and supports value comparison/printing.
class StepTarget(StepTarget):

    def __init__(self, x=0.0, y=0.0, theta=0.0, leg=0.0):
        """Build a step target with the pose rounded to 4 decimals."""
        super(StepTarget, self).__init__()
        self.pose.x = round(x, 4)
        self.pose.y = round(y, 4)
        self.pose.theta = round(theta, 4)
        self.leg = leg

    def __eq__(self, other):
        """Steps are equal when poses match within FLOAT_CMP_THR and legs match."""
        return (float_equ(self.pose.x, other.pose.x) and
                float_equ(self.pose.y, other.pose.y) and
                float_equ(self.pose.theta, other.pose.theta) and
                self.leg == other.leg)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __str__(self):
        return "(%f, %f, %f, %i)" % (self.pose.x, self.pose.y, self.pose.theta,
                                     self.leg)

    def __repr__(self):
        return str(self)
class NaoFootsteps(NaoNode):
def __init__(self):
NaoNode.__init__(self)
# ROS initialization:
rospy.init_node('nao_footsteps')
self.connectNaoQi()
# initial stiffness (defaults to 0 so it doesn't strain the robot when
# no teleoperation is running)
# set to 1.0 if you want to control the robot immediately
initStiffness = rospy.get_param('~init_stiffness', 0.0)
# TODO: parameterize
if initStiffness > 0.0 and initStiffness <= 1.0:
self.motionProxy.stiffnessInterpolation('Body', initStiffness, 0.5)
# last: ROS subscriptions (after all vars are initialized)
rospy.Subscriber("footstep", StepTarget, self.handleStep, queue_size=50)
# ROS services (blocking functions)
self.stepToSrv = rospy.Service("footstep_srv", StepTargetService,
self.handleStepSrv)
self.clipSrv = rospy.Service("clip_footstep_srv", ClipFootstep,
self.handleClipSrv)
# Initialize action server
self.actionServer = actionlib.SimpleActionServer(
"footsteps_execution",
ExecFootstepsAction,
execute_cb=self.footstepsExecutionCallback,
auto_start=False)
self.actionServer.start()
rospy.loginfo("nao_footsteps initialized")
def connectNaoQi(self):
"""(re-) connect to NaoQI"""
rospy.loginfo("Connecting to NaoQi at %s:%d", self.pip, self.pport)
self.motionProxy = self.getProxy("ALMotion")
if self.motionProxy is None:
exit(1)
def stopWalk(self):
"""
Stops the current walking bahavior and blocks until the clearing is
complete.
"""
try:
self.motionProxy.setWalkTargetVelocity(0.0, 0.0, 0.0, self.stepFrequency)
self.motionProxy.waitUntilWalkIsFinished()
except RuntimeError,e:
print "An error has been caught"
print e
return False
return True
def handleStep(self, data):
rospy.loginfo("Step leg: %d; target: %f %f %f", data.leg, data.pose.x,
data.pose.y, data.pose.theta)
try:
if data.leg == StepTarget.right:
leg = [LEG_RIGHT]
elif data.leg == StepTarget.left:
leg = [LEG_LEFT]
else:
rospy.logerr("Received a wrong leg constant: %d, ignoring step",
" command", data.leg)
return
footStep = [[data.pose.x, data.pose.y, data.pose.theta]]
timeList = [STEP_TIME]
self.motionProxy.setFootSteps(leg, footStep, timeList, False)
time.sleep(0.1)
print self.motionProxy.getFootSteps()
self.motionProxy.waitUntilWalkIsFinished()
return True
except RuntimeError, e:
rospy.logerr("Exception caught in handleStep:\n%s", e)
return False
def handleStepSrv(self, req):
if self.handleStep(req.step):
return StepTargetServiceResponse()
else:
return None
def handleClipping(self, step):
is_left_support = step.leg != StepTarget.left
unclipped_step = (step.pose.x, step.pose.y, step.pose.theta)
step.pose.x, step.pose.y, step.pose.theta = clip_footstep_tuple(
unclipped_step, is_left_support)
return step
def handleClipSrv(self, req):
resp = ClipFootstepResponse()
resp.step = self.handleClipping(req.step)
return resp
def footstepsExecutionCallback(self, goal):
def update_feedback(feedback, executed_footsteps):
# check if an footstep has been performed
if not len(executed_footsteps):
return
# use the last footstep in the list since this might be the new one
# (NOTE: if one step is missed here, increase the feedback rate)
leg, time, (x, y, theta) = executed_footsteps[-1]
# check if footstep information is up-to-date
if not float_equ(time, STEP_TIME):
return
leg = (StepTarget.right if leg == LEG_RIGHT else
StepTarget.left)
step = StepTarget(x, y, theta, leg)
# add the footstep only if it is a new one
try:
if feedback.executed_footsteps[-1] == step:
return
except IndexError:
pass
feedback.executed_footsteps.append(step)
legs = []
steps = []
time_list = []
for step in goal.footsteps:
if step.leg == StepTarget.right:
legs.append(LEG_RIGHT)
elif step.leg == StepTarget.left:
legs.append(LEG_LEFT)
else:
rospy.logerr("Received a wrong leg constant: %d, ignoring step "
"command", step.leg)
return
steps.append([round(step.pose.x, 4),
round(step.pose.y, 4),
round(step.pose.theta, 4)])
try:
time_list.append(time_list[-1] + STEP_TIME)
except IndexError:
time_list.append(STEP_TIME)
rospy.loginfo("Start executing footsteps %s",
[[x, y, theta, leg] for (x, y, theta), leg in
zip(steps, legs)])
feedback = ExecFootstepsFeedback()
result = ExecFootstepsResult()
success = True
self.motionProxy.setFootSteps(legs, steps, time_list, True)
while self.motionProxy.walkIsActive():
# handle preempt requests
if self.actionServer.is_preempt_requested():
self.motionProxy.stopWalk()
self.actionServer.set_preempted()
rospy.loginfo("Preempting footstep execution");
success = False
break
# get execution information from the robot and update the feedback
(_, executed_footsteps, _) = self.motionProxy.getFootSteps()
update_feedback(feedback, executed_footsteps)
self.actionServer.publish_feedback(feedback)
rospy.Rate(goal.feedback_frequency).sleep()
if success:
result.executed_footsteps = feedback.executed_footsteps
self.actionServer.set_succeeded(result)
if __name__ == '__main__':
    walker = NaoFootsteps()
    rospy.loginfo("nao_footsteps running...")
    # Block until the node is shut down, then stop any in-progress walk so
    # the robot is left in a safe state.
    rospy.spin()
    rospy.loginfo("nao_footsteps stopping...")
    walker.stopWalk()
    rospy.loginfo("nao_footsteps stopped.")
    exit(0)
| |
# -*- coding: utf-8 -*-
from zope.interface import implementer
from datetime import timedelta
from iso8601 import parse_date
from pyramid.security import Allow
from schematics.types import StringType, FloatType, IntType, URLType, BooleanType
from schematics.types.compound import ModelType
from schematics.types.serializable import serializable
from schematics.exceptions import ValidationError
from schematics.transforms import whitelist
from barbecue import vnmax
from openprocurement.api.utils import get_now, get_root
from openprocurement.api.constants import TZ
from openprocurement.api.validation import (
validate_cpv_group, validate_items_uniq
)
from openprocurement.api.models import (
Value, Model, SifterListType,
ListType, Period
)
from openprocurement.api.models import (
plain_role, listing_role,
schematics_default_role
)
from openprocurement.tender.core.models import (
Tender as BaseTender, EnquiryPeriod, PeriodStartEndRequired,
Question, Feature, Guarantee, BaseLot
)
from openprocurement.tender.core.models import (
get_tender, view_role, auction_view_role, auction_post_role,
auction_patch_role, enquiries_role, chronograph_role,
chronograph_view_role, Administrator_role,
embedded_lot_role, default_lot_role,
validate_features_uniq, validate_lots_uniq,
bids_validation_wrapper
)
from openprocurement.tender.core.utils import (
calc_auction_end_time,
calculate_business_date,
has_unanswered_questions,
has_unanswered_complaints
)
from openprocurement.tender.core.constants import (
CPV_ITEMS_CLASS_FROM
)
from openprocurement.tender.openua.models import (
Tender as OpenUATender
)
from openprocurement.tender.openua.utils import (
calculate_normalized_date
)
from openprocurement.tender.openua.constants import (
COMPLAINT_SUBMIT_TIME,
ENQUIRY_STAND_STILL_TIME,
AUCTION_PERIOD_TIME,
)
from openprocurement.tender.openeu.models import (
IAboveThresholdEUTender, Bid as BaseEUBid,
LotValue as BaseLotValue,
ComplaintModelType, Item, TenderAuctionPeriod,
ProcuringEntity, Award as BaseEUAward, Complaint,
Cancellation, OpenEUDocument as Document,
Qualification, LotAuctionPeriod,
Contract as BaseEUContract, BidModelType
)
from openprocurement.tender.openeu.models import (
eu_role, edit_role_eu, create_role_eu,
pre_qualifications_role, eu_auction_role
)
from openprocurement.tender.openeu.constants import (
TENDERING_DURATION,
QUESTIONS_STAND_STILL
)
from openprocurement.tender.esco.utils import calculate_npv
class IESCOTender(IAboveThresholdEUTender):
    """ Marker interface for ESCO tenders """
    # zope.interface marker only -- declares no attributes; the Tender model
    # below is registered as its implementer.
class Lot(BaseLot):
    """ESCO tender lot.

    Unlike base lots it carries a required ``minValue`` (bids must exceed
    it -- see LotValue.validate_value) and a required ``minimalStep``.
    """
    class Options:
        # Per-role serialization whitelists (schematics roles).
        roles = {
            'create': whitelist('id', 'title', 'title_en', 'title_ru', 'description', 'description_en', 'description_ru', 'minValue', 'guarantee', 'minimalStep'),
            'edit': whitelist('title', 'title_en', 'title_ru', 'description', 'description_en', 'description_ru', 'minValue', 'guarantee', 'minimalStep'),
            'embedded': embedded_lot_role,
            'view': default_lot_role,
            'default': default_lot_role,
            'auction_view': default_lot_role,
            'auction_patch': whitelist('id', 'auctionUrl'),
            'chronograph': whitelist('id', 'auctionPeriod'),
            'chronograph_view': whitelist('id', 'auctionPeriod', 'numberOfBids', 'status'),
        }
    # Minimal acceptable bid value for this lot (required, unlike base lots).
    minValue = ModelType(Value, required=True)
    minimalStep = ModelType(Value, required=True)
    auctionPeriod = ModelType(LotAuctionPeriod, default={})
    auctionUrl = URLType()
    guarantee = ModelType(Guarantee)
    @serializable
    def numberOfBids(self):
        """A property that is serialized by schematics exports.

        Counts active/pending bids that have an active/pending lotValue
        related to this lot.
        """
        bids = [
            bid
            for bid in self.__parent__.bids
            if self.id in [i.relatedLot for i in bid.lotValues if i.status in ["active", "pending"]] and bid.status in ["active", "pending"]
        ]
        return len(bids)
    @serializable(serialized_name="guarantee", serialize_when_none=False, type=ModelType(Guarantee))
    def lot_guarantee(self):
        # The tender-level guarantee currency (when present) overrides the
        # lot's own guarantee currency; the lot keeps its own amount.
        if self.guarantee:
            currency = self.__parent__.guarantee.currency if self.__parent__.guarantee else self.guarantee.currency
            return Guarantee(dict(amount=self.guarantee.amount, currency=currency))
    @serializable(serialized_name="minimalStep", type=ModelType(Value))
    def lot_minimalStep(self):
        # Serialize with the tender-wide currency/VAT flag, lot's own amount.
        return Value(dict(amount=self.minimalStep.amount,
                          currency=self.__parent__.minimalStep.currency,
                          valueAddedTaxIncluded=self.__parent__.minimalStep.valueAddedTaxIncluded))
    @serializable(serialized_name="minValue", type=ModelType(Value))
    def lot_minValue(self):
        # Serialize with the tender-wide currency/VAT flag, lot's own amount.
        return Value(dict(amount=self.minValue.amount,
                          currency=self.__parent__.minValue.currency,
                          valueAddedTaxIncluded=self.__parent__.minValue.valueAddedTaxIncluded))
    def validate_minimalStep(self, data, value):
        # minimalStep must not exceed the lot's minValue.
        if value and value.amount and data.get('minValue'):
            if data.get('minValue').amount < value.amount:
                raise ValidationError(u"value should be less than minValue of lot")
class ESCOValue(Value):
    """Bid value for ESCO tenders.

    The bidder supplies yearlyPayments, annualCostsReduction and
    contractDuration; on export the ``amount`` field is replaced by the
    NPV computed from them (see amount_npv below).
    """
    class Options:
        # Per-role serialization whitelists (schematics roles).
        roles = {
            'create': whitelist('amount', 'amount_npv', 'yearlyPayments', 'annualCostsReduction', 'contractDuration', 'currency', 'valueAddedTaxIncluded'),
            'edit': whitelist('amount', 'amount_npv', 'yearlyPayments', 'annualCostsReduction', 'contractDuration', 'currency', 'valueAddedTaxIncluded'),
            'auction_view': whitelist('amount', 'yearlyPayments', 'annualCostsReduction', 'contractDuration', 'currency', 'valueAddedTaxIncluded'),
            'auction_post': whitelist('amount', 'yearlyPayments', 'annualCostsReduction', 'contractDuration', 'currency', 'valueAddedTaxIncluded'),
        }
    amount = FloatType(required=False, min_value=0) # Amount as a number.
    yearlyPayments = FloatType(min_value=0.8, max_value=0.9, required=True) # The percentage of annual payments in favor of Bidder
    annualCostsReduction = FloatType(min_value=0, required=True) # Buyer's annual costs reduction
    contractDuration = IntType(min_value=1, max_value=15, required=True) # Duration in years
    @serializable(serialized_name="amount")
    def amount_npv(self):
        """ Calculated energy service contract performance indicator """
        # Overrides the raw ``amount`` field on export with the NPV derived
        # from the root tender's NBUdiscountRate.
        return calculate_npv(get_root(self).NBUdiscountRate,
                             self.annualCostsReduction,
                             self.yearlyPayments,
                             self.contractDuration)
class LotValue(BaseLotValue):
    """Per-lot bid value carrying an ESCOValue instead of a plain Value."""
    value = ModelType(ESCOValue, required=True)
    def validate_value(self, data, value):
        """Check the bid's NPV, currency and VAT flag against the related lot."""
        # Skip validation for invalid/deleted/draft bids or missing relatedLot.
        if value and isinstance(data['__parent__'], Model) and (data['__parent__'].status not in ('invalid', 'deleted', 'draft')) and data['relatedLot']:
            lots = [i for i in get_tender(data['__parent__']).lots if i.id == data['relatedLot']]
            if not lots:
                return
            lot = lots[0]
            tender = lot['__parent__']
            amount = calculate_npv(tender.NBUdiscountRate, value.annualCostsReduction, value.yearlyPayments, value.contractDuration) #XXX: Calculating value.amount manually
            if lot.minValue.amount > amount:
                raise ValidationError(u"value of bid should be greater than minValue of lot")
            if lot.get('minValue').currency != value.currency:
                raise ValidationError(u"currency of bid should be identical to currency of minValue of lot")
            if lot.get('minValue').valueAddedTaxIncluded != value.valueAddedTaxIncluded:
                raise ValidationError(u"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of minValue of lot")
class Contract(BaseEUContract):
    """ESCO EU contract model"""
    # Contract value uses the NPV-based ESCOValue instead of a plain Value.
    value = ModelType(ESCOValue)
class Award(BaseEUAward):
    """ESCO EU award model"""
    # Award value uses the NPV-based ESCOValue instead of a plain Value.
    value = ModelType(ESCOValue)
class Bid(BaseEUBid):
    """ ESCO EU bid model """
    value = ModelType(ESCOValue)
    lotValues = ListType(ModelType(LotValue), default=list())
    @bids_validation_wrapper
    def validate_value(self, data, value):
        """Validate the tender-level bid value against the tender's minValue.

        For multi-lot tenders the bid value must be posted per lot (in
        lotValues), so a tender-level value is rejected.
        """
        if isinstance(data['__parent__'], Model):
            tender = data['__parent__']
            if tender.lots:
                # Multi-lot tender: values belong in lotValues, not here.
                if value:
                    raise ValidationError(u'value should be posted for each lot of bid')
            else:
                if not value:
                    raise ValidationError(u'This field is required.')
                amount = calculate_npv(tender.NBUdiscountRate, value.annualCostsReduction, value.yearlyPayments, value.contractDuration) #XXX: Calculating value.amount manually
                if tender.minValue.amount > amount:
                    raise ValidationError(u'value of bid should be greater than minValue of tender')
                if tender.get('minValue').currency != value.currency:
                    raise ValidationError(u"currency of bid should be identical to currency of minValue of tender")
                if tender.get('minValue').valueAddedTaxIncluded != value.valueAddedTaxIncluded:
                    raise ValidationError(u"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of minValue of tender")
@implementer(IESCOTender)
class Tender(BaseTender):
    """ ESCO EU Tender model """
    class Options:
        # Serialization whitelists per user role; 'edit_<status>' entries
        # apply while the tender is in that status.
        roles = {
            'plain': plain_role,
            'create': create_role_eu,
            'edit': edit_role_eu,
            'edit_draft': edit_role_eu,
            'edit_active.tendering': edit_role_eu,
            'edit_active.pre-qualification': whitelist('status'),
            'edit_active.pre-qualification.stand-still': whitelist(),
            'edit_active.auction': whitelist(),
            'edit_active.qualification': whitelist(),
            'edit_active.awarded': whitelist(),
            'edit_complete': whitelist(),
            'edit_unsuccessful': whitelist(),
            'edit_cancelled': whitelist(),
            'view': view_role,
            'listing': listing_role,
            'auction_view': auction_view_role,
            'auction_post': auction_post_role,
            'auction_patch': auction_patch_role,
            'draft': enquiries_role,
            'active.tendering': enquiries_role,
            'active.pre-qualification': pre_qualifications_role,
            'active.pre-qualification.stand-still': pre_qualifications_role,
            'active.auction': pre_qualifications_role,
            'active.qualification': view_role,
            'active.awarded': view_role,
            'complete': view_role,
            'unsuccessful': view_role,
            'cancelled': view_role,
            'chronograph': chronograph_role,
            'chronograph_view': chronograph_view_role,
            'Administrator': Administrator_role,
            'default': schematics_default_role,
            'contracting': whitelist('doc_id', 'owner'),
        }
    procurementMethodType = StringType(default="esco.EU")
    title_en = StringType(required=True, min_length=1)
    items = ListType(ModelType(Item), required=True, min_size=1, validators=[validate_cpv_group, validate_items_uniq]) # The goods and services to be purchased, broken into line items wherever possible. Items should not be duplicated, but a quantity of 2 specified instead.
    minValue = ModelType(Value, required=True) # The total estimated value of the procurement.
    enquiryPeriod = ModelType(EnquiryPeriod, required=False)
    tenderPeriod = ModelType(PeriodStartEndRequired, required=True)
    auctionPeriod = ModelType(TenderAuctionPeriod, default={})
    hasEnquiries = BooleanType() # A Yes/No field as to whether enquiries were part of tender process.
    awardPeriod = ModelType(Period) # The date or period on which an award is anticipated to be made.
    numberOfBidders = IntType() # The number of unique tenderers who participated in the tender
    bids = SifterListType(BidModelType(Bid), default=list(), filter_by='status', filter_in_values=['invalid', 'invalid.pre-qualification', 'deleted']) # A list of all the companies who entered submissions for the tender.
    procuringEntity = ModelType(ProcuringEntity, required=True) # The entity managing the procurement, which may be different from the buyer who is paying / using the items being procured.
    awards = ListType(ModelType(Award), default=list())
    contracts = ListType(ModelType(Contract), default=list())
    minimalStep = ModelType(Value, required=True) # Auction bid step.
    questions = ListType(ModelType(Question), default=list())
    complaints = ListType(ComplaintModelType(Complaint), default=list())
    auctionUrl = URLType()
    cancellations = ListType(ModelType(Cancellation), default=list())
    features = ListType(ModelType(Feature), validators=[validate_features_uniq])
    lots = ListType(ModelType(Lot), default=list(), validators=[validate_lots_uniq])
    guarantee = ModelType(Guarantee)
    documents = ListType(ModelType(Document), default=list()) # All documents and attachments related to the tender.
    qualifications = ListType(ModelType(Qualification), default=list())
    qualificationPeriod = ModelType(Period)
    # Tender lifecycle status.
    status = StringType(choices=['draft', 'active.tendering', 'active.pre-qualification', 'active.pre-qualification.stand-still', 'active.auction',
                                 'active.qualification', 'active.awarded', 'complete', 'cancelled', 'unsuccessful'], default='active.tendering')
    # Discount rate fed into calculate_npv when deriving bid amounts
    # (see ESCOValue.amount_npv).
    NBUdiscountRate = FloatType(required=True, min_value=0, max_value=0.99)
    # NOTE(review): accreditation levels appear to gate create/edit access
    # in openprocurement core -- confirm against core's permission checks.
    create_accreditation = 3
    edit_accreditation = 4
    procuring_entity_kinds = ['general', 'special', 'defense']
    # Complaint statuses that block tender progress, reused from openua.
    block_tender_complaint_status = OpenUATender.block_tender_complaint_status
    block_complaint_status = OpenUATender.block_complaint_status
def __local_roles__(self):
roles = dict([('{}_{}'.format(self.owner, self.owner_token), 'tender_owner')])
for i in self.bids:
roles['{}_{}'.format(i.owner, i.owner_token)] = 'bid_owner'
return roles
def __acl__(self):
acl = [
(Allow, '{}_{}'.format(i.owner, i.owner_token), 'create_qualification_complaint')
for i in self.bids
if i.status in ['active', 'unsuccessful']
]
acl.extend([
(Allow, '{}_{}'.format(i.owner, i.owner_token), 'create_award_complaint')
for i in self.bids
if i.status == 'active'
])
acl.extend([
(Allow, '{}_{}'.format(self.owner, self.owner_token), 'edit_tender'),
(Allow, '{}_{}'.format(self.owner, self.owner_token), 'upload_tender_documents'),
(Allow, '{}_{}'.format(self.owner, self.owner_token), 'edit_complaint'),
])
return acl
    @serializable(serialized_name="enquiryPeriod", type=ModelType(EnquiryPeriod))
    def tender_enquiryPeriod(self):
        """Serialized enquiry period derived from tenderPeriod.

        Enquiries close QUESTIONS_STAND_STILL before the end of the
        tendering period (via calculate_business_date); clarifications may
        still be published for ENQUIRY_STAND_STILL_TIME after that.
        """
        endDate = calculate_business_date(self.tenderPeriod.endDate, -QUESTIONS_STAND_STILL, self)
        return EnquiryPeriod(dict(startDate=self.tenderPeriod.startDate,
                                  endDate=endDate,
                                  invalidationDate=self.enquiryPeriod and self.enquiryPeriod.invalidationDate,
                                  clarificationsUntil=calculate_business_date(endDate, ENQUIRY_STAND_STILL_TIME, self, True)))
    @serializable(type=ModelType(Period))
    def complaintPeriod(self):
        """Serialized period for submitting tender complaints.

        Runs from the start of tenderPeriod until COMPLAINT_SUBMIT_TIME
        before its normalized end date.
        """
        normalized_end = calculate_normalized_date(self.tenderPeriod.endDate, self)
        return Period(dict(startDate=self.tenderPeriod.startDate, endDate=calculate_business_date(normalized_end, -COMPLAINT_SUBMIT_TIME, self)))
@serializable(serialize_when_none=False)
def next_check(self):
    """Earliest moment (ISO string) the chronograph should re-examine this
    tender, or None when nothing is scheduled.

    Collects candidate check times depending on the current status —
    tendering end, pre-qualification stand-still end, auction start/end
    (per lot when multilot), award stand-still end, and pending contract
    activation — and returns the minimum.
    """
    now = get_now()
    checks = []
    # Tendering can only be closed once all complaints and questions are
    # answered.
    if self.status == 'active.tendering' and self.tenderPeriod.endDate and \
            not has_unanswered_complaints(self) and not has_unanswered_questions(self):
        checks.append(self.tenderPeriod.endDate.astimezone(TZ))
    # Pre-qualification stand-still: only when no blocking qualification
    # complaints remain on active lots.
    elif self.status == 'active.pre-qualification.stand-still' and self.qualificationPeriod and self.qualificationPeriod.endDate:
        active_lots = [lot.id for lot in self.lots if lot.status == 'active'] if self.lots else [None]
        if not any([
            i.status in self.block_complaint_status
            for q in self.qualifications
            for i in q.complaints
            if q.lotID in active_lots
        ]):
            checks.append(self.qualificationPeriod.endDate.astimezone(TZ))
    # Single-lot auction: check at the scheduled start, or at the computed
    # end time while the auction is running.
    elif not self.lots and self.status == 'active.auction' and self.auctionPeriod and self.auctionPeriod.startDate and not self.auctionPeriod.endDate:
        if now < self.auctionPeriod.startDate:
            checks.append(self.auctionPeriod.startDate.astimezone(TZ))
        elif now < calc_auction_end_time(self.numberOfBids, self.auctionPeriod.startDate).astimezone(TZ):
            checks.append(calc_auction_end_time(self.numberOfBids, self.auctionPeriod.startDate).astimezone(TZ))
    # Multilot auction: same rule, per active lot.
    elif self.lots and self.status == 'active.auction':
        for lot in self.lots:
            if lot.status != 'active' or not lot.auctionPeriod or not lot.auctionPeriod.startDate or lot.auctionPeriod.endDate:
                continue
            if now < lot.auctionPeriod.startDate:
                checks.append(lot.auctionPeriod.startDate.astimezone(TZ))
            elif now < calc_auction_end_time(lot.numberOfBids, lot.auctionPeriod.startDate).astimezone(TZ):
                checks.append(calc_auction_end_time(lot.numberOfBids, lot.auctionPeriod.startDate).astimezone(TZ))
    # Awarded, no lots: when the last award is unsuccessful and nothing is
    # blocked by complaints, check at the latest award stand-still end.
    elif not self.lots and self.status == 'active.awarded' and not any([
        i.status in self.block_complaint_status
        for i in self.complaints
    ]) and not any([
        i.status in self.block_complaint_status
        for a in self.awards
        for i in a.complaints
    ]):
        standStillEnds = [
            a.complaintPeriod.endDate.astimezone(TZ)
            for a in self.awards
            if a.complaintPeriod.endDate
        ]
        last_award_status = self.awards[-1].status if self.awards else ''
        if standStillEnds and last_award_status == 'unsuccessful':
            checks.append(max(standStillEnds))
    # Qualification/awarded with lots: same stand-still rule, per active lot,
    # skipping lots with pending (blocking) complaints.
    elif self.lots and self.status in ['active.qualification', 'active.awarded'] and not any([
        i.status in self.block_complaint_status and i.relatedLot is None
        for i in self.complaints
    ]):
        for lot in self.lots:
            if lot['status'] != 'active':
                continue
            lot_awards = [i for i in self.awards if i.lotID == lot.id]
            pending_complaints = any([
                i['status'] in self.block_complaint_status and i.relatedLot == lot.id
                for i in self.complaints
            ])
            pending_awards_complaints = any([
                i.status in self.block_complaint_status
                for a in lot_awards
                for i in a.complaints
            ])
            standStillEnds = [
                a.complaintPeriod.endDate.astimezone(TZ)
                for a in lot_awards
                if a.complaintPeriod.endDate
            ]
            last_award_status = lot_awards[-1].status if lot_awards else ''
            if not pending_complaints and not pending_awards_complaints and standStillEnds and last_award_status == 'unsuccessful':
                checks.append(max(standStillEnds))
    # Any active status: an active award without a contract still needs a
    # check at its award date.
    if self.status.startswith('active'):
        for award in self.awards:
            if award.status == 'active' and not any([i.awardID == award.id for i in self.contracts]):
                checks.append(award.date)
    return min(checks).isoformat() if checks else None
@serializable
def numberOfBids(self):
    """Number of bids that count towards the auction (active or pending)."""
    countable = ("active", "pending",)
    return sum(1 for bid in self.bids if bid.status in countable)
@serializable(serialized_name="minValue", type=ModelType(Value))
def tender_minValue(self):
    """Aggregate minValue: the sum over lots when lots exist, otherwise the
    tender's own minValue (currency/VAT flags taken from the tender)."""
    if not self.lots:
        return self.minValue
    total = sum(lot.minValue.amount for lot in self.lots)
    return Value({'amount': total,
                  'currency': self.minValue.currency,
                  'valueAddedTaxIncluded': self.minValue.valueAddedTaxIncluded})
@serializable(serialized_name="guarantee", serialize_when_none=False, type=ModelType(Guarantee))
def tender_guarantee(self):
    """Aggregate guarantee over lots, falling back to the tender-level one.

    Amount is the sum of lot guarantees; currency is the tender guarantee's
    currency when present, otherwise the first lot's.
    """
    if not self.lots:
        return self.guarantee
    amounts = [lot.guarantee.amount for lot in self.lots if lot.guarantee]
    if not amounts:
        return self.guarantee
    if self.guarantee:
        currency = self.guarantee.currency
    else:
        currencies = [lot.guarantee.currency for lot in self.lots
                      if lot.guarantee]
        currency = currencies[0] if currencies else None
    return Guarantee({'amount': sum(amounts), 'currency': currency})
@serializable(serialized_name="minimalStep", type=ModelType(Value))
def tender_minimalStep(self):
    """Aggregate minimalStep: the smallest lot step when lots exist,
    otherwise the tender's own (currency/VAT flags from the tender)."""
    if not self.lots:
        return self.minimalStep
    smallest = min(lot.minimalStep.amount for lot in self.lots)
    return Value({'amount': smallest,
                  'currency': self.minimalStep.currency,
                  'valueAddedTaxIncluded': self.minimalStep.valueAddedTaxIncluded})
def validate_items(self, data, items):
    """Require all items to share one CPV class (first four digits).

    The '336' CPV group is exempt — TODO confirm the intended exemption.
    Tenders whose first revision predates CPV_ITEMS_CLASS_FROM (or brand
    new tenders, which have no revisions yet) fall through to the older
    validate_cpv_group rule instead.
    """
    cpv_336_group = items[0].classification.id[:3] == '336' if items else False
    if not cpv_336_group and (data.get('revisions')[0].date if data.get('revisions') else get_now()) > CPV_ITEMS_CLASS_FROM and items and len(set([i.classification.id[:4] for i in items])) != 1:
        raise ValidationError(u"CPV class of items should be identical")
    else:
        validate_cpv_group(items)
def validate_features(self, data, features):
    """Cap the summed maximum feature weight at 30% (0.3).

    With lots, the cap applies per lot over features targeting the
    tenderer, the lot itself, or items related to that lot; without lots
    it applies over all features. vnmax presumably sums the max feature
    values — TODO confirm against its definition.
    """
    if features and data['lots'] and any([
        round(vnmax([
            i
            for i in features
            if i.featureOf == 'tenderer' or i.featureOf == 'lot' and i.relatedItem == lot['id'] or i.featureOf == 'item' and i.relatedItem in [j.id for j in data['items'] if j.relatedLot == lot['id']]
        ]), 15) > 0.3
        for lot in data['lots']
    ]):
        raise ValidationError(u"Sum of max value of all features for lot should be less then or equal to 30%")
    elif features and not data['lots'] and round(vnmax(features), 15) > 0.3:
        raise ValidationError(u"Sum of max value of all features should be less then or equal to 30%")
def validate_auctionUrl(self, data, url):
    """Forbid a tender-level auctionUrl on multilot tenders (each lot gets
    its own URL)."""
    if not url:
        return
    if data['lots']:
        raise ValidationError(u"url should be posted for each lot")
def validate_minimalStep(self, data, value):
    """Check minimalStep against the tender's minValue: amount bound plus
    matching currency and VAT flag."""
    min_value = data.get('minValue')
    if not (value and value.amount and min_value):
        return
    if min_value.amount < value.amount:
        raise ValidationError(u"value should be less than minValue of tender")
    if min_value.currency != value.currency:
        raise ValidationError(u"currency should be identical to currency of minValue of tender")
    if min_value.valueAddedTaxIncluded != value.valueAddedTaxIncluded:
        raise ValidationError(u"valueAddedTaxIncluded should be identical to valueAddedTaxIncluded of minValue of tender")
def validate_tenderPeriod(self, data, period):
    """tenderPeriod must start no earlier than ~now (for new tenders) and
    span at least TENDERING_DURATION."""
    # if data['_rev'] is None when tender was created just now
    # The 10-minute allowance presumably absorbs client/server clock skew —
    # TODO confirm.
    if not data['_rev'] and calculate_business_date(get_now(), -timedelta(minutes=10)) >= period.startDate:
        raise ValidationError(u"tenderPeriod.startDate should be in greater than current date")
    if period and calculate_business_date(period.startDate, TENDERING_DURATION, data) > period.endDate:
        raise ValidationError(u"tenderPeriod should be greater than {} days".format(TENDERING_DAYS))
def validate_awardPeriod(self, data, period):
    """awardPeriod may only begin after both the auction and the tendering
    phases have ended."""
    if not (period and period.startDate):
        return
    auction = data.get('auctionPeriod')
    if auction and auction.endDate and period.startDate < auction.endDate:
        raise ValidationError(u"period should begin after auctionPeriod")
    tendering = data.get('tenderPeriod')
    if tendering and tendering.endDate and period.startDate < tendering.endDate:
        raise ValidationError(u"period should begin after tenderPeriod")
def validate_lots(self, data, value):
    """All lot guarantees must share a single currency."""
    currencies = {lot.guarantee.currency for lot in value if lot.guarantee}
    if len(currencies) > 1:
        raise ValidationError(u"lot guarantee currency should be identical to tender guarantee currency")
def check_auction_time(self):
    """Clear scheduled auction start dates that fall too late.

    A start date is dropped when it exceeds shouldStartAfter plus the
    allowed AUCTION_PERIOD_TIME window, at tender and at lot level.
    """
    def _too_late(period):
        if not (period and period.startDate and period.shouldStartAfter):
            return False
        deadline = calculate_business_date(
            parse_date(period.shouldStartAfter), AUCTION_PERIOD_TIME,
            self, True)
        return period.startDate > deadline

    if _too_late(self.auctionPeriod):
        self.auctionPeriod.startDate = None
    for lot in self.lots:
        if _too_late(lot.auctionPeriod):
            lot.auctionPeriod.startDate = None
def invalidate_bids_data(self):
    """Invalidate all live bids after tender conditions change.

    Re-checks auction timing, stamps the enquiry-period invalidation date,
    and flips every bid that is not deleted or draft to 'invalid'.
    """
    self.check_auction_time()
    self.enquiryPeriod.invalidationDate = get_now()
    untouched = ("deleted", "draft")
    for bid in self.bids:
        if bid.status not in untouched:
            bid.status = "invalid"
# Public alias: expose the model under its procedure-specific name.
TenderESCOEU = Tender
| |
from collections import defaultdict
from datetime import date, datetime, timedelta
import json
import time
from django import http
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.datastructures import SortedDict
from django.views.decorators.cache import never_cache
from tower import ugettext as _
import amo
import devhub.tasks
from abuse.models import AbuseReport
from access import acl
from addons.decorators import addon_view, addon_view_factory
from addons.models import Addon, Version
from amo.decorators import json_view, post_required
from amo.utils import paginate
from amo.urlresolvers import reverse
from devhub.models import ActivityLog, AddonLog, CommentLog
from editors import forms
from editors.models import (AddonCannedResponse, EditorSubscription, EventLog,
PerformanceGraph, ReviewerScore,
ViewFastTrackQueue, ViewFullReviewQueue,
ViewPendingQueue, ViewPreliminaryQueue,
ViewQueue,
ViewUnlistedFullReviewQueue,
ViewUnlistedPendingQueue,
ViewUnlistedPreliminaryQueue)
from editors.helpers import (ViewFastTrackQueueTable, ViewFullReviewQueueTable,
ViewPendingQueueTable, ViewPreliminaryQueueTable,
ViewUnlistedFullReviewQueueTable,
ViewUnlistedPendingQueueTable,
ViewUnlistedPreliminaryQueueTable)
from reviews.forms import ReviewFlagFormSet
from reviews.models import Review, ReviewFlag
from users.models import UserProfile
from zadmin.models import get_config, set_config
from .decorators import (addons_reviewer_required, any_reviewer_required,
unlisted_addons_reviewer_required)
def base_context(**kw):
    """Template context shared by all editor pages (message of the day),
    merged with any extra keyword values."""
    context_vars = {'motd': get_config('editors_review_motd')}
    context_vars.update(kw)
    return context_vars
def context(request, **kw):
    """Editor page context: listed and unlisted queue counts plus the base
    context (extra kwargs win on key collisions)."""
    admin = is_admin_reviewer(request)
    ctx = {
        'queue_counts': queue_counts(admin_reviewer=admin),
        'unlisted_queue_counts': queue_counts(unlisted=True,
                                              admin_reviewer=admin),
    }
    ctx.update(base_context(**kw))
    return ctx
@addons_reviewer_required
def eventlog(request):
    """Paginated, filterable listing of editor activity-log events."""
    form = forms.EventLogForm(request.GET)
    events = ActivityLog.objects.editor_events()
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    if form.is_valid():
        cleaned = form.cleaned_data
        if cleaned['start']:
            events = events.filter(created__gte=cleaned['start'])
        if cleaned['end']:
            events = events.filter(created__lt=cleaned['end'])
        if cleaned['filter']:
            events = events.filter(action=cleaned['filter'].id)
    pager = amo.utils.paginate(request, events, 50)
    return render(request, 'editors/eventlog.html',
                  context(request, form=form, pager=pager,
                          motd_editable=motd_editable))
@addons_reviewer_required
def eventlog_detail(request, id):
    """Detail page for a single editor event; supports undeleting the
    associated review for admins or the original actor."""
    log = get_object_or_404(ActivityLog.objects.editor_events(), pk=id)
    # I really cannot express the depth of the insanity incarnate in
    # our logging code...
    review = None
    if len(log.arguments) > 1 and isinstance(log.arguments[1], Review):
        review = log.arguments[1]
    is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
    can_undelete = review and review.deleted and (
        is_admin or request.user.pk == log.user.pk)
    if request.method == 'POST':
        # A Form seems overkill for this.
        if request.POST['action'] == 'undelete':
            if not can_undelete:
                raise PermissionDenied
            # Undo the moderation points awarded for the original delete.
            ReviewerScore.award_moderation_points(
                log.user, review.addon, review.id, undo=True)
            review.undelete()
        return redirect('editors.eventlog.detail', id)
    return render(request, 'editors/eventlog_detail.html',
                  context(request, log=log, can_undelete=can_undelete))
@addons_reviewer_required
def beta_signed_log(request):
    """Log of all the beta files that got signed."""
    form = forms.BetaSignedLogForm(request.GET)
    signed_events = ActivityLog.objects.beta_signed_events()
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    if form.is_valid() and form.cleaned_data['filter']:
        signed_events = signed_events.filter(
            action=form.cleaned_data['filter'])
    pager = amo.utils.paginate(request, signed_events, 50)
    return render(request, 'editors/beta_signed_log.html',
                  context(request, form=form, pager=pager,
                          motd_editable=motd_editable))
@any_reviewer_required
def home(request):
    """Editor dashboard: review progress, personal/team leaderboards and
    recent editor events.

    Reviewers who only hold Personas review access are redirected to the
    themes dashboard instead.
    """
    if (not acl.action_allowed(request, 'Addons', 'Review') and
            acl.action_allowed(request, 'Personas', 'Review')):
        return http.HttpResponseRedirect(reverse('editors.themes.home'))
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    # Age buckets used by the progress bars.
    durations = (('new', _('New Add-ons (Under 5 days)')),
                 ('med', _('Passable (5 to 10 days)')),
                 ('old', _('Overdue (Over 10 days)')))
    progress, percentage = _editor_progress()
    unlisted_progress, unlisted_percentage = _editor_progress(unlisted=True)
    reviews_max_display = getattr(settings, 'EDITOR_REVIEWS_MAX_DISPLAY', 5)
    reviews_total = ActivityLog.objects.total_reviews()[:reviews_max_display]
    reviews_monthly = (
        ActivityLog.objects.monthly_reviews()[:reviews_max_display])
    reviews_total_count = ActivityLog.objects.user_approve_reviews(
        request.user).count()
    reviews_monthly_count = (
        ActivityLog.objects.current_month_user_approve_reviews(
            request.user).count())
    # Try to read user position from retrieved reviews.
    # If not available, query for it.
    reviews_total_position = (
        ActivityLog.objects.user_position(reviews_total, request.user)
        or ActivityLog.objects.total_reviews_user_position(request.user))
    reviews_monthly_position = (
        ActivityLog.objects.user_position(reviews_monthly, request.user)
        or ActivityLog.objects.monthly_reviews_user_position(request.user))
    data = context(
        request,
        reviews_total=reviews_total,
        reviews_monthly=reviews_monthly,
        reviews_total_count=reviews_total_count,
        reviews_monthly_count=reviews_monthly_count,
        reviews_total_position=reviews_total_position,
        reviews_monthly_position=reviews_monthly_position,
        new_editors=EventLog.new_editors(),
        eventlog=ActivityLog.objects.editor_events()[:6],
        progress=progress,
        unlisted_progress=unlisted_progress,
        percentage=percentage,
        unlisted_percentage=unlisted_percentage,
        durations=durations,
        reviews_max_display=reviews_max_display,
        motd_editable=motd_editable)
    return render(request, 'editors/home.html', data)
def _editor_progress(unlisted=False):
    """Return the progress (number of add-ons still unreviewed for a given
    period of time) and the percentage (out of all add-ons of that type).

    ``progress`` maps an age bucket ('new'/'med'/'old') to per-queue-type
    counts; ``percentage`` maps each queue type to its per-bucket share.
    """
    types = ['nominated', 'prelim', 'pending']
    buckets = (('new', dict(days_max=4)),
               ('med', dict(days_min=5, days_max=10)),
               ('old', dict(days_min=11)))
    progress = dict(
        (name, queue_counts(types, unlisted=unlisted, **bounds))
        for name, bounds in buckets)

    def pct(p, t):
        # Percent of (p)rogress out of (t)otal; zero progress is just 0.
        return (p / float(t)) * 100 if p > 0 else 0

    percentage = {}
    for qtype in types:
        total = sum(progress[name][qtype] for name, _ in buckets)
        percentage[qtype] = dict(
            (name, pct(progress[name][qtype], total)) for name, _ in buckets)
    return (progress, percentage)
@addons_reviewer_required
def performance(request, user_id=False):
    """Reviewer performance dashboard: monthly review charts plus an
    incentive-point breakdown (month / year / all-time).

    Admins may view another user's page via ``user_id``; everyone else
    sees their own.
    """
    user = request.amo_user
    editors = _recent_editors()
    is_admin = (acl.action_allowed(request, 'Admin', '%') or
                acl.action_allowed(request, 'ReviewerAdminTools', 'View'))
    if is_admin and user_id:
        try:
            user = UserProfile.objects.get(pk=user_id)
        except UserProfile.DoesNotExist:
            pass  # Use request.amo_user from above.
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    monthly_data = _performance_by_month(user.id)
    performance_total = _performance_total(monthly_data)
    # Incentive point breakdown.
    today = date.today()
    month_ago = today - timedelta(days=30)
    year_ago = today - timedelta(days=365)
    point_total = ReviewerScore.get_total(user)
    totals = ReviewerScore.get_breakdown(user)
    months = ReviewerScore.get_breakdown_since(user, month_ago)
    years = ReviewerScore.get_breakdown_since(user, year_ago)

    def _sum(scores, types, exclude=False):
        """Sum the `total` property for items in `scores` that have an
        `atype` that is included in `types` when `exclude` is False
        (default) or not in `types` when `exclude` is True.

        (Parameter renamed from `iter`, which shadowed the builtin.)"""
        return sum(s.total
                   for s in scores
                   if (s.atype in types) == (not exclude))

    breakdown = {
        'month': {
            'addons': _sum(months, amo.GROUP_TYPE_ADDON),
            'themes': _sum(months, amo.GROUP_TYPE_THEME),
            'other': _sum(months, amo.GROUP_TYPE_ADDON + amo.GROUP_TYPE_THEME,
                          exclude=True)
        },
        'year': {
            'addons': _sum(years, amo.GROUP_TYPE_ADDON),
            'themes': _sum(years, amo.GROUP_TYPE_THEME),
            'other': _sum(years, amo.GROUP_TYPE_ADDON + amo.GROUP_TYPE_THEME,
                          exclude=True)
        },
        'total': {
            'addons': _sum(totals, amo.GROUP_TYPE_ADDON),
            'themes': _sum(totals, amo.GROUP_TYPE_THEME),
            'other': _sum(totals, amo.GROUP_TYPE_ADDON + amo.GROUP_TYPE_THEME,
                          exclude=True)
        }
    }
    data = context(request,
                   monthly_data=json.dumps(monthly_data),
                   performance_month=performance_total['month'],
                   performance_year=performance_total['year'],
                   breakdown=breakdown, point_total=point_total,
                   editors=editors, current_user=user, is_admin=is_admin,
                   is_user=(request.amo_user.id == user.id),
                   motd_editable=motd_editable)
    return render(request, 'editors/performance.html', data)
def _recent_editors(days=90):
    """Editors with review-queue activity within the last ``days`` days,
    ordered by display name."""
    cutoff = datetime.now() - timedelta(days=days)
    recent = UserProfile.objects.filter(
        activitylog__action__in=amo.LOG_REVIEW_QUEUE,
        activitylog__created__gt=cutoff)
    return recent.order_by('display_name').distinct()
def _performance_total(data):
    """Collapse per-month performance rows into year-to-date and
    month-to-date totals.

    TODO(gkoberger): Fix this so it's the past X, rather than this X to
    date. (ex: March 15-April 15, not April 1 - April 15)
    """
    year_total = dict(usercount=0, teamamt=0, teamcount=0, teamavg=0)
    month_total = dict(usercount=0, teamamt=0, teamcount=0, teamavg=0)
    now = datetime.now()
    year_prefix = str(now.year)
    for label, row in data.items():
        if label.startswith(year_prefix):
            year_total['usercount'] += row['usercount']
            year_total['teamamt'] += row['teamamt']
            year_total['teamcount'] += row['teamcount']
    # The current month's row (if present) is used verbatim.
    month_label = now.isoformat()[:7]
    if month_label in data:
        month_total = data[month_label]
    return dict(month=month_total, year=year_total)
def _performance_by_month(user_id, months=12, end_month=None, end_year=None):
    """Aggregate review counts per month over a trailing ``months`` window.

    Returns an ordered dict keyed 'YYYY-MM' with team totals, the given
    user's own count, and the team average (stringified for JSON).
    """
    monthly_data = SortedDict()
    now = datetime.now()
    if not end_month:
        end_month = now.month
    if not end_year:
        end_year = now.year
    # mktime normalizes out-of-range month values (e.g. month 13 becomes
    # January of the next year), so the +1 arithmetic below is safe.
    end_time = time.mktime((end_year, end_month + 1, 1, 0, 0, 0, 0, 0, -1))
    start_time = time.mktime((end_year, end_month + 1 - months,
                              1, 0, 0, 0, 0, 0, -1))
    sql = (PerformanceGraph.objects
           .filter_raw('log_activity.created >=',
                       date.fromtimestamp(start_time).isoformat())
           .filter_raw('log_activity.created <',
                       date.fromtimestamp(end_time).isoformat()))
    for row in sql.all():
        label = row.approval_created.isoformat()[:7]
        if label not in monthly_data:
            xaxis = row.approval_created.strftime('%b %Y')
            monthly_data[label] = dict(teamcount=0, usercount=0,
                                       teamamt=0, label=xaxis)
        monthly_data[label]['teamamt'] = monthly_data[label]['teamamt'] + 1
        monthly_data_count = monthly_data[label]['teamcount']
        monthly_data[label]['teamcount'] = monthly_data_count + row.total
        if row.user_id == user_id:
            user_count = monthly_data[label]['usercount']
            monthly_data[label]['usercount'] = user_count + row.total
    # Calculate averages
    for i, vals in monthly_data.items():
        average = round(vals['teamcount'] / float(vals['teamamt']), 1)
        monthly_data[i]['teamavg'] = str(average)  # floats aren't valid json
    return monthly_data
@addons_reviewer_required
def motd(request):
    """Show the editors' message of the day; the edit form only appears for
    users holding the MOTD-edit permission."""
    form = None
    if acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit'):
        form = forms.MOTDForm(
            initial={'motd': get_config('editors_review_motd')})
    return render(request, 'editors/motd.html', context(request, form=form))
@addons_reviewer_required
@post_required
def save_motd(request):
    """Persist a new message of the day (requires the MOTD-edit
    permission); re-render the form on validation errors."""
    if not acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit'):
        raise PermissionDenied
    form = forms.MOTDForm(request.POST)
    if not form.is_valid():
        return render(request, 'editors/motd.html',
                      context(request, form=form))
    set_config('editors_review_motd', form.cleaned_data['motd'])
    return redirect(reverse('editors.motd'))
def is_admin_reviewer(request):
    """Whether the requesting user may use the reviewer admin tools."""
    allowed = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
    return allowed
def exclude_admin_only_addons(queryset):
    """Restrict a queryset to add-ons not flagged for admin-only review."""
    non_admin_only = queryset.filter(admin_review=False)
    return non_admin_only
def _queue(request, TableObj, tab, qs=None, unlisted=False):
    """Render a review-queue page for the given table class.

    ``TableObj`` supplies the backing model, default ordering and row
    rendering; ``tab`` selects the active tab in the template. Admin-only
    add-ons are hidden from non-admin reviewers unless they explicitly
    searched.
    """
    if qs is None:
        qs = TableObj.Meta.model.objects.all()
    if request.GET:
        search_form = forms.QueueSearchForm(request.GET)
        if search_form.is_valid():
            qs = search_form.filter_qs(qs)
    else:
        search_form = forms.QueueSearchForm()
    admin_reviewer = is_admin_reviewer(request)
    if not admin_reviewer and not search_form.data.get('searching'):
        qs = exclude_admin_only_addons(qs)
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    order_by = request.GET.get('sort', TableObj.default_order_by())
    order_by = TableObj.translate_sort_cols(order_by)
    table = TableObj(data=qs, order_by=order_by)
    # Clamp per_page to (0, 200]; fall back to the default on junk input.
    default = 100
    per_page = request.GET.get('per_page', default)
    try:
        per_page = int(per_page)
    except ValueError:
        per_page = default
    if per_page <= 0 or per_page > 200:
        per_page = default
    page = paginate(request, table.rows, per_page=per_page)
    table.set_page(page)
    return render(request, 'editors/queue.html',
                  context(request, table=table, page=page, tab=tab,
                          search_form=search_form,
                          point_types=amo.REVIEWED_AMO,
                          unlisted=unlisted,
                          motd_editable=motd_editable))
def queue_counts(type=None, unlisted=False, admin_reviewer=False, **kw):
    """Return review-queue size counters.

    ``type`` may be a single queue name (returns one int) or a list of
    names (restricts the returned dict); extra kwargs (days_min/days_max)
    bound the waiting time. Counts are stored as callables so that
    unrequested queues are never evaluated.
    """
    def construct_query(query_type, days_min=None, days_max=None):
        # Returns the bound .count method (a callable), not a number.
        query = query_type.objects
        if not admin_reviewer:
            query = exclude_admin_only_addons(query)
        if days_min:
            query = query.having('waiting_time_days >=', days_min)
        if days_max:
            query = query.having('waiting_time_days <=', days_max)
        return query.count
    counts = {'pending': construct_query(ViewPendingQueue, **kw),
              'nominated': construct_query(ViewFullReviewQueue, **kw),
              'prelim': construct_query(ViewPreliminaryQueue, **kw),
              'fast_track': construct_query(ViewFastTrackQueue, **kw),
              'moderated': (
                  Review.objects.filter(reviewflag__isnull=False,
                                        editorreview=1).count)}
    if unlisted:
        # Unlisted queues have no fast-track or moderation counterparts.
        counts = {
            'pending': construct_query(ViewUnlistedPendingQueue, **kw),
            'nominated': construct_query(ViewUnlistedFullReviewQueue, **kw),
            'prelim': construct_query(ViewUnlistedPreliminaryQueue, **kw)}
    rv = {}
    if isinstance(type, basestring):
        return counts[type]()
    for k, v in counts.items():
        if not isinstance(type, list) or k in type:
            rv[k] = v()
    return rv
@addons_reviewer_required
def queue(request):
    """Default queue entry point: bounce to the pending queue."""
    pending_url = reverse('editors.queue_pending')
    return redirect(pending_url)
@addons_reviewer_required
def queue_nominated(request):
    """Full-review (nominated) queue listing."""
    return _queue(request, ViewFullReviewQueueTable, tab='nominated')
@addons_reviewer_required
def queue_pending(request):
    """Pending-update queue listing."""
    return _queue(request, ViewPendingQueueTable, tab='pending')
@addons_reviewer_required
def queue_prelim(request):
    """Preliminary-review queue listing."""
    return _queue(request, ViewPreliminaryQueueTable, tab='prelim')
@addons_reviewer_required
def queue_fast_track(request):
    """Fast-track queue listing."""
    return _queue(request, ViewFastTrackQueueTable, tab='fast_track')
@addons_reviewer_required
def queue_moderated(request):
    """Moderation queue: user reviews flagged for editor attention.

    GET renders the flagged reviews; POST applies the moderation formset
    and redirects back to this queue.
    """
    rf = (Review.objects.exclude(Q(addon__isnull=True) |
                                 Q(reviewflag__isnull=True))
          .filter(editorreview=1)
          .order_by('reviewflag__created'))
    page = paginate(request, rf, per_page=20)
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    flags = dict(ReviewFlag.FLAGS)
    reviews_formset = ReviewFlagFormSet(request.POST or None,
                                        queryset=page.object_list,
                                        request=request)
    if request.method == 'POST':
        if reviews_formset.is_valid():
            reviews_formset.save()
        else:
            amo.messages.error(
                request, ' '.join(e.as_text() or _('An unknown error occurred')
                                  for e in reviews_formset.errors))
        return redirect(reverse('editors.queue_moderated'))
    return render(request, 'editors/queue.html',
                  context(request, reviews_formset=reviews_formset,
                          tab='moderated', page=page, flags=flags,
                          search_form=None,
                          point_types=amo.REVIEWED_AMO,
                          motd_editable=motd_editable))
@unlisted_addons_reviewer_required
def unlisted_queue(request):
    """Default unlisted queue entry point: bounce to the pending queue."""
    pending_url = reverse('editors.unlisted_queue_pending')
    return redirect(pending_url)
@unlisted_addons_reviewer_required
def unlisted_queue_nominated(request):
    """Unlisted full-review (nominated) queue listing."""
    return _queue(request, ViewUnlistedFullReviewQueueTable,
                  tab='nominated', unlisted=True)
@unlisted_addons_reviewer_required
def unlisted_queue_pending(request):
    """Unlisted pending-update queue listing."""
    return _queue(request, ViewUnlistedPendingQueueTable,
                  tab='pending', unlisted=True)
@unlisted_addons_reviewer_required
def unlisted_queue_prelim(request):
    """Unlisted preliminary-review queue listing."""
    return _queue(request, ViewUnlistedPreliminaryQueueTable,
                  tab='prelim', unlisted=True)
@addons_reviewer_required
@post_required
@json_view
def application_versions_json(request):
    """AJAX endpoint: version form choices for the posted application id."""
    form = forms.QueueSearchForm()
    app_id = request.POST['application_id']
    return {'choices': form.version_choices_for_app_id(app_id)}
@addons_reviewer_required
@addon_view_factory(qs=Addon.with_unlisted.all)
def review(request, addon):
    """Main add-on review page.

    POST processes the review form and redirects back to the relevant
    queue; GET renders the review UI with version history (including
    pseudo-entries for deleted versions rebuilt from the activity log),
    canned responses and reviewer flags.
    """
    if not addon.is_listed and not acl.check_unlisted_addons_reviewer(request):
        raise http.Http404
    version = addon.latest_version
    if not settings.ALLOW_SELF_REVIEWS and addon.has_author(request.amo_user):
        amo.messages.warning(request, _('Self-reviews are not allowed.'))
        return redirect(reverse('editors.queue'))
    form = forms.get_review_form(request.POST or None, request=request,
                                 addon=addon, version=version)
    queue_type = (form.helper.review_type if form.helper.review_type
                  != 'preliminary' else 'prelim')
    if addon.is_listed:
        redirect_url = reverse('editors.queue_%s' % queue_type)
    else:
        redirect_url = reverse('editors.unlisted_queue_%s' % queue_type)
    is_admin = acl.action_allowed(request, 'Addons', 'Edit')
    if request.method == 'POST' and form.is_valid():
        form.helper.process()
        if form.cleaned_data.get('notify'):
            EditorSubscription.objects.get_or_create(user=request.amo_user,
                                                     addon=addon)
        if form.cleaned_data.get('adminflag') and is_admin:
            addon.update(admin_review=False)
        amo.messages.success(request, _('Review successfully processed.'))
        return redirect(redirect_url)
    # Kick off validation tasks for any files in this version which don't have
    # cached validation, since editors will almost certainly need to access
    # them. But only if we're not running in eager mode, since that could mean
    # blocking page load for several minutes.
    if not getattr(settings, 'CELERY_ALWAYS_EAGER', False):
        for file_ in version.files.all():
            if not file_.has_been_validated:
                devhub.tasks.validate(file_)
    canned = AddonCannedResponse.objects.all()
    actions = form.helper.actions.items()
    statuses = [amo.STATUS_PUBLIC, amo.STATUS_LITE,
                amo.STATUS_LITE_AND_NOMINATED]
    try:
        show_diff = (addon.versions.exclude(id=version.id)
                     .filter(files__isnull=False,
                             created__lt=version.created,
                             files__status__in=statuses)
                     .latest())
    except Version.DoesNotExist:
        show_diff = None
    # The actions we should show a minimal form from.
    actions_minimal = [k for (k, a) in actions if not a.get('minimal')]
    # We only allow the user to check/uncheck files for "pending"
    allow_unchecking_files = form.helper.review_type == "pending"
    versions = (Version.objects.filter(addon=addon)
                .exclude(files__status=amo.STATUS_BETA)
                .order_by('-created')
                .transform(Version.transformer_activity)
                .transform(Version.transformer))

    class PseudoVersion(object):
        """Stand-in for a deleted version, rebuilt from its activity log."""
        def __init__(self):
            self.all_activity = []

        all_files = ()
        approvalnotes = None
        compatible_apps_ordered = ()
        releasenotes = None
        # BUG FIX: a stray trailing comma previously made this the tuple
        # ('Deleted',) instead of the string 'Deleted'.
        status = 'Deleted'

        @property
        def created(self):
            return self.all_activity[0].created

        @property
        def version(self):
            return (self.all_activity[0].activity_log
                    .details.get('version', '[deleted]'))

    # Grab review history for deleted versions of this add-on
    comments = (CommentLog.objects
                .filter(activity_log__action__in=amo.LOG_REVIEW_QUEUE,
                        activity_log__versionlog=None,
                        activity_log__addonlog__addon=addon)
                .order_by('created')
                .select_related('activity_log'))
    comment_versions = defaultdict(PseudoVersion)
    for c in comments:
        c.version = c.activity_log.details.get('version', c.created)
        comment_versions[c.version].all_activity.append(c)
    all_versions = comment_versions.values()
    all_versions.extend(versions)
    all_versions.sort(key=lambda v: v.created,
                      reverse=True)
    pager = amo.utils.paginate(request, all_versions, 10)
    num_pages = pager.paginator.num_pages
    count = pager.paginator.count
    try:
        flags = ViewQueue.objects.get(id=addon.id).flags
    except ViewQueue.DoesNotExist:
        flags = []
    user_changes_actions = [
        amo.LOG.ADD_USER_WITH_ROLE.id,
        amo.LOG.CHANGE_USER_WITH_ROLE.id,
        amo.LOG.REMOVE_USER_WITH_ROLE.id]
    user_changes_log = AddonLog.objects.filter(
        activity_log__action__in=user_changes_actions,
        addon=addon).order_by('id')
    ctx = context(request, version=version, addon=addon,
                  pager=pager, num_pages=num_pages, count=count, flags=flags,
                  form=form, canned=canned, is_admin=is_admin,
                  show_diff=show_diff,
                  allow_unchecking_files=allow_unchecking_files,
                  actions=actions, actions_minimal=actions_minimal,
                  whiteboard_form=forms.WhiteboardForm(instance=addon),
                  user_changes=user_changes_log,
                  unlisted=not addon.is_listed)
    return render(request, 'editors/review.html', ctx)
@never_cache
@json_view
@addons_reviewer_required
def review_viewing(request):
    """Claim (or report) which reviewer is currently viewing an add-on.

    Uses a short-lived cache key as a soft lock: if nobody (or we) hold
    it, refresh the claim; otherwise report the other viewer's name.
    """
    if 'addon_id' not in request.POST:
        return {}
    addon_id = request.POST['addon_id']
    user_id = request.amo_user.id
    key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
    interval = amo.EDITOR_VIEWING_INTERVAL
    viewer = cache.get(key)
    if viewer and viewer != user_id:
        # Somebody else is in; just report who.
        current_name = UserProfile.objects.get(pk=viewer).name
        is_user = 0
    else:
        # We want to save it for twice as long as the ping interval,
        # just to account for latency and the like.
        cache.set(key, user_id, interval * 2)
        viewer = user_id
        current_name = request.amo_user.name
        is_user = 1
    return {'current': viewer, 'current_name': current_name,
            'is_user': is_user, 'interval_seconds': interval}
@never_cache
@json_view
@addons_reviewer_required
def queue_viewing(request):
    """Map each posted add-on id to the display name of another reviewer
    currently viewing it (omitting unviewed add-ons and our own views)."""
    if 'addon_ids' not in request.POST:
        return {}
    user_id = request.amo_user.id
    viewing = {}
    for raw_id in request.POST['addon_ids'].split(','):
        addon_id = raw_id.strip()
        key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, addon_id)
        viewer = cache.get(key)
        if viewer and viewer != user_id:
            viewing[addon_id] = (
                UserProfile.objects.get(id=viewer).display_name)
    return viewing
@json_view
@addons_reviewer_required
def queue_version_notes(request, addon_id):
    """AJAX endpoint: release and approval notes of the latest version."""
    addon = get_object_or_404(Addon, pk=addon_id)
    latest = addon.latest_version
    return {'releasenotes': unicode(latest.releasenotes),
            'approvalnotes': latest.approvalnotes}
@addons_reviewer_required
def reviewlog(request):
    """Paginated, searchable log of review actions.

    Defaults to the current month when no date range is supplied; logs for
    unlisted add-ons are hidden from non-senior reviewers.
    """
    data = request.GET.copy()
    motd_editable = acl.action_allowed(request, 'AddonReviewerMOTD', 'Edit')
    if not data.get('start') and not data.get('end'):
        today = date.today()
        data['start'] = date(today.year, today.month, 1)
    form = forms.ReviewLogForm(data)
    approvals = ActivityLog.objects.review_queue()
    if not acl.check_unlisted_addons_reviewer(request):
        # Display logs related to unlisted add-ons only to senior reviewers.
        approvals = approvals.filter(addonlog__addon__is_listed=True)
    if form.is_valid():
        data = form.cleaned_data
        if data['start']:
            approvals = approvals.filter(created__gte=data['start'])
        if data['end']:
            approvals = approvals.filter(created__lt=data['end'])
        if data['search']:
            term = data['search']
            approvals = approvals.filter(
                Q(commentlog__comments__icontains=term) |
                Q(addonlog__addon__name__localized_string__icontains=term) |
                Q(user__display_name__icontains=term) |
                Q(user__username__icontains=term)).distinct()
    pager = amo.utils.paginate(request, approvals, 50)
    # Human-readable labels for each review action id.
    ad = {
        amo.LOG.APPROVE_VERSION.id: _('was approved'),
        amo.LOG.PRELIMINARY_VERSION.id: _('given preliminary review'),
        amo.LOG.REJECT_VERSION.id: _('rejected'),
        amo.LOG.ESCALATE_VERSION.id: _(
            'escalated', 'editors_review_history_nominated_adminreview'),
        amo.LOG.REQUEST_INFORMATION.id: _('needs more information'),
        amo.LOG.REQUEST_SUPER_REVIEW.id: _('needs super review'),
        amo.LOG.COMMENT_VERSION.id: _('commented'),
    }
    data = context(request, form=form, pager=pager, ACTION_DICT=ad,
                   motd_editable=motd_editable)
    return render(request, 'editors/reviewlog.html', data)
@addons_reviewer_required
@addon_view
def abuse_reports(request, addon):
    """List abuse reports filed against *addon*, newest first."""
    report_qs = AbuseReport.objects.filter(addon=addon).order_by('-created')
    report_count = report_qs.count()
    page = amo.utils.paginate(request, report_qs)
    return render(request, 'editors/abuse_reports.html',
                  context(request, addon=addon, reports=page,
                          total=report_count))
@addons_reviewer_required
def leaderboard(request):
    """Render the reviewer leaderboard, ranked by reviewer score."""
    scores = ReviewerScore.all_users_by_score()
    ctx = context(request, scores=scores)
    return render(request, 'editors/leaderboard.html', ctx)
@addons_reviewer_required
@addon_view_factory(qs=Addon.with_unlisted.all)
def whiteboard(request, addon):
    """Save an add-on's reviewer whiteboard; reject anything else."""
    form = forms.WhiteboardForm(request.POST or None, instance=addon)
    if not form.is_valid():
        # Any submission that doesn't validate is treated as forbidden.
        raise PermissionDenied
    saved = form.save()
    return redirect('editors.review', saved.pk)
| |
from __future__ import print_function
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import matplotlib.pyplot as plt
from lasagne.layers import InputLayer, Conv1DLayer, Pool1DLayer
UPSAMPLE = True   # bootstrap-resample the minority (MW) class to balance labels
VERBOSE = False   # per-mini-batch progress printing inside main()

# Load EEG data
# base_dir is two directories above this file; the .npy archives live in
# <base_dir>/data.
base_dir = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__), os.pardir), os.pardir))
data_dir = os.path.join(base_dir, "data")
data = np.load(os.path.join(data_dir, 'all_data_6_2d_full.npy'))
# Force shape (samples, 1, 64, 512) — presumably 64 EEG channels of 512
# time points each; TODO confirm against the file that wrote the archive.
data = data.reshape(-1, 1, 64, 512)
# Change to 64 channels of 1D vectors
data = np.transpose(data,(0, 2, 1, 3)) # Equivalent to tensor dimshuffle
data = data.squeeze()   # -> (samples, 64, 512)
data_labels = np.load(os.path.join(data_dir, 'all_data_6_2d_full_labels.npy'))
# Keep only column 1 of the label array; label 1 marks the MW class below.
data_labels = data_labels[:,1]

# Upsample the under-represented MW class
if UPSAMPLE:
    mw_idx = np.where(data_labels==1)
    mw_data = data[mw_idx]
    mw_data_labels = data_labels[mw_idx]
    num_mw = len(mw_idx[0])
    num_ot = data.shape[0] - num_mw
    # Sample with replacement until both classes have equal counts.
    num_to_bootstrap = num_ot - num_mw
    bootstrap_idx = np.random.randint(mw_data.shape[0], size=num_to_bootstrap)
    mw_data_boot = mw_data[bootstrap_idx]
    mw_data_labels_boot = mw_data_labels[bootstrap_idx]
    data = np.concatenate((data, mw_data_boot), axis=0)
    data_labels = np.concatenate((data_labels, mw_data_labels_boot), axis=0)

# Create train, validation, test sets
# NOTE(review): np.random is never seeded (the RandomState line below is
# commented out), so splits are not reproducible across runs.  Also, the
# bootstrap above duplicates MW samples *before* splitting, so copies of the
# same sample can land in both train and val/test — verify this is intended.
#rng = np.random.RandomState(225)
indices = np.random.permutation(data.shape[0])
split_train, split_val, split_test = .6, .2, .2
split_train = int(round(data.shape[0]*split_train))
split_val = split_train + int(round(data.shape[0]*split_val))
train_idx = indices[:split_train]
val_idx = indices[split_train:split_val]
test_idx = indices[split_val:]
train_data = data[train_idx,:]
train_labels = data_labels[train_idx]
val_data = data[val_idx,:]
val_labels = data_labels[val_idx]
test_data = data[test_idx,:]
test_labels = data_labels[test_idx]
def build_cnn(input_var=None):
    """Assemble the 1D conv net: conv(128x3) -> pool(4) -> dense(512) -> softmax(2).

    `input_var` is an optional Theano symbolic variable fed to the input
    layer; the expected input shape is (batch, 64 channels, 512 samples).
    """
    network = InputLayer(shape=(None, 64, 512), input_var=input_var)
    network = Conv1DLayer(
        incoming=network, num_filters=128, filter_size=3, stride=1,
        pad='same', W=lasagne.init.Normal(std=0.02),
        nonlinearity=lasagne.nonlinearities.very_leaky_rectify)
    network = Pool1DLayer(incoming=network, pool_size=4, stride=4)
    # Fully-connected hidden layer.
    network = lasagne.layers.DenseLayer(
        network,
        num_units=512,
        nonlinearity=lasagne.nonlinearities.rectify)
    # Two-way softmax output layer.
    network = lasagne.layers.DenseLayer(
        network,
        num_units=2,
        nonlinearity=lasagne.nonlinearities.softmax)
    return network
# ############################# Batch iterator ###############################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. For big datasets, you could load numpy
# arrays as memory-mapped files (np.load(..., mmap_mode='r')), or write your
# own custom data iteration function. For small datasets, you can also copy
# them to GPU at once for slightly improved performance. This would involve
# several changes in the main program, though, and is not demonstrated here.
# Notice that this function returns only mini-batches of size `batchsize`.
# If the size of the data is not a multiple of `batchsize`, it will not
# return the last (remaining) mini-batch.
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    """Yield (inputs, targets) mini-batches of exactly `batchsize` items.

    A trailing partial batch is dropped.  With ``shuffle=True`` the data is
    visited in a random order drawn from np.random; inputs/targets must then
    support fancy indexing (e.g. numpy arrays).
    """
    assert len(inputs) == len(targets)
    n = len(inputs)
    order = None
    if shuffle:
        order = np.arange(n)
        np.random.shuffle(order)
    for begin in range(0, n - batchsize + 1, batchsize):
        if shuffle:
            sel = order[begin:begin + batchsize]
        else:
            sel = slice(begin, begin + batchsize)
        yield inputs[sel], targets[sel]
# ############################## Main program ################################
# Everything else will be handled in our main program now. We could pull out
# more functions to better separate the code, but it wouldn't make it any
# easier to read.
def main(model='cnn', batch_size=500, num_epochs=500):
    """Train the CNN on the module-level train/val/test EEG splits.

    Compiles the Theano train/validation functions, runs `num_epochs`
    epochs of mini-batch SGD (Nesterov momentum), reports test-set
    performance and plots the loss curves.

    NOTE(review): the `model` parameter is currently unused — build_cnn()
    is always called.
    """
    # Prepare Theano variables for inputs and targets
    input_var = T.tensor3('inputs')
    target_var = T.ivector('targets')
    network = build_cnn(input_var)
    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.
    # Symbolic training accuracy (fraction of argmax predictions that match).
    train_acc = T.mean(T.eq(T.argmax(prediction, axis=1), target_var),
                       dtype=theano.config.floatX)
    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)
    #updates = lasagne.updates.adam(loss, params, learning_rate=0.1)
    updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.001)
    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)
    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], [loss, train_acc], updates=updates)
    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
    # Per-epoch mean losses, used for the learning-curve plot at the end.
    training_hist = []
    val_hist = []
    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        # In each epoch, we do a full pass over the training data:
        print("Training epoch {}...".format(epoch+1))
        # Note: train_acc below rebinds the symbolic expression defined above
        # as a plain Python accumulator; harmless since train_fn is already
        # compiled at this point.
        train_err = 0
        train_acc = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(train_data, train_labels, batch_size, shuffle=True):
            inputs, targets = batch
            err, acc = train_fn(inputs, targets)
            train_err += err
            train_acc += acc
            train_batches += 1
            if VERBOSE:
                print("Epoch: {} | Mini-batch: {}/{} | Elapsed time: {:.2f}s".format(
                    epoch+1,
                    train_batches,
                    train_data.shape[0]/batch_size,
                    time.time()-start_time))
        training_hist.append(train_err / train_batches)
        # And a full pass over the validation data:
        print("Validating epoch...")
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(val_data, val_labels, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1
        val_hist.append(val_err / val_batches)
        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  training accuracy:\t\t{:.2f} %".format(
            train_acc / train_batches * 100))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))
    # After training, we compute and print the test predictions/error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(test_data, test_labels, batch_size, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
    # Plot learning
    plt.plot(range(1, num_epochs+1), training_hist, label="Training")
    plt.plot(range(1, num_epochs+1), val_hist, label="Validation")
    plt.grid(True)
    plt.title("Training Curve")
    plt.xlim(1, num_epochs+1)
    plt.xlabel("Epoch #")
    plt.ylabel("Loss")
    plt.legend(loc='best')
    plt.show()
    # Optionally, you could now dump the network weights to a file like this:
    # np.savez('model.npz', *lasagne.layers.get_all_param_values(network))
    #
    # And load them again later on like this:
    # with np.load('model.npz') as f:
    #     param_values = [f['arr_%d' % i] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(network, param_values)
# Run the model only when this file is executed as a script, so importing it
# (e.g. to reuse build_cnn/iterate_minibatches) does not kick off training.
if __name__ == '__main__':
    main(batch_size=100, num_epochs=20)
| |
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov, et. al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import sys
import threading
import time
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags

# --- I/O paths --------------------------------------------------------------
flags.DEFINE_string("save_path", None, "Directory to write the model and "
                    "training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
                    "E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
    "eval_data", None, "File consisting of analogies of four tokens."
    "embedding 2 - embedding 1 + embedding 3 should be close "
    "to embedding 4."
    "E.g. https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")

# --- Model / training hyperparameters ---------------------------------------
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
                     "Number of training examples processed per step "
                     "(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 5,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")

# --- Tooling / reporting -----------------------------------------------------
# Help strings below had unbalanced parentheses; closed them.
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy('france', 'paris', 'russia') and "
    "model.nearby(['proton', 'elephant', 'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
                     "Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
                     "Save training summary to file every n seconds (rounded "
                     "up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
                     "Checkpoint the model (i.e. save the parameters) every n "
                     "seconds (rounded up to statistics interval).")

FLAGS = flags.FLAGS
class Options(object):
  """Options used by our word2vec model."""

  def __init__(self):
    # --- Model options -----------------------------------------------------
    self.emb_dim = FLAGS.embedding_size          # Embedding dimension.

    # --- Training options --------------------------------------------------
    self.train_data = FLAGS.train_data           # The training text file.
    self.num_samples = FLAGS.num_neg_samples     # Negative samples/example.
    self.learning_rate = FLAGS.learning_rate     # Initial learning rate.
    # After this many epochs the linearly-decayed learning rate reaches its
    # floor and training stops.
    self.epochs_to_train = FLAGS.epochs_to_train
    self.concurrent_steps = FLAGS.concurrent_steps  # Parallel train threads.
    self.batch_size = FLAGS.batch_size           # Examples per training step.
    # Words to predict to the left and right of the target word.
    self.window_size = FLAGS.window_size
    # Words occurring fewer than this many times are excluded from the
    # vocabulary.
    self.min_count = FLAGS.min_count
    self.subsample = FLAGS.subsample             # Subsampling threshold.

    # --- Reporting / persistence --------------------------------------------
    self.statistics_interval = FLAGS.statistics_interval  # Stats print period.
    # Summary writes round up to the nearest statistics_interval.
    self.summary_interval = FLAGS.summary_interval
    # Checkpoint writes round up to the nearest statistics_interval.
    self.checkpoint_interval = FLAGS.checkpoint_interval
    self.save_path = FLAGS.save_path             # Where summaries are written.

    # --- Eval options --------------------------------------------------------
    self.eval_data = FLAGS.eval_data             # Analogy question file.
class Word2Vec(object):
  """Word2Vec model (Skipgram).

  Builds the training and eval graphs on construction and keeps the
  tf.Session used for all subsequent run() calls.
  """

  def __init__(self, options, session):
    # `options` is an Options instance; `session` is the tf.Session this
    # object uses for every graph execution.
    self._options = options
    self._session = session
    self._word2id = {}  # word string -> vocabulary id
    self._id2word = []  # vocabulary id -> word string
    # Order matters: build_graph() populates the vocabulary and variables
    # that build_eval_graph(), save_vocab() and _read_analogies() rely on.
    self.build_graph()
    self.build_eval_graph()
    self.save_vocab()
    self._read_analogies()

  def _read_analogies(self):
    """Reads through the analogy question file.

    Returns:
      questions: a [n, 4] numpy array containing the analogy question's
                 word ids.
      questions_skipped: questions skipped due to unknown words.
    """
    questions = []
    questions_skipped = 0
    with open(self._options.eval_data) as analogy_f:
      for line in analogy_f:
        if line.startswith(":"):  # Skip comments.
          continue
        words = line.strip().lower().split(" ")
        # Map tokens to vocab ids; dict.get() yields None for OOV words.
        ids = [self._word2id.get(w.strip()) for w in words]
        if None in ids or len(ids) != 4:
          questions_skipped += 1
        else:
          questions.append(np.array(ids))
    print("Eval analogy file: ", self._options.eval_data)
    print("Questions: ", len(questions))
    print("Skipped: ", questions_skipped)
    self._analogy_questions = np.array(questions, dtype=np.int32)

  def forward(self, examples, labels):
    """Build the graph for the forward pass."""
    opts = self._options
    # Declare all variables we need.
    # Embedding: [vocab_size, emb_dim]
    init_width = 0.5 / opts.emb_dim
    emb = tf.Variable(
        tf.random_uniform(
            [opts.vocab_size, opts.emb_dim], -init_width, init_width),
        name="emb")
    self._emb = emb
    # Softmax weight: [vocab_size, emb_dim]. Transposed.
    sm_w_t = tf.Variable(
        tf.zeros([opts.vocab_size, opts.emb_dim]),
        name="sm_w_t")
    # Softmax bias: [vocab_size].
    sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
    # Global step: scalar, i.e., shape [].
    self.global_step = tf.Variable(0, name="global_step")
    # Nodes to compute the nce loss w/ candidate sampling.
    labels_matrix = tf.reshape(
        tf.cast(labels,
                dtype=tf.int64),
        [opts.batch_size, 1])
    # Negative sampling: draw `num_samples` noise word ids from the unigram
    # distribution raised to the 0.75 power.
    sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
        true_classes=labels_matrix,
        num_true=1,
        num_sampled=opts.num_samples,
        unique=True,
        range_max=opts.vocab_size,
        distortion=0.75,
        unigrams=opts.vocab_counts.tolist()))
    # Embeddings for examples: [batch_size, emb_dim]
    example_emb = tf.nn.embedding_lookup(emb, examples)
    # Weights for labels: [batch_size, emb_dim]
    true_w = tf.nn.embedding_lookup(sm_w_t, labels)
    # Biases for labels: [batch_size, 1]
    true_b = tf.nn.embedding_lookup(sm_b, labels)
    # Weights for sampled ids: [num_sampled, emb_dim]
    sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
    # Biases for sampled ids: [num_sampled, 1]
    sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
    # True logits: [batch_size, 1]
    true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
    # Sampled logits: [batch_size, num_sampled]
    # We replicate sampled noise labels for all examples in the batch
    # using the matmul.
    sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
    sampled_logits = tf.matmul(example_emb,
                               sampled_w,
                               transpose_b=True) + sampled_b_vec
    return true_logits, sampled_logits

  def nce_loss(self, true_logits, sampled_logits):
    """Build the graph for the NCE loss."""
    # cross-entropy(logits, labels): true pairs are pushed toward label 1,
    # sampled (noise) pairs toward label 0.
    opts = self._options
    true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        true_logits, tf.ones_like(true_logits))
    sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
        sampled_logits, tf.zeros_like(sampled_logits))
    # NCE-loss is the sum of the true and noise (sampled words)
    # contributions, averaged over the batch.
    nce_loss_tensor = (tf.reduce_sum(true_xent) +
                       tf.reduce_sum(sampled_xent)) / opts.batch_size
    return nce_loss_tensor

  def optimize(self, loss):
    """Build the graph to optimize the loss function."""
    # Optimizer nodes.
    # Linear learning rate decay, floored at 0.0001 of the initial rate;
    # self._words (words processed so far) drives the schedule.
    opts = self._options
    words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
    lr = opts.learning_rate * tf.maximum(
        0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
    self._lr = lr
    optimizer = tf.train.GradientDescentOptimizer(lr)
    train = optimizer.minimize(loss,
                               global_step=self.global_step,
                               gate_gradients=optimizer.GATE_NONE)
    self._train = train

  def build_eval_graph(self):
    """Build the eval graph."""
    # Eval graph
    # Each analogy task is to predict the 4th word (d) given three
    # words: a, b, c. E.g., a=italy, b=rome, c=france, we should
    # predict d=paris.
    # The eval feeds three vectors of word ids for a, b, c, each of
    # which is of size N, where N is the number of analogies we want to
    # evaluate in one batch.
    analogy_a = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_b = tf.placeholder(dtype=tf.int32)  # [N]
    analogy_c = tf.placeholder(dtype=tf.int32)  # [N]
    # Normalized word embeddings of shape [vocab_size, emb_dim].
    nemb = tf.nn.l2_normalize(self._emb, 1)
    # Each row of a_emb, b_emb, c_emb is a word's embedding vector.
    # They all have the shape [N, emb_dim]
    a_emb = tf.gather(nemb, analogy_a)  # a's embs
    b_emb = tf.gather(nemb, analogy_b)  # b's embs
    c_emb = tf.gather(nemb, analogy_c)  # c's embs
    # We expect that d's embedding vectors on the unit hyper-sphere is
    # near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
    target = c_emb + (b_emb - a_emb)
    # Compute cosine distance between each pair of target and vocab.
    # dist has shape [N, vocab_size].
    dist = tf.matmul(target, nemb, transpose_b=True)
    # For each question (row in dist), find the top 4 words.
    _, pred_idx = tf.nn.top_k(dist, 4)
    # Nodes for computing neighbors for a given word according to
    # their cosine distance.
    nearby_word = tf.placeholder(dtype=tf.int32)  # word id
    nearby_emb = tf.gather(nemb, nearby_word)
    nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
    nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                         min(1000, self._options.vocab_size))
    # Nodes in the construct graph which are used by training and
    # evaluation to run/feed/fetch.
    self._analogy_a = analogy_a
    self._analogy_b = analogy_b
    self._analogy_c = analogy_c
    self._analogy_pred_idx = pred_idx
    self._nearby_word = nearby_word
    self._nearby_val = nearby_val
    self._nearby_idx = nearby_idx

  def build_graph(self):
    """Build the graph for the full model."""
    opts = self._options
    # The training data. A text file.
    # The skipgram op also computes the vocabulary and the epoch/word
    # counters used by the learning-rate schedule.
    (words, counts, words_per_epoch, self._epoch, self._words, examples,
     labels) = word2vec.skipgram(filename=opts.train_data,
                                 batch_size=opts.batch_size,
                                 window_size=opts.window_size,
                                 min_count=opts.min_count,
                                 subsample=opts.subsample)
    (opts.vocab_words, opts.vocab_counts,
     opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
    opts.vocab_size = len(opts.vocab_words)
    print("Data file: ", opts.train_data)
    print("Vocab size: ", opts.vocab_size - 1, " + UNK")
    print("Words per epoch: ", opts.words_per_epoch)
    self._examples = examples
    self._labels = labels
    self._id2word = opts.vocab_words
    for i, w in enumerate(self._id2word):
      self._word2id[w] = i
    true_logits, sampled_logits = self.forward(examples, labels)
    loss = self.nce_loss(true_logits, sampled_logits)
    tf.scalar_summary("NCE loss", loss)
    self._loss = loss
    self.optimize(loss)
    # Properly initialize all variables.
    tf.initialize_all_variables().run()
    self.saver = tf.train.Saver()

  def save_vocab(self):
    """Save the vocabulary to a file so the model can be reloaded."""
    opts = self._options
    with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
      for i in xrange(opts.vocab_size):
        f.write(opts.vocab_words[i] + " " + str(opts.vocab_counts[i]) + "\n")

  def _train_thread_body(self):
    # Run training steps until the reader op advances to the next epoch.
    initial_epoch, = self._session.run([self._epoch])
    while True:
      _, epoch = self._session.run([self._train, self._epoch])
      if epoch != initial_epoch:
        break

  def train(self):
    """Train the model.

    Spawns `concurrent_steps` worker threads that each run training steps
    for one epoch, while the main thread periodically prints statistics and
    writes summaries/checkpoints.  Returns the epoch counter.
    """
    opts = self._options
    initial_epoch, initial_words = self._session.run([self._epoch, self._words])
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(opts.save_path,
                                            graph_def=self._session.graph_def)
    workers = []
    for _ in xrange(opts.concurrent_steps):
      t = threading.Thread(target=self._train_thread_body)
      t.start()
      workers.append(t)
    last_words, last_time, last_summary_time = initial_words, time.time(), 0
    last_checkpoint_time = 0
    while True:
      time.sleep(opts.statistics_interval)  # Reports our progress once a while.
      (epoch, step, loss, words, lr) = self._session.run(
          [self._epoch, self.global_step, self._loss, self._words, self._lr])
      now = time.time()
      last_words, last_time, rate = words, now, (words - last_words) / (
          now - last_time)
      print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
            (epoch, step, lr, loss, rate), end="")
      sys.stdout.flush()
      if now - last_summary_time > opts.summary_interval:
        summary_str = self._session.run(summary_op)
        summary_writer.add_summary(summary_str, step)
        last_summary_time = now
      if now - last_checkpoint_time > opts.checkpoint_interval:
        self.saver.save(self._session,
                        opts.save_path + "model",
                        global_step=step.astype(int))
        last_checkpoint_time = now
      if epoch != initial_epoch:
        break
    for t in workers:
      t.join()
    return epoch

  def _predict(self, analogy):
    """Predict the top 4 answers for analogy questions.

    `analogy` is an int array of shape [N, >=3]; columns 0-2 are the
    word ids of a, b, c.  Returns an [N, 4] array of candidate ids.
    """
    idx, = self._session.run([self._analogy_pred_idx], {
        self._analogy_a: analogy[:, 0],
        self._analogy_b: analogy[:, 1],
        self._analogy_c: analogy[:, 2]
    })
    return idx

  def eval(self):
    """Evaluate analogy questions and reports accuracy."""
    # How many questions we get right at precision@1.
    # (Assumes _read_analogies() already populated _analogy_questions.)
    correct = 0
    total = self._analogy_questions.shape[0]
    start = 0
    while start < total:
      limit = start + 2500  # evaluate in chunks of up to 2500 questions
      sub = self._analogy_questions[start:limit, :]
      idx = self._predict(sub)
      start = limit
      for question in xrange(sub.shape[0]):
        for j in xrange(4):
          if idx[question, j] == sub[question, 3]:
            # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
            correct += 1
            break
          elif idx[question, j] in sub[question, :3]:
            # We need to skip words already in the question.
            continue
          else:
            # The correct label is not the precision@1
            break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))

  def analogy(self, w0, w1, w2):
    """Predict word w3 as in w0:w1 vs w2:w3."""
    # OOV words map to id 0; the first candidate not among the inputs wins.
    wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
    idx = self._predict(wid)
    for c in [self._id2word[i] for i in idx[0, :]]:
      if c not in [w0, w1, w2]:
        return c
    return "unknown"

  def nearby(self, words, num=20):
    """Prints out nearby words given a list of words."""
    ids = np.array([self._word2id.get(x, 0) for x in words])
    vals, idx = self._session.run(
        [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
    for i in xrange(len(words)):
      print("\n%s\n=====================================" % (words[i]))
      for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
        print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
  """Open an interactive IPython shell — useful for debugging/development."""
  import IPython
  namespace = dict(local_ns) if local_ns else {}
  # Module globals are merged in last, so they shadow same-named locals.
  namespace.update(globals())
  IPython.start_ipython(argv=[], user_ns=namespace)
def main(_):
  """Train a word2vec model."""
  # All three paths are mandatory; bail out early if any is missing.
  if not all((FLAGS.train_data, FLAGS.eval_data, FLAGS.save_path)):
    print("--train_data --eval_data and --save_path must be specified.")
    sys.exit(1)
  opts = Options()
  with tf.Graph().as_default(), tf.Session() as session:
    model = Word2Vec(opts, session)
    # One training pass plus one analogy evaluation per epoch.
    for _ in xrange(opts.epochs_to_train):
      model.train()
      model.eval()
    # Perform a final save.
    model.saver.save(session,
                     os.path.join(opts.save_path, "model.ckpt"),
                     global_step=model.global_step)
    if FLAGS.interactive:
      # E.g.,
      # [0]: model.analogy('france', 'paris', 'russia')
      # [1]: model.nearby(['proton', 'elephant', 'maxwell'])
      _start_shell(locals())
if __name__ == "__main__":
  # tf.app.run() parses the flags, then dispatches to main().
  tf.app.run()
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import tempfile
import shutil
from nipype.testing import (assert_equal, assert_not_equal, assert_raises,
skipif)
from nipype.utils.filemanip import split_filename
import nipype.interfaces.fsl.preprocess as fsl
from nipype.interfaces.fsl import Info
from nipype.interfaces.base import File, TraitError, Undefined
from nipype.interfaces.fsl import no_fsl
@skipif(no_fsl)
def fsl_name(obj, fname):
    """Return *fname* plus the file extension for *obj*'s FSL output type."""
    return fname + Info.output_type_to_ext(obj.inputs.output_type)
# Module-level fixtures populated by setup_infile() and shared by the tests.
tmp_infile = None
tmp_dir = None
@skipif(no_fsl)
def setup_infile():
    """Create an empty temp input file for the FSL tests.

    Sets the module-level ``tmp_infile``/``tmp_dir`` fixtures and returns
    them as ``(tmp_infile, tmp_dir)``.
    """
    global tmp_infile, tmp_dir
    ext = Info.output_type_to_ext(Info.output_type())
    tmp_dir = tempfile.mkdtemp()
    tmp_infile = os.path.join(tmp_dir, 'foo' + ext)
    # Use open() (not the deprecated `file` builtin) and close immediately:
    # the original left the handle dangling; we only need the file to exist.
    open(tmp_infile, 'w').close()
    return tmp_infile, tmp_dir
def teardown_infile(tmp_dir):
    """Recursively delete the temporary directory created by setup_infile()."""
    shutil.rmtree(tmp_dir)
# test BET
#@with_setup(setup_infile, teardown_infile)
#broken in nose with generators
@skipif(no_fsl)
def test_bet():
    """Generator test for the fsl.BET interface (command-line assembly only).

    NOTE(review): the second value from setup_infile() is bound to `tp_dir`
    (looks like a typo for `tmp_dir`) and never used; the final
    teardown_infile(tmp_dir) call works via the module-level global that
    setup_infile() also assigns.
    """
    tmp_infile, tp_dir = setup_infile()
    better = fsl.BET()
    yield assert_equal, better.cmd, 'bet'
    # Test raising error with mandatory args absent
    yield assert_raises, ValueError, better.run
    # Test generated outfile name
    better.inputs.in_file = tmp_infile
    outfile = fsl_name(better, 'foo_brain')
    outpath = os.path.join(os.getcwd(), outfile)
    realcmd = 'bet %s %s' % (tmp_infile, outpath)
    yield assert_equal, better.cmdline, realcmd
    # Test specified outfile name
    outfile = fsl_name(better, '/newdata/bar')
    better.inputs.out_file = outfile
    realcmd = 'bet %s %s' % (tmp_infile, outfile)
    yield assert_equal, better.cmdline, realcmd
    # infile foo.nii doesn't exist
    def func():
        better.run(in_file='foo2.nii', out_file='bar.nii')
    yield assert_raises, TraitError, func
    # Our options and some test values for them
    # Should parallel the opt_map structure in the class for clarity
    # Maps input trait name -> (expected flag string, value to set).
    opt_map = {
        'outline': ('-o', True),
        'mask': ('-m', True),
        'skull': ('-s', True),
        'no_output': ('-n', True),
        'frac': ('-f 0.40', 0.4),
        'vertical_gradient': ('-g 0.75', 0.75),
        'radius': ('-r 20', 20),
        'center': ('-c 54 75 80', [54, 75, 80]),
        'threshold': ('-t', True),
        'mesh': ('-e', True),
        'surfaces': ('-A', True)
        #'verbose': ('-v', True),
        #'flags': ('--i-made-this-up', '--i-made-this-up'),
    }
    # Currently we don't test -R, -S, -B, -Z, -F, -A or -A2
    # test each of our arguments
    better = fsl.BET()
    outfile = fsl_name(better, 'foo_brain')
    outpath = os.path.join(os.getcwd(), outfile)
    for name, settings in opt_map.items():
        better = fsl.BET(**{name: settings[1]})
        # Add mandatory input
        better.inputs.in_file = tmp_infile
        realcmd = ' '.join([better.cmd, tmp_infile, outpath, settings[0]])
        yield assert_equal, better.cmdline, realcmd
    teardown_infile(tmp_dir)
# test fast
@skipif(no_fsl)
def test_fast():
    """Generator test for the fsl.FAST interface (command-line assembly only).

    NOTE(review): as in test_bet, the unused local `tp_dir` looks like a
    typo for `tmp_dir`; teardown_infile(tmp_dir) relies on the module-level
    global set by setup_infile().
    """
    tmp_infile, tp_dir = setup_infile()
    faster = fsl.FAST()
    faster.inputs.verbose = True
    fasted = fsl.FAST(in_files=tmp_infile, verbose = True)
    fasted2 = fsl.FAST(in_files=[tmp_infile, tmp_infile], verbose = True)
    yield assert_equal, faster.cmd, 'fast'
    yield assert_equal, faster.inputs.verbose, True
    yield assert_equal, faster.inputs.manual_seg , Undefined
    yield assert_not_equal, faster.inputs, fasted.inputs
    # -S reflects the number of input files.
    yield assert_equal, fasted.cmdline, 'fast -v -S 1 %s'%(tmp_infile)
    yield assert_equal, fasted2.cmdline, 'fast -v -S 2 %s %s'%(tmp_infile,
                                                               tmp_infile)
    faster = fsl.FAST()
    faster.inputs.in_files = tmp_infile
    yield assert_equal, faster.cmdline, 'fast -S 1 %s'%(tmp_infile)
    faster.inputs.in_files = [tmp_infile, tmp_infile]
    yield assert_equal, faster.cmdline, 'fast -S 2 %s %s'%(tmp_infile, tmp_infile)
    # Our options and some test values for them
    # Should parallel the opt_map structure in the class for clarity
    # Maps input trait name -> (expected flag string, value to set).
    opt_map = {'number_classes': ('-n 4', 4),
               'bias_iters': ('-I 5', 5),
               'bias_lowpass': ('-l 15', 15),
               'img_type': ('-t 2', 2),
               'init_seg_smooth': ('-f 0.035', 0.035),
               'segments': ('-g', True),
               'init_transform': ('-a %s'%(tmp_infile), '%s'%(tmp_infile)),
               'other_priors': ('-A %s %s %s'%(tmp_infile, tmp_infile,
                                               tmp_infile),
                                (['%s'%(tmp_infile),
                                  '%s'%(tmp_infile),
                                  '%s'%(tmp_infile)])),
               'no_pve': ('--nopve', True),
               'output_biasfield': ('-b', True),
               'output_biascorrected': ('-B', True),
               'no_bias': ('-N', True),
               'out_basename': ('-o fasted', 'fasted'),
               'use_priors': ('-P', True),
               'segment_iters': ('-W 14', 14),
               'mixel_smooth': ('-R 0.25', 0.25),
               'iters_afterbias': ('-O 3', 3),
               'hyper': ('-H 0.15', 0.15),
               'verbose': ('-v', True),
               'manual_seg': ('-s %s'%(tmp_infile),
                              '%s'%(tmp_infile)),
               'probability_maps': ('-p', True),
               }
    # test each of our arguments
    for name, settings in opt_map.items():
        faster = fsl.FAST(in_files=tmp_infile, **{name: settings[1]})
        yield assert_equal, faster.cmdline, ' '.join([faster.cmd,
                                                      settings[0],
                                                      "-S 1 %s"%tmp_infile])
    teardown_infile(tmp_dir)
@skipif(no_fsl)
def setup_flirt():
    """Create a temp dir containing an input and a reference image for FLIRT.

    Returns:
        (tmpdir, infile, reffile)
    """
    ext = Info.output_type_to_ext(Info.output_type())
    tmpdir = tempfile.mkdtemp()
    # mkstemp returns an open OS-level file descriptor which the caller must
    # close; the original discarded it into `_`, leaking one fd per file.
    fd, infile = tempfile.mkstemp(suffix=ext, dir=tmpdir)
    os.close(fd)
    fd, reffile = tempfile.mkstemp(suffix=ext, dir=tmpdir)
    os.close(fd)
    return tmpdir, infile, reffile
def teardown_flirt(tmpdir):
    """Recursively delete the temporary directory created by setup_flirt()."""
    shutil.rmtree(tmpdir)
@skipif(no_fsl)
def test_flirt():
    """Exercise FLIRT: command name, cmdline generation for every optional
    input, mandatory-input validation, and the output spec."""
    # setup
    tmpdir, infile, reffile = setup_flirt()
    flirter = fsl.FLIRT()
    yield assert_equal, flirter.cmd, 'flirt'
    flirter.inputs.bins = 256
    flirter.inputs.cost = 'mutualinfo'
    flirted = fsl.FLIRT(in_file=infile, reference=reffile,
                        out_file='outfile', out_matrix_file='outmat.mat',
                        bins = 256,
                        cost = 'mutualinfo')
    flirt_est = fsl.FLIRT(in_file=infile, reference=reffile,
                          out_matrix_file='outmat.mat',
                          bins = 256,
                          cost = 'mutualinfo')
    # Separately constructed interfaces must not share input instances,
    # while individually-set trait values still compare equal.
    yield assert_not_equal, flirter.inputs, flirted.inputs
    yield assert_not_equal, flirted.inputs, flirt_est.inputs
    yield assert_equal, flirter.inputs.bins, flirted.inputs.bins
    yield assert_equal, flirter.inputs.cost, flirt_est.inputs.cost
    realcmd = 'flirt -in %s -ref %s -out outfile -omat outmat.mat ' \
              '-bins 256 -cost mutualinfo' % (infile, reffile)
    yield assert_equal, flirted.cmdline, realcmd
    flirter = fsl.FLIRT()
    # infile not specified
    yield assert_raises, ValueError, flirter.run
    flirter.inputs.in_file = infile
    # reference not specified
    yield assert_raises, ValueError, flirter.run
    flirter.inputs.reference = reffile
    # Generate outfile and outmatrix
    pth, fname, ext = split_filename(infile)
    outfile = os.path.join(os.getcwd(),
                           fsl_name(flirter, '%s_flirt' %fname))
    outmat = '%s_flirt.mat' % fname
    outmat = os.path.join(os.getcwd(), outmat)
    realcmd = 'flirt -in %s -ref %s -out %s -omat %s' % (infile, reffile,
                                                         outfile, outmat)
    yield assert_equal, flirter.cmdline, realcmd
    _, tmpfile = tempfile.mkstemp(suffix = '.nii', dir = tmpdir)
    # Loop over all inputs, set a reasonable value and make sure the
    # cmdline is updated correctly.
    for key, trait_spec in sorted(fsl.FLIRT.input_spec().traits().items()):
        # Skip mandatory inputs and the trait methods
        if key in ('trait_added', 'trait_modified', 'in_file', 'reference',
                   'environ', 'output_type', 'out_file', 'out_matrix_file',
                   'in_matrix_file', 'apply_xfm', 'ignore_exception',
                   'terminal_output', 'out_log', 'save_log'):
            continue
        param = None
        value = None
        if key == 'args':
            param = '-v'
            value = '-v'
        elif isinstance(trait_spec.trait_type, File):
            value = tmpfile
            param = trait_spec.argstr % value
        elif trait_spec.default is False:
            param = trait_spec.argstr
            value = True
        elif key in ('searchr_x', 'searchr_y', 'searchr_z'):
            value = [-45, 45]
            param = trait_spec.argstr % ' '.join(str(elt) for elt in value)
        else:
            value = trait_spec.default
            param = trait_spec.argstr % value
        cmdline = 'flirt -in %s -ref %s' % (infile, reffile)
        # Handle autogeneration of outfile
        pth, fname, ext = split_filename(infile)
        outfile = os.path.join(os.getcwd(),
                               fsl_name(fsl.FLIRT(),'%s_flirt' % fname))
        outfile = ' '.join(['-out', outfile])
        # Handle autogeneration of outmatrix
        outmatrix = '%s_flirt.mat' % fname
        outmatrix = os.path.join(os.getcwd(), outmatrix)
        outmatrix = ' '.join(['-omat', outmatrix])
        # Build command line
        cmdline = ' '.join([cmdline, outfile, outmatrix, param])
        flirter = fsl.FLIRT(in_file = infile, reference = reffile)
        setattr(flirter.inputs, key, value)
        yield assert_equal, flirter.cmdline, cmdline
    # Test OutputSpec
    flirter = fsl.FLIRT(in_file = infile, reference = reffile)
    pth, fname, ext = split_filename(infile)
    flirter.inputs.out_file = ''.join(['foo', ext])
    flirter.inputs.out_matrix_file = ''.join(['bar', ext])
    outs = flirter._list_outputs()
    yield assert_equal, outs['out_file'], \
        os.path.join(os.getcwd(), flirter.inputs.out_file)
    yield assert_equal, outs['out_matrix_file'], \
        os.path.join(os.getcwd(), flirter.inputs.out_matrix_file)
    teardown_flirt(tmpdir)
# Mcflirt
@skipif(no_fsl)
def test_mcflirt():
    """Check MCFLIRT cmdline generation for auto-named and explicit outputs,
    every optional flag, and mandatory-input validation."""
    tmpdir, infile, reffile = setup_flirt()
    frt = fsl.MCFLIRT()
    yield assert_equal, frt.cmd, 'mcflirt'
    # Test generated outfile name
    frt.inputs.in_file = infile
    _, nme = os.path.split(infile)
    outfile = os.path.join(os.getcwd(), nme)
    outfile = frt._gen_fname(outfile, suffix = '_mcf')
    realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile
    yield assert_equal, frt.cmdline, realcmd
    # Test specified outfile name
    outfile2 = '/newdata/bar.nii'
    frt.inputs.out_file = outfile2
    realcmd = 'mcflirt -in ' + infile + ' -out ' + outfile2
    yield assert_equal, frt.cmdline, realcmd
    # Expected flag string and input value for each optional trait.
    opt_map = {
        'cost': ('-cost mutualinfo', 'mutualinfo'),
        'bins': ('-bins 256', 256),
        'dof': ('-dof 6', 6),
        'ref_vol': ('-refvol 2', 2),
        'scaling': ('-scaling 6.00', 6.00),
        'smooth': ('-smooth 1.00', 1.00),
        'rotation': ('-rotation 2', 2),
        'stages': ('-stages 3', 3),
        'init': ('-init %s'%(infile), infile),
        'use_gradient': ('-gdt', True),
        'use_contour': ('-edge', True),
        'mean_vol': ('-meanvol', True),
        'stats_imgs': ('-stats', True),
        'save_mats': ('-mats', True),
        'save_plots': ('-plots', True),
        }
    for name, settings in opt_map.items():
        fnt = fsl.MCFLIRT(in_file = infile, **{name : settings[1]})
        instr = '-in %s'%(infile)
        # NOTE: reuses `outfile` (the auto-generated name) computed above.
        outstr = '-out %s'%(outfile)
        # These flags sort before -out in the generated command line.
        if name in ('init', 'cost', 'dof','mean_vol','bins'):
            yield assert_equal, fnt.cmdline, ' '.join([fnt.cmd,
                                                       instr,
                                                       settings[0],
                                                       outstr])
        else:
            yield assert_equal, fnt.cmdline, ' '.join([fnt.cmd,
                                                       instr,
                                                       outstr,
                                                       settings[0]])
    # Test error is raised when missing required args
    fnt = fsl.MCFLIRT()
    yield assert_raises, ValueError, fnt.run
    teardown_flirt(tmpdir)
#test fnirt
@skipif(no_fsl)
def test_fnirt():
    """Check FNIRT cmdline generation for list-valued parameters and for
    every file-typed optional input, plus mandatory-input validation."""
    tmpdir, infile, reffile = setup_flirt()
    fnirt = fsl.FNIRT()
    yield assert_equal, fnirt.cmd, 'fnirt'
    # Test list parameters
    # (trait name, flag, list value, expected comma-joined string)
    params = [('subsampling_scheme', '--subsamp', [4,2,2,1],'4,2,2,1'),
              ('max_nonlin_iter', '--miter', [4,4,4,2],'4,4,4,2'),
              ('ref_fwhm', '--reffwhm', [4,2,2,0],'4,2,2,0'),
              ('in_fwhm', '--infwhm', [4,2,2,0],'4,2,2,0'),
              ('apply_refmask', '--applyrefmask', [0,0,1,1],'0,0,1,1'),
              ('apply_inmask', '--applyinmask', [0,0,0,1],'0,0,0,1'),
              ('regularization_lambda', '--lambda', [0.5,0.75],'0.5,0.75')]
    for item, flag, val, strval in params:
        fnirt = fsl.FNIRT(in_file = infile,
                          ref_file = reffile,
                          **{item : val})
        log = fnirt._gen_fname(infile, suffix='_log.txt', change_ext=False)
        iout = fnirt._gen_fname(infile, suffix='_warped')
        # NOTE(review): `item in ('max_nonlin_iter')` is a substring test on a
        # plain string, not tuple membership ('...',) -- it happens to match
        # only the intended name here, but confirm intent before relying on it.
        if item in ('max_nonlin_iter'):
            cmd = 'fnirt --in=%s '\
                  '--logout=%s'\
                  ' %s=%s --ref=%s'\
                  ' --iout=%s' % (infile, log,
                                  flag, strval, reffile, iout)
        elif item in ('in_fwhm'):
            cmd = 'fnirt --in=%s %s=%s --logout=%s '\
                  '--ref=%s --iout=%s' % (infile, flag,
                                          strval, log, reffile, iout)
        elif item.startswith('apply'):
            cmd = 'fnirt %s=%s '\
                  '--in=%s '\
                  '--logout=%s '\
                  '--ref=%s --iout=%s' % (flag,strval,
                                          infile, log,
                                          reffile,
                                          iout)
        else:
            cmd = 'fnirt '\
                  '--in=%s --logout=%s '\
                  '--ref=%s %s=%s --iout=%s' % (infile, log,
                                                reffile,
                                                flag, strval,
                                                iout)
        yield assert_equal, fnirt.cmdline, cmd
    # Test ValueError is raised when missing mandatory args
    fnirt = fsl.FNIRT()
    yield assert_raises, ValueError, fnirt.run
    fnirt.inputs.in_file = infile
    fnirt.inputs.ref_file = reffile
    # test files
    # Expected flag prefix for each file-typed input.
    opt_map = {
        'affine_file': ('--aff='),
        'inwarp_file': ('--inwarp='),
        'in_intensitymap_file': ('--intin='),
        'config_file': ('--config='),
        'refmask_file': ('--refmask='),
        'inmask_file': ('--inmask='),
        'field_file': ('--fout='),
        'jacobian_file': ('--jout='),
        'modulatedref_file': ('--refout='),
        'out_intensitymap_file':('--intout='),
        'log_file': ('--logout=')}
    for name, settings in opt_map.items():
        fnirt = fsl.FNIRT(in_file = infile,
                          ref_file = reffile,
                          **{name : infile})
        # NOTE(review): `log` and `iout` below are the values left over from
        # the final iteration of the list-parameter loop above; they are
        # consistent only because _gen_fname is deterministic for `infile`.
        if name in ('config_file', 'affine_file','field_file'):
            cmd = 'fnirt %s%s --in=%s '\
                  '--logout=%s '\
                  '--ref=%s --iout=%s' % (settings, infile, infile, log,
                                          reffile, iout)
        elif name in ('refmask_file'):
            cmd = 'fnirt --in=%s '\
                  '--logout=%s --ref=%s '\
                  '%s%s '\
                  '--iout=%s' % (infile, log,
                                 reffile,
                                 settings,infile,
                                 iout)
        elif name in ('in_intensitymap_file', 'inwarp_file', 'inmask_file', 'jacobian_file'):
            cmd = 'fnirt --in=%s '\
                  '%s%s '\
                  '--logout=%s --ref=%s '\
                  '--iout=%s' % (infile,
                                 settings,infile,
                                 log,
                                 reffile,
                                 iout)
        elif name in ('log_file'):
            cmd = 'fnirt --in=%s '\
                  '%s%s --ref=%s '\
                  '--iout=%s' % (infile,
                                 settings,infile,
                                 reffile,
                                 iout)
        else:
            cmd = 'fnirt --in=%s '\
                  '--logout=%s %s%s '\
                  '--ref=%s --iout=%s' % (infile,log,
                                          settings, infile,
                                          reffile,iout)
        yield assert_equal, fnirt.cmdline, cmd
    teardown_flirt(tmpdir)
@skipif(no_fsl)
def test_applywarp():
    """Verify ApplyWarp builds the expected `applywarp` command line."""
    tmpdir, infile, reffile = setup_flirt()
    # Expected flag string and input value for each optional trait.
    opt_map = {
        'out_file': ('--out=bar.nii', 'bar.nii'),
        'premat': ('--premat=%s' % (reffile), reffile),
        'postmat': ('--postmat=%s' % (reffile), reffile),
        }
    # in_file, ref_file, field_file mandatory
    for opt_name, (flag_str, opt_value) in opt_map.items():
        awarp = fsl.ApplyWarp(in_file=infile,
                              ref_file=reffile,
                              field_file=reffile,
                              **{opt_name: opt_value})
        if opt_name == 'out_file':
            expected = 'applywarp --warp=%s --in=%s --out=%s --ref=%s' % (
                reffile, infile, opt_value, reffile)
        else:
            # Output name is auto-generated when out_file is not given.
            auto_out = awarp._gen_fname(infile, suffix='_warp')
            expected = 'applywarp --warp=%s --in=%s --out=%s %s --ref=%s' % (
                reffile, infile, auto_out, flag_str, reffile)
        yield assert_equal, awarp.cmdline, expected
    # Construction with only the mandatory inputs must succeed.
    awarp = fsl.ApplyWarp(in_file=infile,
                          ref_file=reffile,
                          field_file=reffile)
    teardown_flirt(tmpdir)
@skipif(no_fsl)
def test_fugue():
    """Check that FUGUE's declared input traits carry the expected metadata
    (argstr format, requires-lists, usedefault flags)."""
    # Expected trait metadata keyed by input name.
    # NOTE(review): 'despike_theshold' (sic) presumably mirrors the trait name
    # declared in the FUGUE input spec -- confirm there before "fixing" it.
    input_map = dict(args = dict(argstr='%s',),
                     asym_se_time = dict(argstr='--asym=%.10f',),
                     despike_2dfilter = dict(argstr='--despike',),
                     despike_theshold = dict(argstr='--despikethreshold=%s',),
                     dwell_time = dict(argstr='--dwell=%.10f',),
                     dwell_to_asym_ratio = dict(argstr='--dwelltoasym=%.10f',),
                     environ = dict(usedefault=True,),
                     fmap_in_file = dict(argstr='--loadfmap=%s',),
                     fmap_out_file = dict(argstr='--savefmap=%s',),
                     fourier_order = dict(argstr='--fourier=%d',),
                     icorr = dict(requires=['shift_in_file'],argstr='--icorr',),
                     icorr_only = dict(requires=['unwarped_file'],argstr='--icorronly',),
                     in_file = dict(argstr='--in=%s',),
                     mask_file = dict(argstr='--mask=%s',),
                     median_2dfilter = dict(argstr='--median',),
                     no_extend = dict(argstr='--noextend',),
                     no_gap_fill = dict(argstr='--nofill',),
                     nokspace = dict(argstr='--nokspace',),
                     output_type = dict(),
                     pava = dict(argstr='--pava',),
                     phase_conjugate = dict(argstr='--phaseconj',),
                     phasemap_file = dict(argstr='--phasemap=%s',),
                     poly_order = dict(argstr='--poly=%d',),
                     save_unmasked_fmap = dict(requires=['fmap_out_file'],argstr='--unmaskfmap',),
                     save_unmasked_shift = dict(requires=['shift_out_file'],argstr='--unmaskshift',),
                     shift_in_file = dict(argstr='--loadshift=%s',),
                     shift_out_file = dict(argstr='--saveshift=%s',),
                     smooth2d = dict(argstr='--smooth2=%.2f',),
                     smooth3d = dict(argstr='--smooth3=%.2f',),
                     unwarp_direction = dict(argstr='--unwarpdir=%s',),
                     unwarped_file = dict(argstr='--unwarp=%s',),
                     )
    instance = fsl.FUGUE()
    # Compare every expected metadata key against the live trait definition.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
@skipif(no_fsl)
def test_prelude():
    """Check that PRELUDE's declared input traits carry the expected metadata
    (argstr format, mandatory/xor constraints, usedefault flags)."""
    # Expected trait metadata keyed by input name.
    input_map = dict(args = dict(argstr='%s',),
                     complex_phase_file = dict(mandatory=True,xor=['magnitude_file', 'phase_file'],argstr='--complex=%s',),
                     end = dict(argstr='--end=%d',),
                     environ = dict(usedefault=True,),
                     label_file = dict(argstr='--labels=%s',),
                     labelprocess2d = dict(argstr='--labelslices',),
                     magnitude_file = dict(mandatory=True,xor=['complex_phase_file'],argstr='--abs=%s',),
                     mask_file = dict(argstr='--mask=%s',),
                     num_partitions = dict(argstr='--numphasesplit=%d',),
                     output_type = dict(),
                     phase_file = dict(mandatory=True,xor=['complex_phase_file'],argstr='--phase=%s',),
                     process2d = dict(xor=['labelprocess2d'],argstr='--slices',),
                     process3d = dict(xor=['labelprocess2d', 'process2d'],argstr='--force3D',),
                     rawphase_file = dict(argstr='--rawphase=%s',),
                     removeramps = dict(argstr='--removeramps',),
                     savemask_file = dict(argstr='--savemask=%s',),
                     start = dict(argstr='--start=%d',),
                     threshold = dict(argstr='--thresh=%.10f',),
                     unwrapped_phase_file = dict(argstr='--unwrap=%s',),
                     )
    instance = fsl.PRELUDE()
    # Compare every expected metadata key against the live trait definition.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(instance.inputs.traits()[key], metakey), value
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.SVM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class SVMTest(tf.test.TestCase):
  """Tests for the tf.contrib.learn.SVM estimator (linear SVM trained with
  SDCA), covering real-valued, sparse, bucketized and crossed features and
  the effect of L1/L2 regularization on loss and accuracy."""

  def testRealValuedFeaturesPerfectlySeparable(self):
    """Tests SVM classifier with real valued features."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.0], [1.0], [3.0]]),
          'feature2': tf.constant([[1.0], [-1.2], [1.0]]),
      }, tf.constant([[1], [0], [1]])
    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=0.0,
                                          l2_regularization=0.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # The points are not only separable but there exist weights (for instance
    # w1=0.0, w2=1.0) that satisfy the margin inequalities (y_i* w^T*x_i >=1).
    # The unregularized loss should therefore be 0.0.
    self.assertAlmostEqual(loss, 0.0, places=3)
    self.assertAlmostEqual(accuracy, 1.0, places=3)

  def testRealValuedFeaturesWithL2Regularization(self):
    """Tests SVM classifier with real valued features and L2 regularization."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.5], [1.0], [1.0]]),
          'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
      }, tf.constant([[1], [0], [1]])
    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=0.0,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # The points are in general separable. Also, if there was no regularization,
    # the margin inequalities would be satisfied too (for instance by w1=1.0,
    # w2=5.0). Due to regularization, smaller weights are chosen. This results
    # in a small but non-zero unregularized loss. Still, all the predictions
    # will be correct resulting in perfect accuracy.
    self.assertGreater(loss, 0.01)
    self.assertLess(loss, 0.1)
    self.assertAlmostEqual(accuracy, 1.0, places=3)

  def testRealValuedFeaturesWithMildL1Regularization(self):
    """Tests SVM classifier with real valued features and mild L1 regularization."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.5], [1.0], [1.0]]),
          'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
      }, tf.constant([[1], [0], [1]])
    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=0.5,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # Adding small L1 regularization favors even smaller weights. This results
    # in somewhat moderate unregularized loss (bigger than the one when there
    # is no L1 regularization). Still, since L1 is small, all the predictions
    # will be correct resulting in perfect accuracy.
    self.assertGreater(loss, 0.1)
    self.assertAlmostEqual(accuracy, 1.0, places=3)

  def testRealValuedFeaturesWithBigL1Regularization(self):
    """Tests SVM classifier with real valued features and large L1 regularization."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'feature1': tf.constant([[0.5], [1.0], [1.0]]),
          'feature2': tf.constant([[1.0], [-1.0], [0.5]]),
      }, tf.constant([[1], [0], [1]])
    feature1 = tf.contrib.layers.real_valued_column('feature1')
    feature2 = tf.contrib.layers.real_valued_column('feature2')
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[feature1, feature2],
                                          example_id_column='example_id',
                                          l1_regularization=3.0,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    metrics = svm_classifier.evaluate(input_fn=input_fn, steps=1)
    loss = metrics['loss']
    accuracy = metrics['accuracy']
    # When L1 regularization parameter is large, the loss due to regularization
    # outweighs the unregularized loss. In this case, the classifier will favor
    # very small weights (in current case 0) resulting in both a big
    # unregularized loss and bad accuracy.
    self.assertAlmostEqual(loss, 1.0, places=3)
    self.assertAlmostEqual(accuracy, 1 / 3, places=3)

  def testSparseFeatures(self):
    """Tests SVM classifier with (hashed) sparse features."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[0.8], [0.6], [0.3]]),
          # NOTE(review): `shape` is the pre-1.0 SparseTensor argument
          # (renamed `dense_shape` in later TF releases).
          'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                     indices=[[0, 0], [1, 0], [2, 0]],
                                     shape=[3, 1]),
      }, tf.constant([[0], [1], [1]])
    price = tf.contrib.layers.real_valued_column('price')
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    svm_classifier = tf.contrib.learn.SVM(feature_columns=[price, country],
                                          example_id_column='example_id',
                                          l1_regularization=0.0,
                                          l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
    self.assertAlmostEqual(accuracy, 1.0, places=3)

  def testBucketizedFeatures(self):
    """Tests SVM classifier with bucketized features."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[600.0], [800.0], [400.0]]),
          'sq_footage': tf.constant([[1000.0], [800.0], [500.0]]),
          'weights': tf.constant([[1.0], [1.0], [1.0]])
      }, tf.constant([[1], [0], [1]])
    price_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column('price'),
        boundaries=[500.0, 700.0])
    sq_footage_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column('sq_footage'),
        boundaries=[650.0])
    svm_classifier = tf.contrib.learn.SVM(
        feature_columns=[price_bucket, sq_footage_bucket],
        example_id_column='example_id',
        l1_regularization=0.1,
        l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
    self.assertAlmostEqual(accuracy, 1.0, places=3)

  def testMixedFeatures(self):
    """Tests SVM classifier with a mix of features."""
    def input_fn():
      return {
          'example_id': tf.constant(['1', '2', '3']),
          'price': tf.constant([[0.6], [0.8], [0.3]]),
          'sq_footage': tf.constant([[900.0], [700.0], [600.0]]),
          'country': tf.SparseTensor(values=['IT', 'US', 'GB'],
                                     indices=[[0, 0], [1, 3], [2, 1]],
                                     shape=[3, 5]),
          'weights': tf.constant([[3.0], [1.0], [1.0]])
      }, tf.constant([[1], [0], [1]])
    price = tf.contrib.layers.real_valued_column('price')
    sq_footage_bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column('sq_footage'),
        boundaries=[650.0, 800.0])
    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=5)
    sq_footage_country = tf.contrib.layers.crossed_column(
        [sq_footage_bucket, country],
        hash_bucket_size=10)
    svm_classifier = tf.contrib.learn.SVM(
        feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
        example_id_column='example_id',
        weight_column_name='weights',
        l1_regularization=0.1,
        l2_regularization=1.0)
    svm_classifier.fit(input_fn=input_fn, steps=30)
    accuracy = svm_classifier.evaluate(input_fn=input_fn, steps=1)['accuracy']
    self.assertAlmostEqual(accuracy, 1.0, places=3)
if __name__ == '__main__':
  # Run the TensorFlow test runner when executed as a script.
  tf.test.main()
| |
import sys
import re
import string
import itertools
import os
import os.path
# NOTE(review): Python 2 script -- later code uses dict.iteritems()/itervalues().
#Read file
# The document to process is the first command-line argument.
f = open(sys.argv[1],"r")
contents = f.read()
f.close()
#//////////////////////////////////////////////////////////////////////////////
#............................Gets info from Config...........
#//////////////////////////////////////////////////////////////////////////////
# QS.config entries have the form !!!name={value}!!! (value may span lines).
f = open("QS.config","r")
Config_Contents = f.read()
f.close()
Config_Info={}
par=re.findall(r'!!!(.*?)={(.*?)}!!!',Config_Contents,re.DOTALL)
for y in par:
    Config_Info[y[0]]=y[1]
#//////////////////////////////////////////////////////////////////////////////
#............................Gets info from LANG directory...........
#//////////////////////////////////////////////////////////////////////////////
# Each <Path>/LANG/<name>.lang file uses the same !!!key={value}!!! syntax;
# Lang_Info maps language name -> {key: value}.
path_to_LANG=Config_Info['Path']+"/LANG"
LangFiles = [x for x in os.listdir(path_to_LANG)]
Lang_Info={}
for x in LangFiles:
    key=x.replace('.lang','')
    f=open(path_to_LANG+"/"+x,"r")
    lang_contents=f.read()
    f.close()
    Lang_Info[key]={}
    par=re.findall(r'!!!(.*?)={(.*?)}!!!',lang_contents,re.DOTALL)
    for y in par:
        Lang_Info[key][y[0]]=y[1]
def codefind(contentin):
    # Process a document containing embedded <script=lang:action={...}>...</script>
    # blocks: run each script via the language's configured run_command, capture
    # any requested output, and splice results/listings back into the text.
    # NOTE(review): Python 2 only (iteritems/itervalues); several locals shadow
    # builtins or imports (`string` parameter below, `file` variable).
    #//////////////////////////////////////////////////////////////////////////////
    #.....Finds Scripts pre defined.......
    #//////////////////////////////////////////////////////////////////////////////
    # Expand user-defined !!!name={body}!!! macros: strip the definition and
    # rewrite <name>...</name> tags into <body>...</script> script tags.
    par=re.findall(r'!!!(.*?)={(.*?)}!!!',contentin,re.DOTALL)
    precontents=contentin
    for x in par:
        precontents=precontents.replace('!!!'+x[0]+'={'+x[1]+'}!!!','')
        precontents=precontents.replace('<'+x[0]+'>',x[1])
        precontents=precontents.replace('</'+x[0]+'>','</script>')
    #//////////////////////////////////////////////////////////////////////////////
    #.....Finds Scripts .......
    #//////////////////////////////////////////////////////////////////////////////
    par=re.findall(r'<script=(.*?):action={(.*?)}>(.*?)</script>',precontents,re.DOTALL)
    newcontents=precontents
    #script needs to be the end point of the file.
    # NOTE(review): `langarray` is never referenced again in this function.
    langarray=[key for key in Lang_Info]
    #//////////////////////////////////////////////////////////////////////////////
    #.....Creates function to replace varibles in lang files with py varibles.......
    #//////////////////////////////////////////////////////////////////////////////
    # NOTE: parameter name shadows the `string` module imported at file top.
    def var_rep(string,var):
        temp=string
        for key in var:
            temp=temp.replace("!!"+key+"!!",str(var[key]))
        return(temp)
    #//////////////////////////////////////////////////////////////////////////////
    #............................Creates Function to Include Script................
    #//////////////////////////////////////////////////////////////////////////////
    def include_script(lang,script,action,indscriptid,*other): #Function used to include scripts into page.
        # Build a LaTeX lstlisting of `script`; 'HF' mode re-wraps the listing
        # in its original <script...> header/footer tags.
        def latexcode_func(pre_post):
            pre=''
            post=''
            script_escaped=script
            # Escape characters the language config marks as needing a backslash.
            for x in Lang_Info[lang]["back_slash_escape"]:
                script_escaped=script_escaped.replace(x,"\\"+x)
            if pre_post=='HF':
                pre=pre+"\n<script="+lang+":action={"+action+"}>"
                post=post+"</script>"
            # !bs! is a placeholder for a literal backslash, substituted later.
            latexcode=Config_Info['ListingPre']+"""
!bs!lstdefinestyle{mystyle"""+str(indscriptid)+"""}{"""+Config_Info['ListingIn']+"""
}
!bs!begin{lstlisting}[language="""+Lang_Info[lang]["lstlisting"]+""",style=mystyle"""+str(indscriptid)+"""]"""+pre+script_escaped+post+"""!bs!end{lstlisting}
"""+Config_Info['ListingPost']
            # Optional per-language !!find|replace!! pairs for the listing text.
            try:
                par=re.findall(r'!!(.*?)\|(.*?)!!',Lang_Info[lang]['listing_replace'],re.DOTALL)
                for x_lr in par:
                    latexcode=latexcode.replace(x_lr[0],x_lr[1])
            except KeyError:
                pass
            return(latexcode)
        if len(other)==0:
            return(var_rep(Lang_Info[lang]['func.scrinc'],{"norm":latexcode_func(''),"HF":latexcode_func('HF')}))
        elif other[0]=='':
            return(latexcode_func(''))
        elif other[0]=='HF':
            return(latexcode_func('HF'))
    #//////////////////////////////////////////////////////////////////////////////
    #............................Creating Directory of Partial scripts....
    #//////////////////////////////////////////////////////////////////////////////
    part_script={}
    partsc_id=0
    for x in par:
        lang=x[0]
        action=x[1]
        script=x[2]
        #Script ID - allowing for combined scripts.
        sid=re.findall(r'!!sid=(.*?)!!',action,re.DOTALL)
        if len(sid) == 0:
            sid="alone"
        else:
            sid=sid[0]
        part_script[partsc_id]=[sid,lang,action,script] #This array contains information about the indivudual scripts
        partsc_id=partsc_id+1
    #//////////////////////////////////////////////////////////////////////////////
    #............................Getting Functions from FUNC.........
    #//////////////////////////////////////////////////////////////////////////////
    # Concatenate every helper file under <Path>/FUNC/<lang> into one string.
    def pre_functions(lang):
        path_to_FUNC=Config_Info['Path']+"/FUNC/"+lang
        Func_Files = [x for x in os.listdir(path_to_FUNC)]
        Func_String=''
        for x in Func_Files:
            f=open(path_to_FUNC+"/"+x,"r")
            Func_Contents=f.read()
            f.close()
            Func_String= Func_String+Func_Contents+'\n'
        return(Func_String)
    #//////////////////////////////////////////////////////////////////////////////
    #........................... Creating Output if in Action.........
    #//////////////////////////////////////////////////////////////////////////////
    # Scripts whose action carries !!output=...!! get an outputfc_<id>.txt file
    # seeded with the template (scrinc() placeholders expanded to listings).
    for partsc_id, ps in part_script.iteritems():
        action=ps[2]
        #outputs given in action.
        outputted=re.findall(r'!!output=(.*?)!!',action,re.DOTALL)
        if len(outputted)!=0:
            To_be_outputted=outputted[0].replace('scrinc()', include_script(ps[1],ps[3],ps[2],partsc_id,'').replace('!bs!','\\'))
            To_be_outputted=To_be_outputted.replace("scrinc('HF')", include_script(ps[1],ps[3],ps[2],partsc_id,'HF').replace('!bs!','\\'))
            file=open("outputfc_"+str(partsc_id)+".txt","w")
            file.write(To_be_outputted)
            file.close()
    #//////////////////////////////////////////////////////////////////////////////
    #............................Forming the total scripts...........
    #//////////////////////////////////////////////////////////////////////////////
    # Group partial scripts by sid: "alone" scripts each get their own entry;
    # shared sids are concatenated after a single [lang, preamble, functions]
    # prefix.  Entry layout: [lang, preamble, functions, piece, piece, ...].
    scripts={}
    alonecount=0
    for partsc_id, ps in part_script.iteritems():
        lang=ps[1]
        prearray=[lang,Lang_Info[lang]["func.preamble"],pre_functions(lang)]
        #defines function to add
        toadd=[]
        if len(re.findall('\noutput', ps[3],re.DOTALL))!=0:
            toadd=toadd+[var_rep(Lang_Info[lang]['func.output'],{"i":partsc_id})]
        if len(re.findall('scrinc', ps[3],re.DOTALL))!=0:
            toadd=toadd+[include_script(lang,ps[3],ps[2],partsc_id)]
        toadd=toadd+[ps[3].replace('from __future__ import division','')]
        #end of defining function to add
        if ps[0] == "alone":
            scripts['alone'+str(alonecount)]=prearray+toadd
            alonecount=alonecount+1
        else:
            try:
                scripts[ps[0]].extend(toadd)
            except KeyError:
                scripts[ps[0]]=prearray
                scripts[ps[0]].extend(toadd)
    #//////////////////////////////////////////////////////////////////////////////
    #............................Runs the Scripts...........
    #//////////////////////////////////////////////////////////////////////////////
    # Write each assembled script to tempcode.<ext> and execute it via the
    # language's run_command (!!i!! placeholder = the temp filename).
    for scr_run in scripts.itervalues():
        scr_final=''
        i=1
        while i<len(scr_run):
            scr_final=scr_final+'\n'+scr_run[i]
            i=i+1
        file=open("tempcode."+Lang_Info[scr_run[0]]['file_ext'],"w")
        file.write(scr_final)
        file.close()
        os.system(Lang_Info[scr_run[0]]['run_command'].replace('!!i!!','tempcode.'+Lang_Info[scr_run[0]]['file_ext']))
    #//////////////////////////////////////////////////////////////////////////////
    #.............Finds and Replaces <script...>... </script> with output..........
    #//////////////////////////////////////////////////////////////////////////////
    # NOTE(review): this reads outputfc_<id>.txt for EVERY partial script, but
    # the file is only written above when the action had !!output=...!! (the
    # executed script may also create it) -- confirm a missing file cannot occur.
    for partsc_id,ps in part_script.iteritems():
        file=open('outputfc_'+str(partsc_id)+'.txt',"r")
        replace=file.read()
        file.close()
        find='<script='+ps[1]+':action={'+ps[2]+'}>'+ps[3]+'</script>'
        newcontents=newcontents.replace(find,replace)
    #//////////////////////////////////////////////////////////////////////////////
    #........................Line Numbering of Lsting........................
    #//////////////////////////////////////////////////////////////////////////////
    # Scripts tagged !!LinNoId=<group>!! get continuous listing line numbers:
    # each listing's firstnumber continues from the previous one in its group.
    line_collections={}
    for partsc_id,ps in part_script.iteritems():
        action=ps[2]
        linenumber=re.findall('!!LinNoId=(.*?)!!', action,re.DOTALL)
        if len(linenumber)!=0:
            ToEnter=ps[3].rstrip()#removes trailing lines.
            toappend=[partsc_id,ToEnter.count('\n')]
            try:
                line_collections[linenumber[0]].append(toappend)
            except KeyError:
                line_collections[linenumber[0]]=[]
                line_collections[linenumber[0]].append(toappend)
    for key,array in line_collections.iteritems():
        totalno=1
        for x in array:
            # NOTE(review): dead branch -- totalno starts at 1 and only grows,
            # so this never runs, and `inno` is never read.
            if totalno==0:
                inno=1
            newcontents=newcontents.replace("style=mystyle"+str(x[0]), "style=mystyle"+str(x[0])+",firstnumber="+str(totalno))
            totalno=totalno+x[1]
    return(newcontents)
# Process the input document and emit the final LaTeX source.
contentout = codefind(contents)
with open("temp.tex", "w") as tex_out:
    tex_out.write(contentout)
| |
# Parameter table for a spectrum-fitting model (the compton_*/coherent_sct
# names and keV descriptions suggest X-ray fluorescence fitting).  Each entry
# maps a parameter name to:
#   - per-strategy bound flags ('adjust_element1..3', 'e_calibration',
#     'fit_with_tail', 'free_more', 'linear') with values such as
#     'fixed', 'none', 'lohi', 'hi';
#   - numeric 'min'/'max' bounds, a 'default' and the current fitted 'value';
#   - optional 'description'/'tool_tip' strings.
# 'non_fitting_values' holds global settings that are not fitted.
param_data = {
    "coherent_sct_amplitude": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "none",
        "e_calibration": "none",
        "fit_with_tail": "none",
        "free_more": "none",
        "linear": "none",
        "max": 1e9,
        "min": 1.0,
        "default": 100.0,
        "value": 91.84486613900303,
    },
    "coherent_sct_energy": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "description": "Incident E [keV]",
        "e_calibration": "fixed",
        "fit_with_tail": "fixed",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 13.0,
        "min": 9.0,
        "default": 12.0,
        "value": 12.0,
    },
    "compton_amplitude": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "none",
        "e_calibration": "none",
        "fit_with_tail": "none",
        "free_more": "none",
        "linear": "none",
        "max": 1e9,
        "min": 0.0,
        "default": 100.0,
        "value": 213.7603636952067,
    },
    "compton_angle": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "e_calibration": "fixed",
        "fit_with_tail": "lohi",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 105.0,
        "min": 70.0,
        "default": 90.0,
        "value": 90.0,
    },
    "compton_f_step": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "e_calibration": "fixed",
        "fit_with_tail": "fixed",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 0.01,
        "min": 0.0,
        "default": 0.005,
        "value": 0.00842597013124,
    },
    "compton_f_tail": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "e_calibration": "fixed",
        "fit_with_tail": "lohi",
        "free_more": "fixed",
        "linear": "fixed",
        "max": 4.0,
        "min": 1.5,
        "default": 2.5,
        "value": 2.5,
    },
    "compton_fwhm_corr": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "description": "fwhm Coef, Compton",
        "e_calibration": "fixed",
        "fit_with_tail": "lohi",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 3.0,
        "min": 1.5,
        "default": 2.0,
        "value": 2.0,
    },
    "compton_gamma": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "e_calibration": "fixed",
        "fit_with_tail": "fixed",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 4.2,
        "min": 3.8,
        # NOTE(review): default 2.5 lies outside [min, max] = [3.8, 4.2] --
        # confirm whether this is intended.
        "default": 2.5,
        "value": 4.161818621138601,
    },
    "compton_hi_f_tail": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "e_calibration": "fixed",
        "fit_with_tail": "fixed",
        "free_more": "fixed",
        "linear": "fixed",
        "max": 1.0,
        "min": 1e-06,
        "default": 0.0001,
        "value": 0.0001,
    },
    "compton_hi_gamma": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "e_calibration": "fixed",
        "fit_with_tail": "fixed",
        "free_more": "fixed",
        "linear": "fixed",
        "max": 3.0,
        "min": 0.025,
        "default": 0.028,
        "value": 0.027,
    },
    "e_linear": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "lohi",
        "description": "E Calib. Coef, a1",
        "e_calibration": "lohi",
        "fit_with_tail": "fixed",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 0.013,
        "min": 0.009,
        "tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
        "default": 0.01,
        "value": 0.01,
    },
    "e_offset": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "lohi",
        "description": "E Calib. Coef, a0",
        "e_calibration": "lohi",
        "fit_with_tail": "fixed",
        "free_more": "hi",
        "linear": "fixed",
        "max": 0.2,
        "min": -0.2,
        "tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
        "default": 0.0,
        "value": 0.0,
    },
    "e_quadratic": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "lohi",
        "description": "E Calib. Coef, a2",
        "e_calibration": "lohi",
        "fit_with_tail": "fixed",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 1e-06,
        "min": -1e-06,
        "tool_tip": "E(channel) = a0 + a1*channel+ a2*channel**2",
        "default": 0.0,
        "value": 0.0,
    },
    "fwhm_fanoprime": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "description": "fwhm Coef, b2",
        "e_calibration": "fixed",
        "fit_with_tail": "lohi",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 0.0002,
        "min": 1e-05,
        "tool_tip": (
            "width**2 = (b1/2.3548)**2 + 3.85*b2*E, 3.85keV is electron-hole pair creation energy in silicon"
        ),
        "default": 0.000114,
        "value": 0.000114,
    },
    "fwhm_offset": {
        "adjust_element1": "fixed",
        "adjust_element2": "fixed",
        "adjust_element3": "fixed",
        "bound_type": "fixed",
        "description": "fwhm Coef, b1 [keV]",
        "e_calibration": "fixed",
        "fit_with_tail": "lohi",
        "free_more": "lohi",
        "linear": "fixed",
        "max": 0.2,
        "min": 0.05,
        "tool_tip": "width**2 = (b1/2.3548)**2 + 3.85*b2*E",
        "default": 0.1,
        "value": 0.1,
    },
    "non_fitting_values": {
        "element_list": "Al_K, S_K, Cl_K, Ar_K, Ca_K, Cr_K, Fe_K, Co_K, Ni_K, Cu_K, Pm_L, Gd_L",
        "energy_bound_high": {"default_value": 12.8, "description": "E high [keV]", "value": 12.8},
        "energy_bound_low": {"default_value": 1.0, "description": "E low [keV]", "value": 1.0},
        "epsilon": 3.85,
        "escape_ratio": 0.005,
        "background_width": 0.5,
    },
}
| |
# Generated by h2py from /usr/include/netinet/in.h
# Included from sys/platform.h
def _NTO_HDR_PIECE_(x): return x
# NOTE(review): h2py emitted one definition per CPU branch of the original C
# macro; in Python each redefinition shadows the previous one, so only the
# last ("unknown_cpu") survives.  The bodies reference names (x86, ppc, ...,
# h-relative paths) that are undefined at runtime, so calling any of these
# would raise NameError -- generated artifact, not usable code.
def _NTO_CPU_HDR_DIR_(h): return x86/h
def _NTO_CPU_HDR_DIR_(h): return ppc/h
def _NTO_CPU_HDR_DIR_(h): return mips/h
def _NTO_CPU_HDR_DIR_(h): return sh/h
def _NTO_CPU_HDR_DIR_(h): return arm/h
def _NTO_CPU_HDR_DIR_(h): return unknown_cpu/h
__STDC_HOSTED__ = 1
__STDC_ISO_10646__ = 200009L
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
_LARGEFILE64_SOURCE = 1
_INT32 = int
# Included from inttypes.h
# C99 printf (PRI*) and scanf (SCN*) format-specifier fragments for the
# fixed-width integer types.  h2py flattened the 32-bit and 64-bit
# preprocessor branches, so every *PTR name is assigned twice; Python's
# last-definition-wins keeps the 64-bit variant.
PRId8 = "hhd"
PRId16 = "hd"
PRId32 = "d"
PRId64 = "lld"
PRIdFAST8 = "hhd"
PRIdFAST16 = "hd"
PRIdFAST32 = "d"
PRIdFAST64 = "lld"
PRIdLEAST8 = "hhd"
PRIdLEAST16 = "hd"
PRIdLEAST32 = "d"
PRIdLEAST64 = "lld"
PRIdMAX = "lld"
PRIdPTR = PRId32
PRIdPTR = PRId64
PRIi8 = "hhi"
PRIi16 = "hi"
PRIi32 = "i"
PRIi64 = "lli"
PRIiFAST8 = "hhi"
PRIiFAST16 = "hi"
PRIiFAST32 = "i"
PRIiFAST64 = "lli"
PRIiLEAST8 = "hhi"
PRIiLEAST16 = "hi"
PRIiLEAST32 = "i"
PRIiLEAST64 = "lli"
PRIiMAX = "lli"
PRIiPTR = PRIi32
PRIiPTR = PRIi64
PRIo8 = "hho"
PRIo16 = "ho"
PRIo32 = "o"
PRIo64 = "llo"
PRIoFAST8 = "hho"
PRIoFAST16 = "ho"
PRIoFAST32 = "o"
PRIoFAST64 = "llo"
PRIoLEAST8 = "hho"
PRIoLEAST16 = "ho"
PRIoLEAST32 = "o"
PRIoLEAST64 = "llo"
PRIoMAX = "llo"
PRIoPTR = PRIo32
PRIoPTR = PRIo64
PRIu8 = "hhu"
PRIu16 = "hu"
PRIu32 = "u"
PRIu64 = "llu"
PRIuFAST8 = "hhu"
PRIuFAST16 = "hu"
PRIuFAST32 = "u"
PRIuFAST64 = "llu"
PRIuLEAST8 = "hhu"
PRIuLEAST16 = "hu"
PRIuLEAST32 = "u"
PRIuLEAST64 = "llu"
PRIuMAX = "llu"
PRIuPTR = PRIu32
PRIuPTR = PRIu64
PRIx8 = "hhx"
PRIx16 = "hx"
PRIx32 = "x"
PRIx64 = "llx"
PRIxFAST8 = "hhx"
PRIxFAST16 = "hx"
PRIxFAST32 = "x"
PRIxFAST64 = "llx"
PRIxLEAST8 = "hhx"
PRIxLEAST16 = "hx"
PRIxLEAST32 = "x"
PRIxLEAST64 = "llx"
PRIxMAX = "llx"
PRIxPTR = PRIx32
PRIxPTR = PRIx64
PRIX8 = "hhX"
PRIX16 = "hX"
PRIX32 = "X"
PRIX64 = "llX"
PRIXFAST8 = "hhX"
PRIXFAST16 = "hX"
PRIXFAST32 = "X"
PRIXFAST64 = "llX"
PRIXLEAST8 = "hhX"
PRIXLEAST16 = "hX"
PRIXLEAST32 = "X"
PRIXLEAST64 = "llX"
PRIXMAX = "llX"
PRIXPTR = PRIX32
PRIXPTR = PRIX64
# scanf() counterparts of the PRI* specifiers above.
SCNd8 = "hhd"
SCNd16 = "hd"
SCNd32 = "d"
SCNd64 = "lld"
SCNdFAST8 = "hhd"
SCNdFAST16 = "hd"
SCNdFAST32 = "d"
SCNdFAST64 = "lld"
SCNdLEAST8 = "hhd"
SCNdLEAST16 = "hd"
SCNdLEAST32 = "d"
SCNdLEAST64 = "lld"
SCNdMAX = "lld"
SCNdPTR = SCNd32
SCNdPTR = SCNd64
SCNi8 = "hhi"
SCNi16 = "hi"
SCNi32 = "i"
SCNi64 = "lli"
SCNiFAST8 = "hhi"
SCNiFAST16 = "hi"
SCNiFAST32 = "i"
SCNiFAST64 = "lli"
SCNiLEAST8 = "hhi"
SCNiLEAST16 = "hi"
SCNiLEAST32 = "i"
SCNiLEAST64 = "lli"
SCNiMAX = "lli"
SCNiPTR = SCNi32
SCNiPTR = SCNi64
SCNo8 = "hho"
SCNo16 = "ho"
SCNo32 = "o"
SCNo64 = "llo"
SCNoFAST8 = "hho"
SCNoFAST16 = "ho"
SCNoFAST32 = "o"
SCNoFAST64 = "llo"
SCNoLEAST8 = "hho"
SCNoLEAST16 = "ho"
SCNoLEAST32 = "o"
SCNoLEAST64 = "llo"
SCNoMAX = "llo"
SCNoPTR = SCNo32
SCNoPTR = SCNo64
SCNu8 = "hhu"
SCNu16 = "hu"
SCNu32 = "u"
SCNu64 = "llu"
SCNuFAST8 = "hhu"
SCNuFAST16 = "hu"
SCNuFAST32 = "u"
SCNuFAST64 = "llu"
SCNuLEAST8 = "hhu"
SCNuLEAST16 = "hu"
SCNuLEAST32 = "u"
SCNuLEAST64 = "llu"
SCNuMAX = "llu"
SCNuPTR = SCNu32
SCNuPTR = SCNu64
SCNx8 = "hhx"
SCNx16 = "hx"
SCNx32 = "x"
SCNx64 = "llx"
SCNxFAST8 = "hhx"
SCNxFAST16 = "hx"
SCNxFAST32 = "x"
SCNxFAST64 = "llx"
SCNxLEAST8 = "hhx"
SCNxLEAST16 = "hx"
SCNxLEAST32 = "x"
SCNxLEAST64 = "llx"
SCNxMAX = "llx"
SCNxPTR = SCNx32
SCNxPTR = SCNx64
SCNX8 = "hhX"
SCNX16 = "hX"
SCNX32 = "X"
SCNX64 = "llX"
SCNXFAST8 = "hhX"
SCNXFAST16 = "hX"
SCNXFAST32 = "X"
SCNXFAST64 = "llX"
SCNXLEAST8 = "hhX"
SCNXLEAST16 = "hX"
SCNXLEAST32 = "X"
SCNXLEAST64 = "llX"
SCNXMAX = "llX"
SCNXPTR = SCNX32
SCNXPTR = SCNX64
# Included from net/netbyte.h
# Included from gulliver.h
# Byte-order helpers.  The __cpu_endian_* / __cpu_unaligned_* functions they
# delegate to come from the original C environment and are NOT defined in
# this generated module, so calling the RET/SWAP/UNALIGNED forms here raises
# NameError -- TODO confirm before relying on them at runtime.
def ENDIAN_RET16(__x): return __cpu_endian_ret16(__x)
def ENDIAN_RET32(__x): return __cpu_endian_ret32(__x)
def ENDIAN_RET64(__x): return __cpu_endian_ret64(__x)
def ENDIAN_SWAP16(__x): return __cpu_endian_swap16(__x)
def ENDIAN_SWAP32(__x): return __cpu_endian_swap32(__x)
def ENDIAN_SWAP64(__x): return __cpu_endian_swap64(__x)
# Both branches of the original little/big-endian #ifdef were emitted;
# Python's last-definition-wins keeps the big-endian ("be") variants below.
ENDIAN_STRINGNAME = "le"
def ENDIAN_LE16(__x): return (__x)
def ENDIAN_LE32(__x): return (__x)
def ENDIAN_LE64(__x): return (__x)
def ENDIAN_BE16(__x): return ENDIAN_RET16(__x)
def ENDIAN_BE32(__x): return ENDIAN_RET32(__x)
def ENDIAN_BE64(__x): return ENDIAN_RET64(__x)
ENDIAN_STRINGNAME = "be"
def ENDIAN_LE16(__x): return ENDIAN_RET16(__x)
def ENDIAN_LE32(__x): return ENDIAN_RET32(__x)
def ENDIAN_LE64(__x): return ENDIAN_RET64(__x)
def ENDIAN_BE16(__x): return (__x)
def ENDIAN_BE32(__x): return (__x)
def ENDIAN_BE64(__x): return (__x)
def UNALIGNED_RET16(__p): return __cpu_unaligned_ret16(__p)
def UNALIGNED_RET32(__p): return __cpu_unaligned_ret32(__p)
def UNALIGNED_RET64(__p): return __cpu_unaligned_ret64(__p)
def ENDIAN_CHKMSG(info): return (0)
# Included from sys/srcversion.h
# h2py truncated several multi-line C macros below to "return \"; the
# dangling backslash spliced the following "def" line into the return
# statement and made the entire module a SyntaxError.  The truncated
# duplicates are removed and only the completely-translated definitions are
# kept, preserving the original emission order so Python's
# last-definition-wins picks the same surviving variant.
def __SRCVERSION(__id):
    # The macro body could not be translated by h2py; keep a no-op stub.
    return None
def __NET_LE16(__x): return ((__x))
def __NET_LE32(__x): return ((__x))
def __NET_LE64(__x): return ((__x))
def __NET_BE16(__x): return __cpu_endian_ret16((__x))
def __NET_BE32(__x): return __cpu_endian_ret32((__x))
def __NET_BE64(__x): return __cpu_endian_ret64((__x))
def __NET_LE16(__x): return __cpu_endian_ret16((__x))
def __NET_LE32(__x): return __cpu_endian_ret32((__x))
def __NET_LE64(__x): return __cpu_endian_ret64((__x))
def __NET_BE16(__x): return ((__x))
def __NET_BE32(__x): return ((__x))
def __NET_BE64(__x): return ((__x))
# Host/network byte-order conversion wrappers (htons/ntohs, htobe*/be*toh,
# htole*/le*toh).  They delegate to the __NET_BE*/__NET_LE* helpers defined
# earlier in this generated module.
def htons(__x): return __NET_BE16((__x))
def ntohs(__x): return __NET_BE16((__x))
def htonl(__x): return __NET_BE32((__x))
def ntohl(__x): return __NET_BE32((__x))
def htobe16(__x): return __NET_BE16((__x))
def be16toh(__x): return __NET_BE16((__x))
def htobe32(__x): return __NET_BE32((__x))
def be32toh(__x): return __NET_BE32((__x))
def htobe64(__x): return __NET_BE64((__x))
def be64toh(__x): return __NET_BE64((__x))
def htole16(__x): return __NET_LE16((__x))
def le16toh(__x): return __NET_LE16((__x))
def htole32(__x): return __NET_LE32((__x))
def le32toh(__x): return __NET_LE32((__x))
def htole64(__x): return __NET_LE64((__x))
def le64toh(__x): return __NET_LE64((__x))
# IP protocol numbers, well-known port limits, per-socket IP options and
# ip(4) sysctl identifiers, as translated from <netinet/in.h>.
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = 4
IPPROTO_TCP = 6
IPPROTO_EGP = 8
IPPROTO_PUP = 12
IPPROTO_UDP = 17
IPPROTO_IDP = 22
IPPROTO_TP = 29
IPPROTO_IPV6 = 41
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_MOBILE = 55
IPPROTO_IPV6_ICMP = 58
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_EON = 80
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_PIM = 103
IPPROTO_QNET = 106
IPPROTO_IPCOMP = 108
IPPROTO_VRRP = 112
IPPROTO_CARP = 112
IPPROTO_SCTP = 132
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
CTL_IPPROTO_IPSEC = 258
IPPORT_RESERVED = 1024
IPPORT_ANONMIN = 49152
IPPORT_ANONMAX = 65535
IPPORT_RESERVEDMIN = 600
IPPORT_RESERVEDMAX = (IPPORT_RESERVED-1)
# NOTE: "uint32_t" is a C type, undefined in this module; calling __IPADDR
# raises NameError.
def __IPADDR(x): return ((uint32_t)(x))
IN_CLASSA_NSHIFT = 24
IN_CLASSA_MAX = 128
IN_CLASSB_NSHIFT = 16
IN_CLASSB_MAX = 65536
IN_CLASSC_NSHIFT = 8
IN_CLASSD_NSHIFT = 28
# NOTE: IN_CLASSD() was not emitted by h2py; calling IN_MULTICAST raises
# NameError.
def IN_MULTICAST(i): return IN_CLASSD(i)
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_ERRORMTU = 21
IP_IPSEC_POLICY_COMPAT = 22
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_FORWSRCRT = 5
IPCTL_DIRECTEDBCAST = 6
IPCTL_ALLOWSRCRT = 7
IPCTL_SUBNETSARELOCAL = 8
IPCTL_MTUDISC = 9
IPCTL_ANONPORTMIN = 10
IPCTL_ANONPORTMAX = 11
IPCTL_MTUDISCTIMEOUT = 12
IPCTL_MAXFLOWS = 13
IPCTL_HOSTZEROBROADCAST = 14
IPCTL_GIF_TTL = 15
IPCTL_LOWPORTMIN = 16
IPCTL_LOWPORTMAX = 17
IPCTL_MAXFRAGPACKETS = 18
IPCTL_GRE_TTL = 19
IPCTL_CHECKINTERFACE = 20
IPCTL_IFQ = 21
IPCTL_RANDOMID = 22
IPCTL_LOOPBACKCKSUM = 23
IPCTL_STATS = 24
IPCTL_MAXID = 25
# Included from netinet6/in6.h
__KAME_VERSION = "NetBSD-current"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
# Both endianness branches of the address-constant #ifdef were emitted; the
# second run below overrides the first (last definition wins).
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = (-16711680)
IPV6_ADDR_INT32_MLL = (-16646144)
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = (-65536)
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
# h2py truncated the multi-line IN6_IS_* / IFA6_IS_* C macros to
# "def ...: return \"; each dangling backslash spliced the next line into
# the return statement, turning this whole region into one SyntaxError.
# The predicates are kept as explicit no-op stubs (return None, which is
# falsy) so the module parses; duplicate names keep the original
# last-definition-wins ordering.
def IN6_IS_ADDR_UNSPECIFIED(a):
    # Macro body lost in translation -- TODO: port the C expression if needed
    return None
def IN6_IS_ADDR_LOOPBACK(a): return None
def IN6_IS_ADDR_V4COMPAT(a): return None
def IN6_IS_ADDR_V4MAPPED(a): return None
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return None
def IN6_IS_ADDR_SITELOCAL(a): return None
def IN6_IS_ADDR_MC_NODELOCAL(a): return None
def IN6_IS_ADDR_MC_INTFACELOCAL(a): return None
def IN6_IS_ADDR_MC_LINKLOCAL(a): return None
def IN6_IS_ADDR_MC_SITELOCAL(a): return None
def IN6_IS_ADDR_MC_ORGLOCAL(a): return None
def IN6_IS_ADDR_MC_GLOBAL(a): return None
def IN6_IS_ADDR_MC_NODELOCAL(a): return None
def IN6_IS_ADDR_MC_LINKLOCAL(a): return None
def IN6_IS_ADDR_MC_SITELOCAL(a): return None
def IN6_IS_ADDR_MC_ORGLOCAL(a): return None
def IN6_IS_ADDR_MC_GLOBAL(a): return None
def IN6_IS_SCOPE_LINKLOCAL(a): return None
def IN6_IS_SCOPE_EMBEDDABLE(__a): return None
def IFA6_IS_DEPRECATED(a): return None
def IFA6_IS_INVALID(a): return None
def IFA6_IS_DEPRECATED(a): return None
def IFA6_IS_INVALID(a): return None
# IPv6 socket options, ip6(4) sysctl identifiers and <sys/socket.h>
# constants (socket types, SO_* options, address/protocol families).
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_2292PKTINFO = 19
IPV6_2292HOPLIMIT = 20
IPV6_2292NEXTHOP = 21
IPV6_2292HOPOPTS = 22
IPV6_2292DSTOPTS = 23
IPV6_2292RTHDR = 24
IPV6_2292PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_IPSEC_POLICY_COMPAT = 28
IPV6_FAITH = 29
IPV6_RTHDRDSTOPTS = 35
IPV6_RECVPKTINFO = 36
IPV6_RECVHOPLIMIT = 37
IPV6_RECVRTHDR = 38
IPV6_RECVHOPOPTS = 39
IPV6_RECVDSTOPTS = 40
IPV6_RECVRTHDRDSTOPTS = 41
IPV6_USE_MIN_MTU = 42
IPV6_RECVPATHMTU = 43
IPV6_PATHMTU = 44
IPV6_PKTINFO = 46
IPV6_HOPLIMIT = 47
IPV6_NEXTHOP = 48
IPV6_HOPOPTS = 49
IPV6_DSTOPTS = 50
IPV6_RTHDR = 51
IPV6_RECVTCLASS = 57
IPV6_OTCLASS = 58
IPV6_TCLASS = 61
IPV6_DONTFRAG = 62
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_V6ONLY = 24
IPV6CTL_ANONPORTMIN = 28
IPV6CTL_ANONPORTMAX = 29
IPV6CTL_LOWPORTMIN = 30
IPV6CTL_LOWPORTMAX = 31
IPV6CTL_USE_DEFAULTZONE = 39
IPV6CTL_MAXFRAGS = 41
IPV6CTL_IFQ = 42
IPV6CTL_MAXID = 43
# Included from sys/socket.h
# Included from sys/uio.h
# Included from _pack64.h
UIO_MAXIOV = 1024
UIO_SMALLIOV = 8
# Included from _packpop.h
# Included from sys/types.h
# Device-number helpers: "(int)(...)" is parsed by Python as a call to the
# int() builtin, so these two translated macros actually work as intended.
def minor(device): return ((int)((device) & 0x3ff))
def major(device): return ((int)(((device) >> 10) & 0x3f))
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_OOBINLINE = 0x0100
SO_REUSEPORT = 0x0200
SO_TIMESTAMP = 0x0400
SO_BINDTODEVICE = 0x0800
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_OVERFLOWED = 0x1009
SOL_SOCKET = 0xffff
AF_UNSPEC = 0
AF_LOCAL = 1
AF_UNIX = AF_LOCAL
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_ROUTE = 17
AF_LINK = 18
pseudo_AF_XTP = 19
AF_COIP = 20
AF_CNT = 21
pseudo_AF_RTIP = 22
AF_IPX = 23
AF_INET6 = 24
pseudo_AF_PIP = 25
AF_ISDN = 26
AF_E164 = AF_ISDN
AF_NATM = 27
AF_ARP = 28
pseudo_AF_KEY = 29
pseudo_AF_HDRCMPLT = 30
AF_BLUETOOTH = 31
AF_IEEE80211 = 32
AF_QNET = 33
AF_MAX = 34
_SS_MAXSIZE = 128
# Protocol families mirror the address families above.
PF_UNSPEC = AF_UNSPEC
PF_LOCAL = AF_LOCAL
PF_UNIX = PF_LOCAL
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_ROUTE = AF_ROUTE
PF_LINK = AF_LINK
PF_XTP = pseudo_AF_XTP
PF_COIP = AF_COIP
PF_CNT = AF_CNT
PF_INET6 = AF_INET6
PF_IPX = AF_IPX
PF_RTIP = pseudo_AF_RTIP
PF_PIP = pseudo_AF_PIP
PF_ISDN = AF_ISDN
PF_E164 = AF_E164
PF_NATM = AF_NATM
PF_ARP = AF_ARP
PF_KEY = pseudo_AF_KEY
PF_BLUETOOTH = AF_BLUETOOTH
PF_MAX = AF_MAX
# h2py truncated the multi-line SOCKCREDSIZE / CMSG_DATA / CCMSG_DATA /
# CMSG_FIRSTHDR C macros to "def ...: return \"; the dangling backslash
# spliced the following line into the return statement and made the module a
# SyntaxError.  They are kept as explicit no-op stubs (return None) so the
# module parses.
def SOCKCREDSIZE(ngrps):
    # Macro body lost in translation -- TODO: port the C expression if needed
    return None
NET_MAXID = AF_MAX
PCB_SLOP = 20
PCB_ALL = 0
NET_RT_DUMP = 1
NET_RT_FLAGS = 2
NET_RT_OIFLIST = 3
NET_RT_IFLIST = 4
NET_RT_MAXID = 5
SOMAXCONN = 128
MSG_OOB = 0x0001
MSG_PEEK = 0x0002
MSG_DONTROUTE = 0x0004
MSG_EOR = 0x0008
MSG_TRUNC = 0x0010
MSG_CTRUNC = 0x0020
MSG_WAITALL = 0x0040
MSG_DONTWAIT = 0x0080
MSG_BCAST = 0x0100
MSG_MCAST = 0x0200
MSG_NOTIFICATION = 0x0400
MSG_NOSIGNAL = 0x0800
MSG_HDREXTEN = (-2147483648)
def CMSG_DATA(cmsg):
    # Macro body lost in translation
    return None
def CCMSG_DATA(cmsg):
    # Macro body lost in translation
    return None
# NOTE: __cmsg_alignbytes() comes from the original C library and is not
# defined here; calling the alignment helpers raises NameError.
def __CMSG_ALIGN(n): return (((n) + __cmsg_alignbytes()) & ~__cmsg_alignbytes())
def CMSG_ALIGN(n): return __CMSG_ALIGN(n)
def CMSG_FIRSTHDR(mhdr):
    # Macro body lost in translation
    return None
SCM_RIGHTS = 0x01
SCM_TIMESTAMP = 0x02
SCM_CREDS = 0x04
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
# NOTE: INADDR_ANY is not defined anywhere in this generated module;
# in_nullhost() raises NameError if called -- TODO confirm.
def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
| |
#!/usr/bin/env python
# -- Content-Encoding: UTF-8 --
"""
Tests the MQTT client module
:author: Thomas Calmant
"""
import logging
import os
import sys
import threading
import time
import uuid
try:
import unittest2 as unittest
except ImportError:
import unittest
# Pelix
from pelix.utilities import to_str
try:
import pelix.misc.mqtt_client as mqtt
except ImportError:
# Missing requirement: not a fatal error
raise unittest.SkipTest("MQTT client dependency missing: skip test")
from tests.mqtt_utilities import find_mqtt_server
# ------------------------------------------------------------------------------

# Module version
__version_info__ = (1, 0, 1)
__version__ = ".".join(str(x) for x in __version_info__)

# Look for a reachable MQTT broker once at import time; skip the whole test
# module if none is available.
MQTT_SERVER = find_mqtt_server()
if not MQTT_SERVER:
    raise unittest.SkipTest("No valid MQTT server found")
def _disconnect_client(client):
    """
    Forcefully drops the client's connection (implementation specific)

    :param client: MQTT Client
    """
    # Reach through the name-mangled private attribute and close the raw
    # socket, simulating an unexpected network failure.
    client._MqttClient__mqtt._sock.close()
class MqttClientTest(unittest.TestCase):
"""
Tests the MQTT client provided by Pelix
"""
def test_connect(self):
    """
    Test the client connection
    """
    # Create client
    client = mqtt.MqttClient()
    event = threading.Event()
    shared = []  # collects the client passed to each callback

    def on_connect(clt, result_code):
        # result_code 0 == connection accepted by the broker
        if result_code == 0:
            shared.append(clt)
            event.set()

    def on_disconnect(clt, result_code):
        # result_code 0 == clean (requested) disconnection
        if result_code == 0:
            shared.append(clt)
            event.set()

    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    # Check length of client ID (MQTT 3.1 limits client IDs to 23 bytes)
    self.assertLessEqual(len(client.client_id), 23)
    # Connect
    client.connect(MQTT_SERVER)
    if not event.wait(5):
        # Connection failed ?
        client.disconnect()
        self.fail("MQTT connection timeout")
    # Check client (and single call)
    self.assertListEqual(shared, [client])
    # Clear
    del shared[:]
    event.clear()
    # Disconnect
    client.disconnect()
    if not event.wait(5):
        self.fail("MQTT disconnection timeout")
    # Check client (and single call)
    self.assertListEqual(shared, [client])
def test_reconnect(self):
    """
    Tests client reconnection
    """
    if os.name == 'posix':
        # FIXME: try harder
        self.skipTest("This test doesn't work on POSIX...")
    # Create client
    client = mqtt.MqttClient()
    event_connect = threading.Event()
    event_disconnect = threading.Event()

    def on_connect(clt, result_code):
        event_connect.set()

    def on_disconnect(clt, result_code):
        event_disconnect.set()

    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    # Connect with a short (10s) keep-alive so the dead link is noticed soon
    client.connect(MQTT_SERVER, 1883, 10)
    if not event_connect.wait(5):
        # Connection failed ?
        client.disconnect()
        self.fail("MQTT connection timeout")
    # Send something
    mid = client.publish("/pelix/test", "dummy", wait=True)
    client.wait_publication(mid, 5)
    # Disconnect (forcefully, at socket level, to simulate a network failure)
    event_connect.clear()
    _disconnect_client(client)
    # Wait for event
    if not event_disconnect.wait(30):
        client.disconnect()
        self.fail("No disconnection event after 30 seconds")
    # Wait for reconnection (the client is expected to reconnect on its own)
    if not event_connect.wait(30):
        client.disconnect()
        self.fail("No reconnected after 30 seconds")
    # Clean up
    client.disconnect()
def test_will(self):
    """
    Tests the will message configuration
    """
    # Unique topic/payload so concurrent test runs don't interfere
    will_topic = "pelix/test/mqtt/will/{0}".format(str(uuid.uuid4()))
    will_value = str(uuid.uuid4())
    # Create client 1 (the one that will die and trigger the will)
    client = mqtt.MqttClient()
    event = threading.Event()

    def on_connect(clt, result_code):
        if result_code == 0:
            event.set()

    def on_disconnect(clt, result_code):
        if result_code != 0:
            # Disconnected unwillingly: stop the timer
            # -- IMPLEMENTATION SPECIFIC --
            getattr(clt, '_MqttClient__stop_timer')()
            # == IMPLEMENTATION SPECIFIC ==

    client.on_connect = on_connect
    client.on_disconnect = on_disconnect
    # Create client 2 (the observer that should receive the will message)
    client_2 = mqtt.MqttClient()
    event_2 = threading.Event()
    shared_2 = []

    def on_connect_2(clt, result_code):
        if result_code == 0:
            event_2.set()

    def on_message_2(clt, msg):
        event_2.set()
        shared_2.append(msg)

    client_2.on_connect = on_connect_2
    client_2.on_message = on_message_2
    # Check clients IDs
    self.assertNotEqual(client.client_id, client_2.client_id)
    # Set the will for client 1 (must be done before connecting)
    client.set_will(will_topic, will_value)
    # Connect client 1
    client.connect(MQTT_SERVER, 1883, 10)
    if not event.wait(5):
        client.disconnect()
        self.fail("Client 1 timed out")
    # Connect client 2
    client_2.connect(MQTT_SERVER, 1883)
    if not event_2.wait(5):
        client_2.disconnect()
        self.fail("Client 2 timed out")
    # Clear events
    event.clear()
    event_2.clear()
    # Client 2 subscribes to the will message
    client_2.subscribe(will_topic)
    # Wait a little, so that the subscription is activated
    time.sleep(5)
    # Disconnect client 1 (socket-level, so the broker publishes the will)
    _disconnect_client(client)
    # Check client 2
    if not event_2.wait(30):
        client_2.disconnect()
        self.fail("Will not received within 30 seconds")
    # Disconnect client 2
    client_2.disconnect()
    # Check message
    msg = shared_2[0]
    self.assertEqual(msg.topic, will_topic)
    self.assertEqual(to_str(msg.payload), will_value)
def test_wait_publish(self):
    """
    Tests the wait_publish method
    """
    # Unique topic/payload so concurrent test runs don't interfere
    msg_topic = "pelix/test/mqtt/wait/{0}".format(str(uuid.uuid4()))
    msg_value = str(uuid.uuid4())
    # Create client
    client = mqtt.MqttClient()
    event = threading.Event()
    shared = []

    def on_connect(clt, result_code):
        if result_code == 0:
            event.set()

    def on_message(clt, msg):
        shared.append(msg)
        event.set()

    client.on_connect = on_connect
    client.on_message = on_message
    # Connect
    client.connect(MQTT_SERVER)
    client.subscribe(msg_topic)
    if not event.wait(5):
        client.disconnect()
        self.fail("Connection timeout")
    # Send message and block until the broker acknowledges the publication
    event.clear()
    mid = client.publish(msg_topic, msg_value, wait=True)
    client.wait_publication(mid)
    # Wait for the message to be received
    if not event.wait(5):
        client.disconnect()
        self.fail("Message not received after publication")
    # Disconnect
    client.disconnect()
    # Get the message
    msg = shared[0]
    self.assertEqual(msg.topic, msg_topic)
    self.assertEqual(to_str(msg.payload), msg_value)
def test_client_id(self):
    """
    Tests the generation of a client ID
    """
    max_len = 23  # MQTT 3.1 limit on client identifiers

    # Default prefix: the generated ID must fit the limit
    self.assertLessEqual(len(mqtt.MqttClient.generate_id()), max_len)

    # Explicit prefixes, including empty-ish ones
    for prefix in (None, "", "+", "prefix"):
        generated = mqtt.MqttClient.generate_id(prefix)
        self.assertLessEqual(len(generated), max_len)
        if prefix:
            # The prefix must be preserved verbatim at the start of the ID
            self.assertTrue(generated.startswith(prefix),
                            "Prefix not in client ID")

    # Prefixes around (and beyond) the maximum length
    for prefix in ('a' * 20, 'a' * 23, 'a' * 25):
        generated = mqtt.MqttClient.generate_id(prefix)
        self.assertLessEqual(len(generated), max_len)
        # Two IDs generated from the same prefix must still differ
        self.assertNotEqual(generated, mqtt.MqttClient.generate_id(prefix))
def test_constructor(self):
    """
    Tests the client ID handling in the constructor
    """
    # Valid ID given: must be kept verbatim
    for client_id in ("custom_id", "other-id",
                      mqtt.MqttClient.generate_id()):
        client = mqtt.MqttClient(client_id)
        self.assertEqual(client.client_id, client_id)
    # No ID given: a non-empty ID must be generated
    for client_id in (None, ""):
        client = mqtt.MqttClient(client_id)
        # Check length of ID (MQTT 3.1 limit)
        self.assertLessEqual(len(client.client_id), 23)
        self.assertGreater(len(client.client_id), 0)
    # Long ID: accepted, but a warning must be logged
    long_id = "a" * 30
    if sys.version_info[:2] >= (3, 4):
        # assertLogs has been added in Python 3.4
        with self.assertLogs(level=logging.WARNING) as cm:
            client = mqtt.MqttClient(long_id)
        for line in cm.output:
            if long_id in line and 'too long' in line:
                break
        else:
            self.fail("No warning for long client ID")
    else:
        # Log test not available
        client = mqtt.MqttClient(long_id)
    # Client ID must be kept as is
    self.assertEqual(client.client_id, long_id)
def test_topic_matches(self):
    """
    Tests the topic_matches() method
    """
    # Local alias: the method under test is static
    matches = mqtt.MqttClient.topic_matches
    flat_topics = ('test', 'other_test', 'some-test', '1234')

    # Single-level topics
    for topic in flat_topics:
        # A filter identical to the topic matches it
        self.assertTrue(matches(topic, topic), topic)
        # Multi-level wildcard matches, but not when anchored below the root
        self.assertTrue(matches('#', topic), topic)
        self.assertFalse(matches('/#', topic), topic)
        # Same for the single-level wildcard
        self.assertTrue(matches('+', topic), topic)
        self.assertFalse(matches('/+', topic), topic)

    # Same topics with a leading '/'
    for topic in ('/' + name for name in flat_topics):
        self.assertTrue(matches(topic, topic), topic)
        self.assertTrue(matches('#', topic), topic)
        # The first level is now empty, so '/+' matches and '+' does not
        self.assertTrue(matches('/+', topic), topic)
        self.assertFalse(matches('+', topic), topic)

    # Wildcards on multi-level topics
    for topic in ('first/second/third/fourth',
                  'first/third/second/fourth'):
        self.assertTrue(matches('#', topic))
        self.assertTrue(matches('first/#', topic))
        self.assertFalse(matches('first/+', topic))
        for part in topic.split('/'):
            # A single level never matches the whole topic
            self.assertFalse(matches(part, topic))
        # Single-level wildcards must consume exactly one level each
        self.assertTrue(matches('first/+/+/fourth', topic))
        self.assertFalse(matches('first/+/fourth', topic))
        # Invalid filters (text after a multi-level wildcard)
        for invalid_filter in ('first/#/fourth', "#/second/#",
                               "#/third/#", "#/fourth"):
            self.assertFalse(matches(invalid_filter, topic))
# ------------------------------------------------------------------------------

if __name__ == '__main__':
    # Allow running this test module directly
    unittest.main()
| |
import os
import vtk, qt, ctk, slicer
import logging
from SegmentEditorEffects import *
class SegmentEditorEffect(AbstractScriptedSegmentEditorEffect):
"""This effect fills a region enclosed in a segment at clicked position"""
def __init__(self, scriptedEffect):
    # Effect name shown in the Segment Editor UI
    scriptedEffect.name = 'Flood filling'
    scriptedEffect.perSegment = False  # this effect operates on all segments at once (not on a single selected segment)
    AbstractScriptedSegmentEditorEffect.__init__(self, scriptedEffect)
    # Cached master image clipped to the selected ROI (None = not computed)
    self.clippedMasterImageData = None
    # Identity/MTime of the ROI used for the cache, for invalidation
    self.lastRoiNodeId = ""
    self.lastRoiNodeModifiedTime = 0
def clone(self):
    """Return a new scripted-effect instance bound to this Python source."""
    # It should not be necessary to modify this method
    import qSlicerSegmentationsEditorEffectsPythonQt as effects
    clonedEffect = effects.qSlicerSegmentEditorScriptedEffect(None)
    # Normalize Windows path separators for the Qt loader
    clonedEffect.setPythonSource(__file__.replace('\\','/'))
    return clonedEffect
def icon(self):
    """Return the effect's toolbar icon, or an empty icon if the file is missing."""
    # It should not be necessary to modify this method
    iconPath = os.path.join(os.path.dirname(__file__), 'SegmentEditorEffect.png')
    return qt.QIcon(iconPath) if os.path.exists(iconPath) else qt.QIcon()
def helpText(self):
    """Return the help text shown for this effect in the Segment Editor."""
    # Fixed: the original first line ended with a stray "\n." (a newline
    # before the sentence-ending period), which rendered the period on a
    # line of its own.
    return """Fill connected voxels with similar intensity.
Click in the image to add voxels that have similar intensity to the clicked voxel.
Masking settings can be used to restrict growing to a specific region.
"""
def activate(self):
    """Called when the effect is selected; refresh slider ranges."""
    # Update intensity range
    self.masterVolumeNodeChanged()
def setupOptionsFrame(self):
    """Create the effect's option widgets (tolerance/size sliders, ROI selector)."""
    # Intensity tolerance slider
    self.intensityToleranceSlider = ctk.ctkSliderWidget()
    self.intensityToleranceSlider.setToolTip("Tolerance.")
    self.intensityToleranceSlider.minimum = 0.01
    self.intensityToleranceSlider.maximum = 1000.0
    self.intensityToleranceSlider.value = 10
    self.intensityToleranceSlider.singleStep = 1.0
    self.intensityToleranceSlider.pageStep = 5.0
    self.intensityToleranceLabel = self.scriptedEffect.addLabeledOptionsWidget("Intensity tolerance:", self.intensityToleranceSlider)

    # Neighborhood size slider
    self.neighborhoodSizeMmSlider = ctk.ctkSliderWidget()
    # Tooltip fixed: the original implicit string concatenation produced
    # "...intensities.Use higher values prevent leakage..." (missing space
    # after the period and missing "to").
    self.neighborhoodSizeMmSlider.setToolTip(
        "Regions are added only if all voxels in the neighborhood have similar intensities. "
        "Use higher values to prevent leakage. Use lower values to allow capturing finer details.")
    self.neighborhoodSizeMmSlider.minimum = 0.0
    self.neighborhoodSizeMmSlider.maximum = 30.0
    self.neighborhoodSizeMmSlider.value = 1.0
    self.neighborhoodSizeMmSlider.singleStep = 0.01
    self.neighborhoodSizeMmSlider.pageStep = 0.5
    self.neighborhoodSizeLabel = self.scriptedEffect.addLabeledOptionsWidget("Neighborhood size:", self.neighborhoodSizeMmSlider)

    # Keep the parameter node in sync with the GUI
    self.neighborhoodSizeMmSlider.connect("valueChanged(double)", self.updateMRMLFromGUI)
    self.intensityToleranceSlider.connect("valueChanged(double)", self.updateMRMLFromGUI)

    # Add ROI options
    self.roiSelector = slicer.qMRMLNodeComboBox()
    self.roiSelector.nodeTypes = ['vtkMRMLMarkupsROINode', 'vtkMRMLAnnotationROINode']
    self.roiSelector.noneEnabled = True
    self.roiSelector.setMRMLScene(slicer.mrmlScene)
    self.scriptedEffect.addLabeledOptionsWidget("ROI: ", self.roiSelector)
    self.roiSelector.connect("currentNodeChanged(vtkMRMLNode*)", self.updateMRMLFromGUI)
def createCursor(self, widget):
    """Return the cursor to display while this effect is active."""
    # Turn off effect-specific cursor for this effect
    #return slicer.util.mainWindow().cursor
    return qt.QCursor(qt.Qt.PointingHandCursor)
def masterVolumeNodeChanged(self):
    """Adapt slider ranges/steps to the newly selected master volume."""
    # Force recomputation of clipped master image data
    self.clippedMasterImageData = None

    # Set scalar range of master volume image data to threshold slider
    import math
    masterImageData = self.scriptedEffect.masterVolumeImageData()
    if not masterImageData:
        return

    # TODO: it might be useful to add a convenience function, which determines size and intensity min/max/step/decimals
    # based on the selected master volume's size, spacing, and intensity range
    # Intensity slider
    lo, hi = masterImageData.GetScalarRange()
    if (hi - lo > 0):
        # Renamed from "range" to avoid shadowing the builtin
        intensityRange = hi - lo
        stepSize = 1
        # For floating-point volume: step size is 1/1000th of range (but maximum 1)
        if masterImageData.GetScalarType() in (vtk.VTK_FLOAT, vtk.VTK_DOUBLE):
            stepSize = 10 ** (math.floor(math.log(intensityRange / 1000.0) / math.log(10)))
            if stepSize > 1:
                stepSize = 1
        # Show enough decimal places for the chosen step size.  The original
        # assigned log10(stepSize), which is NEGATIVE for fractional steps
        # (e.g. -2 for a step of 0.01); ctkSliderWidget.decimals expects a
        # non-negative digit count, so negate and clamp at zero.
        self.intensityToleranceSlider.decimals = max(0, int(round(-math.log(stepSize) / math.log(10))))
        self.intensityToleranceSlider.minimum = stepSize
        self.intensityToleranceSlider.maximum = intensityRange
        self.intensityToleranceSlider.singleStep = stepSize
        self.intensityToleranceSlider.pageStep = stepSize * 10

    # Size slider: upper bound scales with the smallest voxel spacing
    minSpacing = min(masterImageData.GetSpacing())
    self.neighborhoodSizeMmSlider.maximum = 10 ** (math.ceil(math.log(minSpacing * 100.0) / math.log(10)))
    self.neighborhoodSizeMmSlider.singleStep = self.neighborhoodSizeMmSlider.minimum
    self.neighborhoodSizeMmSlider.pageStep = self.neighborhoodSizeMmSlider.singleStep * 10
def setMRMLDefaults(self):
    """Set the effect's default parameter values on the parameter node."""
    self.scriptedEffect.setParameterDefault("IntensityTolerance", 10.0)
    self.scriptedEffect.setParameterDefault("NeighborhoodSizeMm", 1.0)
    # No ROI selected by default
    self.scriptedEffect.parameterSetNode().SetNodeReferenceID("FloodFilling.ROI", None)
def updateGUIFromMRML(self):
    """Refresh the option widgets from the parameter node."""
    # Block widget signals while setting values so updateMRMLFromGUI is not
    # re-triggered by these programmatic changes.
    wasBlocked = self.intensityToleranceSlider.blockSignals(True)
    self.intensityToleranceSlider.value = self.scriptedEffect.doubleParameter("IntensityTolerance")
    self.intensityToleranceSlider.blockSignals(wasBlocked)
    wasBlocked = self.neighborhoodSizeMmSlider.blockSignals(True)
    self.neighborhoodSizeMmSlider.value = self.scriptedEffect.doubleParameter("NeighborhoodSizeMm")
    self.neighborhoodSizeMmSlider.blockSignals(wasBlocked)
    wasBlocked = self.roiSelector.blockSignals(True)
    self.roiSelector.setCurrentNode(self.scriptedEffect.parameterSetNode().GetNodeReference("FloodFilling.ROI"))
    self.roiSelector.blockSignals(wasBlocked)
def updateMRMLFromGUI(self):
    """Push the current widget values into the effect's parameter node."""
    effect = self.scriptedEffect
    effect.setParameter("IntensityTolerance", self.intensityToleranceSlider.value)
    effect.setParameter("NeighborhoodSizeMm", self.neighborhoodSizeMmSlider.value)
    effect.parameterSetNode().SetNodeReferenceID("FloodFilling.ROI", self.roiSelector.currentNodeID)
def getClippedMasterImageData(self):
    """Return the master volume image cropped to the selected ROI (cached)."""
    # Return masterImageData unchanged if there is no ROI
    masterImageData = self.scriptedEffect.masterVolumeImageData()
    roiNode = self.roiSelector.currentNode()
    if roiNode is None or masterImageData is None:
        # No ROI (or no volume): drop the cache and use the full volume
        self.clippedMasterImageData = None
        self.lastRoiNodeId = ""
        self.lastRoiNodeModifiedTime = 0
        return masterImageData
    # Return last clipped image data if there was no change
    # (same ROI node and the node has not been modified since)
    if (self.clippedMasterImageData is not None
            and roiNode.GetID() == self.lastRoiNodeId
            and roiNode.GetMTime() == self.lastRoiNodeModifiedTime):
        # Use cached clipped master image data
        return self.clippedMasterImageData
    # Compute clipped master image and remember which ROI produced it
    import SegmentEditorLocalThresholdLib
    self.clippedMasterImageData = SegmentEditorLocalThresholdLib.SegmentEditorEffect.cropOrientedImage(masterImageData, roiNode)
    self.lastRoiNodeId = roiNode.GetID()
    self.lastRoiNodeModifiedTime = roiNode.GetMTime()
    return self.clippedMasterImageData
def processInteractionEvents(self, callerInteractor, eventId, viewWidget):
    """Handle view interaction; a left-click in a slice view triggers the fill."""
    abortEvent = False
    # Only allow for slice views
    if viewWidget.className() != "qMRMLSliceWidget":
        return abortEvent
    if eventId == vtk.vtkCommand.LeftButtonPressEvent:
        # This can be a long operation - indicate it to the user
        qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
        try:
            # Convert the click position (view XY) to volume voxel (IJK)
            xy = callerInteractor.GetEventPosition()
            import vtkSegmentationCorePython as vtkSegmentationCore
            masterImageData = self.getClippedMasterImageData()
            ijk = self.xyToIjk(xy, viewWidget, masterImageData)
            self.floodFillFromPoint(ijk)
        except IndexError:
            logging.error('apply: Failed to threshold master volume!')
        finally:
            qt.QApplication.restoreOverrideCursor()
        # Consume the event so other handlers do not also process the click
        abortEvent = True
    return abortEvent
def floodFillFromPoint(self, ijk):
    """Fills the segment taking based on the current master volume.
    Input IJK position is voxel coordinates of master volume.
    """
    self.scriptedEffect.saveStateForUndo()
    # Get master volume image data
    import vtkSegmentationCorePython as vtkSegmentationCore
    masterImageData = self.getClippedMasterImageData()
    selectedSegmentLabelmap = self.scriptedEffect.selectedSegmentLabelmap()
    # Get modifier labelmap
    modifierLabelmap = self.scriptedEffect.defaultModifierLabelmap()
    # Intensity at the seed voxel; the flood fill grows from here.
    pixelValue = masterImageData.GetScalarComponentAsFloat(ijk[0], ijk[1], ijk[2], 0)
    # NOTE(review): never set to True in this method, so the
    # segmentation-as-stencil branch below is currently dead code.
    useSegmentationAsStencil = False
    # Perform thresholding
    floodFillingFilter = vtk.vtkImageThresholdConnectivity()
    floodFillingFilter.SetInputData(masterImageData)
    seedPoints = vtk.vtkPoints()
    origin = masterImageData.GetOrigin()
    spacing = masterImageData.GetSpacing()
    # Seed point expressed in the image's own coordinates: origin + ijk * spacing.
    seedPoints.InsertNextPoint(origin[0]+ijk[0]*spacing[0], origin[1]+ijk[1]*spacing[1], origin[2]+ijk[2]*spacing[2])
    floodFillingFilter.SetSeedPoints(seedPoints)
    # Build an editable-region mask from the segment editor's masking settings.
    maskImageData = vtkSegmentationCore.vtkOrientedImageData()
    intensityBasedMasking = self.scriptedEffect.parameterSetNode().GetMasterVolumeIntensityMask()
    segmentationNode = self.scriptedEffect.parameterSetNode().GetSegmentationNode()
    success = segmentationNode.GenerateEditMask(maskImageData,
        self.scriptedEffect.parameterSetNode().GetMaskMode(),
        masterImageData, # reference geometry
        self.scriptedEffect.parameterSetNode().GetSelectedSegmentID(),
        self.scriptedEffect.parameterSetNode().GetMaskSegmentID() if self.scriptedEffect.parameterSetNode().GetMaskSegmentID() else "",
        masterImageData if intensityBasedMasking else None,
        self.scriptedEffect.parameterSetNode().GetMasterVolumeIntensityMaskRange() if intensityBasedMasking else None)
    if success:
        stencil = vtk.vtkImageToImageStencil()
        stencil.SetInputData(maskImageData)
        # Mask voxels are > 0; restrict the fill to values above 0.
        stencil.ThresholdByLower(0)
        stencil.Update()
        floodFillingFilter.SetStencilData(stencil.GetOutput())
    else:
        # Best effort: continue without a mask rather than aborting the fill.
        logging.error("Failed to create edit mask")
    # Neighborhood parameters come straight from the GUI sliders.
    neighborhoodSizeMm = self.neighborhoodSizeMmSlider.value
    floodFillingFilter.SetNeighborhoodRadius(neighborhoodSizeMm,neighborhoodSizeMm,neighborhoodSizeMm)
    floodFillingFilter.SetNeighborhoodFraction(0.5)
    if useSegmentationAsStencil:
        stencilFilter = vtk.vtkImageToImageStencil()
        stencilFilter.SetInputData(selectedSegmentLabelmap)
        stencilFilter.ThresholdByLower(0)
        stencilFilter.Update()
        floodFillingFilter.SetStencilData(stencilFilter.GetOutput())
    # Grow into voxels within +/- tolerance of the seed intensity.
    pixelValueTolerance = float(self.intensityToleranceSlider.value)
    floodFillingFilter.ThresholdBetween(pixelValue-pixelValueTolerance, pixelValue+pixelValueTolerance)
    floodFillingFilter.SetInValue(1)
    floodFillingFilter.SetOutValue(0)
    floodFillingFilter.Update()
    modifierLabelmap.DeepCopy(floodFillingFilter.GetOutput())
    # Apply changes
    self.scriptedEffect.modifySelectedSegmentByLabelmap(modifierLabelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeAdd)
| |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <stevedh@eecs.berkeley.edu>
"""
"""Driver for the Power Standards Lab pQube meter.
The driver scrapes the html spit out by the meter, making it dependent
on a particular interface.
Config options:
Address : url of the meter
Rate [default 10] : polling period
"""
import sys
import logging
import time
import calendar
import threading
import urllib2
import httplib
import urlparse
import re
import operator
import struct
from twisted.python import log
from smap.driver import SmapDriver
from smap.drivers import modbus
from smap.drivers.modbus import ModbusRegister as R
from smap.util import periodicSequentialCall
# Install a default opener so urllib2.urlopen below uses the standard handlers.
urllib2.install_opener(urllib2.build_opener())
def p(val):
    """Default value parser: convert the first captured regex group to a float."""
    first = val[0]
    return float(first)
def kwh_mwh_parser(val):
    """Parse an energy reading, normalizing MWh values to kWh (x1000)."""
    magnitude = float(val[0])
    return magnitude if val[1] == 'kwh' else magnitude * 1000
# Channels scraped from the meter's Meters.htm page. Each tuple is:
#   (html label, value regexp, phase, channel name, units, value parser)
# Labels are matched case-insensitively against the scraped table cells.
PQUBE_POINTS = [
    ('L1-N', r'^(\d+\.\d+)', 'A', 'phase-neutral_voltage', 'V', p),
    ('L2-N', r'^(\d+\.\d+)', 'B', 'phase-neutral_voltage', 'V', p),
    ('L3-N', r'^(\d+\.\d+)', 'C', 'phase-neutral_voltage', 'V', p),
    ('L1 Amp', r'^(\d+\.\d+)', 'A', 'current', 'A', p),
    ('L2 Amp', r'^(\d+\.\d+)', 'B', 'current', 'A', p),
    ('L3 Amp', r'^(\d+\.\d+)', 'C', 'current', 'A', p),
    # ('L1-N Voltage Fundamental', r'^(\d+\.\d+)', 'A', 'a-n fundamental voltage', p),
    # ('L1-N Voltage Fundamental', r'^(\d+\.\d+)', 'A', 'a-n fundamental phase', p),
    # ('L2-N Voltage Fundamental', r'^(\d+\.\d+)', 'B', 'a-n fundamental voltage', p),
    # ('L2-N Voltage Fundamental', r'^(\d+\.\d+)', 'B', 'a-n fundamental phase', p),
    # ('L3-N Voltage Fundamental', r'^(\d+\.\d+)', 'C', 'a-n fundamental voltage', p),
    # ('L3-N Voltage Fundamental', r'^(\d+\.\d+)', 'C', 'a-n fundamental phase', p),
    ('Frequency', r'^(\d+\.\d+)', 'ABC', 'line_frequency', 'Hz', p),
    ('Voltage THD', r'^(\d+\.\d+)', 'ABC', 'voltage_thd', 'pct', p),
    ('Current TDD', r'^(\d+\.\d+)', 'ABC', 'current_tdd', 'pct', p),
    ('L1-L2', r'^(\d+\.\d+)', 'AB', 'volts', 'V', p),
    ('L2-L3', r'^(\d+\.\d+)', 'BC', 'volts', 'V', p),
    ('L3-L1', r'^(\d+\.\d+)', 'AC', 'volts', 'V', p),
    ('Power', r'^(\d+\.\d+)', 'ABC', 'true_power', 'kW', p),
    ('Apparent Power', r'^(\d+\.\d+)', 'ABC', 'apparent_power', 'kVA', p),
    ('Reactive Power', r'^(\d+\.\d+)', 'ABC', 'reactive_power', 'kVAR', p),
    ('True Power Factor', r'^(\d+\.\d+)', 'ABC', 'pf', 'PF', p),
    # meters (cumulative energy registers; may be reported in kWh or MWh)
    ('Energy', r'^(\d+\.\d+)(kwh|mwh)', 'ABC', 'true_energy', 'kWh', kwh_mwh_parser),
    ('Apparent Energy', r'^(\d+\.\d+)', 'ABC', 'apparent_energy', 'kVAh', p),
    ]
class PQube(SmapDriver):
    """sMAP driver that polls a PSL pQube meter by scraping its Meters.htm page.

    Config options:
      Address : base url of the meter
      Rate    : polling period in seconds (default 10)
    """
    def setup(self, opts):
        self.serverloc = opts['Address']
        self.rate = int(opts.get('Rate', 10))
        # One timeseries per known channel, plus per-phase metadata.
        for (field, regexp, phase, channel, fmt, vparser) in PQUBE_POINTS:
            self.add_timeseries('/%s/%s' % (phase, channel), fmt, data_type="double")
            self.set_metadata('/%s' % phase, {
                'Extra/Phase' : phase})
        self.set_metadata('/', {
            'Instrument/Manufacturer' : 'Power Standards Laboratory',
            'Instrument/SamplingPeriod' : str(self.rate),
            'Extra/Driver' : 'smap.drivers.pqube.PQube',
            })

    def start(self):
        # Poll sequentially every self.rate seconds (calls never overlap).
        periodicSequentialCall(self.update).start(self.rate)

    def update(self):
        """Fetch Meters.htm, extract channel/value table cells, and add readings."""
        logging.debug("Updating " + self.serverloc)
        try:
            fp = urllib2.urlopen(self.serverloc + '/Meters.htm', timeout=15)
            html = fp.read()
        except IOError, e:
            logging.error("IOError while reading pqube: url: %s exception: %s" %
                          (self.serverloc, str(e)))
            return
        except httplib.HTTPException, e:
            logging.error("HTTP exception reading pqube: url: %s exception: %s" %
                          (self.serverloc, str(e)))
            return

        # All readings from one scrape share a single timestamp.
        reading_time = int(time.time())

        # this pulls out a list of all the channel-reading pairs
        data = re.findall('<td.*? class="channel">(.*?)</td>.*?<td.*?>(.*?)</td>',
                          html.lower())
        # Strip any markup left inside the channel-name cell.
        data = [(re.sub('<.*>', '', k), v) for (k,v) in data]
        data_map = {}
        data_map.update(data)

        for (field, regexp, phase, channel, fmt, vparser) in PQUBE_POINTS:
            reading = data_map.get(field.lower())
            if not reading:
                logging.warn(field + " not found in doc")
                continue
            match = re.search(regexp, reading)
            if not match:
                logging.warn("reading conversion failed: " + reading)
                continue
            self.add('/%s/%s' % (phase, channel), reading_time, vparser(match.groups(0)))
class PQubeModbus(modbus.ModbusDriver):
    """Modbus variant of the pQube driver: reads the same channels via registers."""
    # modbus registers
    # reg number : (description, phase, channelname, units)
    BASE = 7000  # register offset added by the ModbusDriver base class
    REGISTERS = {
        0 : R('/A/phase-earth_voltage', 'V', modbus.float),
        2 : R('/B/phase-earth_voltage', 'V', modbus.float),
        4 : R('/C/phase-earth_voltage', 'V', modbus.float),
        8 : R('/A/phase-neutral_voltage', 'V', modbus.float),
        10 : R('/B/phase-neutral_voltage', 'V', modbus.float),
        12 : R('/C/phase-neutral_voltage', 'V', modbus.float),
        28 : R('/A/current', 'A', modbus.float, 'L1 Amp'),
        30 : R('/B/current', 'A', modbus.float, 'L2 Amp'),
        32 : R('/C/current', 'A', modbus.float, 'L3 Amp'),
        26 : R('/ABC/line_frequency', 'Hz', modbus.float, 'Frequency'),
        64 : R('/ABC/voltage_thd', 'pct', modbus.float, 'Voltage THD'),
        66 : R('/ABC/current_tdd', 'pct', modbus.float, 'Current TDD'),
        14 : R('/AB/volts', 'V', modbus.float, 'L1-L2'),
        16 : R('/BC/volts', 'V', modbus.float, 'L2-L3'),
        18 : R('/AC/volts', 'V', modbus.float, 'L3-L1'),
        36 : R('/ABC/true_power', 'W', modbus.float, 'Power'),
        38 : R('/ABC/apparent_power', 'VA', modbus.float, 'Apparent Power'),
        80 : R('/ABC/reactive_power', 'VAR', modbus.float, 'Reactive Power'),
        82 : R('/ABC/pf', 'PF', modbus.float, 'True Power Factor'),
        # meters (cumulative energy registers)
        60 : R('/ABC/true_energy', 'Wh', modbus.float),
        62 : R('/ABC/apparent_energy', 'VAh', modbus.float),
        }
    METADATA = {
        'Instrument/Manufacturer' : 'Power Standards Laboratory',
        'Extra/Driver' : 'smap.drivers.pqube.PQube',
        }
| |
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import lxml.etree as et
import stix
import stix.utils as utils
import stix.common as common
import stix.bindings.extensions.identity.ciq_identity_3_0 as ciq_identity_binding
# OASIS CIQ 3.0 schema namespaces: party info (xPIL), names (xNL), addresses (xAL).
XML_NS_XPIL = "urn:oasis:names:tc:ciq:xpil:3"
XML_NS_XNL = "urn:oasis:names:tc:ciq:xnl:3"
XML_NS_XAL = "urn:oasis:names:tc:ciq:xal:3"
# STIX extension namespace for the CIQ 3.0 Identity extension.
XML_NS_STIX_EXT = "http://stix.mitre.org/extensions/Identity#CIQIdentity3.0-1"

# Register prefixes so serialized XML uses the conventional prefixes.
et.register_namespace('xpil', XML_NS_XPIL)
et.register_namespace('xnl', XML_NS_XNL)
et.register_namespace('xal', XML_NS_XAL)
et.register_namespace('ExtSch', XML_NS_STIX_EXT)
@stix.register_extension
class CIQIdentity3_0Instance(common.Identity):
    """STIX Identity extension carrying an OASIS CIQ 3.0 identity specification.

    Attributes:
        roles: list of role strings for this identity.
        specification: a STIXCIQIdentity3_0 with the CIQ identity details.
    """
    _binding = ciq_identity_binding
    _binding_class = _binding.CIQIdentity3_0InstanceType
    _namespace = "http://stix.mitre.org/extensions/Identity#CIQIdentity3.0-1"
    _XML_NS_PREFIX = "ciqIdentity"
    _XML_TYPE = "CIQIdentity3.0InstanceType"
    _XSI_TYPE = "ciqIdentity:CIQIdentity3.0InstanceType"

    def __init__(self, roles=None, specification=None):
        super(CIQIdentity3_0Instance, self).__init__()
        self.roles = roles
        # Default to an empty specification so attribute access is always safe.
        self.specification = specification if specification else STIXCIQIdentity3_0()

    @property
    def roles(self):
        return self._roles

    @roles.setter
    def roles(self, valuelist):
        # Assigning replaces the whole list; each entry is validated by add_role.
        self._roles = []
        if not valuelist:
            return
        for role in valuelist:
            self.add_role(role)

    def add_role(self, role):
        """Append a role string; raises ValueError for non-string input."""
        if not isinstance(role, basestring):
            raise ValueError('role is not instance of basestring')
        self.roles.append(role)

    @property
    def specification(self):
        return self._specification

    @specification.setter
    def specification(self, value):
        if value and not isinstance(value, STIXCIQIdentity3_0):
            raise ValueError('value not instance of STIXCIQIdentity3_0Type')
        self._specification = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to the generateDS binding object."""
        if not return_obj:
            return_obj = self._binding_class()

        super(CIQIdentity3_0Instance, self).to_obj(return_obj)

        # return_obj.id = self.id_
        # return_obj.idref = self.idref_
        return_obj.xsi_type = self._XSI_TYPE

        if self.roles:
            for role in self.roles:
                return_obj.add_Role(role)
        if self.specification:
            return_obj.Specification = self.specification.to_obj(ns_info=ns_info)

        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Build an instance from a generateDS binding object (None-safe)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        super(CIQIdentity3_0Instance, cls).from_obj(obj, return_obj)
        roles = obj.Role
        specification = obj.Specification

        if roles:
            for role in roles:
                return_obj.add_role(role)
        if specification is not None:
            return_obj.specification = STIXCIQIdentity3_0.from_obj(specification)

        return return_obj

    def to_dict(self):
        d = super(CIQIdentity3_0Instance, self).to_dict()
        # xsi:type marks which Identity extension this dict represents.
        d['xsi:type'] = self._XSI_TYPE
        if self.roles:
            d['roles'] = [str(x) for x in self.roles]
        if self.specification:
            d['specification'] = self.specification.to_dict()
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build an instance from its dictionary representation (None-safe)."""
        if not dict_repr:
            return None
        if not return_obj:
            return_obj = cls()

        super(CIQIdentity3_0Instance, cls).from_dict(dict_repr, return_obj)
        roles = dict_repr.get('roles', [])
        specification = dict_repr.get('specification')

        for role in roles:
            return_obj.add_role(role)
        if specification:
            return_obj.specification = STIXCIQIdentity3_0.from_dict(specification)

        return return_obj
class STIXCIQIdentity3_0(stix.Entity):
    """Identity details expressed with the OASIS CIQ 3.0 (xPIL/xNL/xAL) schemas.

    List-valued properties accept a single item or a sequence; setting a
    property replaces the whole list and coerces raw strings into the
    corresponding value types via the add_* helpers.
    """
    _namespace = "http://stix.mitre.org/extensions/Identity#CIQIdentity3.0-1"
    XML_TAG = "{%s}Specification" % _namespace

    def __init__(self, party_name=None, languages=None, addresses=None,
                 organisation_info=None, electronic_address_identifiers=None,
                 free_text_lines=None, contact_numbers=None, nationalities=None):
        self.party_name = party_name
        self.languages = languages
        self.addresses = addresses
        self.organisation_info = organisation_info
        self.electronic_address_identifiers = electronic_address_identifiers
        self.free_text_lines = free_text_lines
        self.contact_numbers = contact_numbers
        self.nationalities = nationalities

    @property
    def addresses(self):
        return self._addresses

    @addresses.setter
    def addresses(self, value):
        self._addresses = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_address(v)
        else:
            self.add_address(value)

    def add_address(self, value):
        # Addresses are not coerced from strings: only Address instances allowed.
        if not value:
            return
        elif isinstance(value, Address):
            self.addresses.append(value)
        else:
            raise ValueError('value must be instance of Address')

    @property
    def languages(self):
        return self._languages

    @languages.setter
    def languages(self, value):
        self._languages = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_language(v)
        else:
            self.add_language(value)

    def add_language(self, value):
        if not value:
            return
        elif isinstance(value, Language):
            self.languages.append(value)
        else:
            # Coerce raw values (e.g. strings) into Language.
            self.languages.append(Language(value))

    @property
    def party_name(self):
        return self._party_name

    @party_name.setter
    def party_name(self, value):
        if not value:
            self._party_name = None
        elif isinstance(value, PartyName):
            self._party_name = value
        else:
            raise ValueError('party_name must be instance of PartyName')

    @property
    def electronic_address_identifiers(self):
        return self._electronic_address_identifiers

    @electronic_address_identifiers.setter
    def electronic_address_identifiers(self, value):
        self._electronic_address_identifiers = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_electronic_address_identifier(v)
        else:
            self.add_electronic_address_identifier(value)

    def add_electronic_address_identifier(self, value):
        if not value:
            return
        elif isinstance(value, ElectronicAddressIdentifier):
            self.electronic_address_identifiers.append(value)
        else:
            self.electronic_address_identifiers.append(ElectronicAddressIdentifier(value))

    @property
    def free_text_lines(self):
        return self._free_text_lines

    @free_text_lines.setter
    def free_text_lines(self, value):
        self._free_text_lines = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_free_text_line(v)
        else:
            self.add_free_text_line(value)

    def add_free_text_line(self, value):
        if not value:
            return
        elif isinstance(value, FreeTextLine):
            self.free_text_lines.append(value)
        else:
            self.free_text_lines.append(FreeTextLine(value))

    @property
    def contact_numbers(self):
        return self._contact_numbers

    @contact_numbers.setter
    def contact_numbers(self, value):
        self._contact_numbers = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_contact_number(v)
        else:
            self.add_contact_number(value)

    def add_contact_number(self, value):
        if not value:
            return
        elif isinstance(value, ContactNumber):
            self.contact_numbers.append(value)
        else:
            self.contact_numbers.append(ContactNumber(value))

    @property
    def nationalities(self):
        return self._nationalities

    @nationalities.setter
    def nationalities(self, value):
        self._nationalities = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_nationality(v)
        else:
            self.add_nationality(value)

    def add_nationality(self, value):
        if not value:
            return
        elif isinstance(value, Country):
            self.nationalities.append(value)
        else:
            self.nationalities.append(Country(value))

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element tree into a STIXCIQIdentity3_0 (None-safe)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        party_name = obj.findall(PartyName.XML_TAG)
        if party_name is not None and len(party_name) > 0:
            return_obj.party_name = PartyName.from_obj(party_name[0])

        # For each list container, parse the children of the first match.
        languages = obj.findall("{%s}Languages" % XML_NS_XPIL)
        if languages is not None and len(languages) > 0:
            return_obj.languages = [Language.from_obj(x) for x in languages[0]]

        addresses = obj.findall("{%s}Addresses" % XML_NS_XPIL)
        if addresses is not None and len(addresses) > 0:
            return_obj.addresses = [Address.from_obj(x) for x in addresses[0]]

        nationalities = obj.findall("{%s}Nationalities" % XML_NS_XPIL)
        if nationalities is not None and len(nationalities) > 0:
            return_obj.nationalities = [Country.from_obj(x) for x in nationalities[0]]

        organisation_info = obj.findall(OrganisationInfo.XML_TAG)
        if organisation_info is not None and len(organisation_info) > 0:
            return_obj.organisation_info = OrganisationInfo.from_obj(organisation_info[0])

        electronic_address_identifiers = obj.findall("{%s}ElectronicAddressIdentifiers" % XML_NS_XPIL)
        if electronic_address_identifiers is not None and len(electronic_address_identifiers) > 0:
            return_obj.electronic_address_identifiers = [ElectronicAddressIdentifier.from_obj(x) for x in electronic_address_identifiers[0]]

        free_text_lines = obj.findall("{%s}FreeTextLines" % XML_NS_XPIL)
        if free_text_lines is not None and len(free_text_lines) > 0:
            return_obj.free_text_lines = [FreeTextLine.from_obj(x) for x in free_text_lines[0]]

        contact_numbers = obj.findall("{%s}ContactNumbers" % XML_NS_XPIL)
        if contact_numbers is not None and len(contact_numbers) > 0:
            return_obj.contact_numbers = [ContactNumber.from_obj(x) for x in contact_numbers[0]]

        return return_obj

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an lxml Element; child element order follows the schema."""
        super(STIXCIQIdentity3_0, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            root_tag = STIXCIQIdentity3_0.XML_TAG
            return_obj = et.Element(root_tag)

        if self.free_text_lines:
            ftl_root = et.Element("{%s}FreeTextLines" % XML_NS_XPIL)
            return_obj.append(ftl_root)
            for ftl in self.free_text_lines:
                ftl_root.append(ftl.to_obj(ns_info=ns_info))

        if self.party_name:
            return_obj.append(self.party_name.to_obj(ns_info=ns_info))

        if self.addresses:
            addresses_root = et.Element("{%s}Addresses" % XML_NS_XPIL)
            return_obj.append(addresses_root)
            for address in self.addresses:
                addresses_root.append(address.to_obj(ns_info=ns_info))

        if self.contact_numbers:
            contact_numbers_root = et.Element("{%s}ContactNumbers" % XML_NS_XPIL)
            return_obj.append(contact_numbers_root)
            for contact_number in self.contact_numbers:
                contact_numbers_root.append(contact_number.to_obj(ns_info=ns_info))

        if self.electronic_address_identifiers:
            eai_root = et.Element("{%s}ElectronicAddressIdentifiers" % XML_NS_XPIL)
            return_obj.append(eai_root)
            for eai in self.electronic_address_identifiers:
                eai_root.append(eai.to_obj(ns_info=ns_info))

        if self.organisation_info:
            return_obj.append(self.organisation_info.to_obj(ns_info=ns_info))

        if self.languages:
            languages_root = et.Element("{%s}Languages" % XML_NS_XPIL)
            return_obj.append(languages_root)
            for language in self.languages:
                languages_root.append(language.to_obj(ns_info=ns_info))

        if self.nationalities:
            nationalities_root = et.Element("{%s}Nationalities" % XML_NS_XPIL)
            return_obj.append(nationalities_root)
            for country in self.nationalities:
                country_obj = country.to_obj(ns_info=ns_info)
                # Nationalities reuse Country but must be serialized in the
                # xPIL namespace rather than xAL.
                country_obj.tag = "{%s}Country" % XML_NS_XPIL
                nationalities_root.append(country_obj)

        return return_obj

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build an instance from its dictionary representation (None-safe)."""
        if not dict_repr:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.party_name = PartyName.from_dict(dict_repr.get('party_name'))
        return_obj.languages = [Language.from_dict(x) for x in dict_repr.get('languages', [])]
        return_obj.addresses = [Address.from_dict(x) for x in dict_repr.get('addresses', [])]
        return_obj.electronic_address_identifiers = [ElectronicAddressIdentifier.from_dict(x) for x in dict_repr.get('electronic_address_identifiers', [])]
        return_obj.free_text_lines = [FreeTextLine.from_dict(x) for x in dict_repr.get('free_text_lines', [])]
        return_obj.contact_numbers = [ContactNumber.from_dict(x) for x in dict_repr.get('contact_numbers', [])]
        return_obj.nationalities = [Country.from_dict(x) for x in dict_repr.get('nationalities', [])]

        return return_obj

    def to_dict(self):
        # Only populated fields are emitted.
        d = {}
        if self.party_name:
            d['party_name'] = self.party_name.to_dict()
        if self.languages:
            d['languages'] = [x.to_dict() for x in self.languages]
        if self.addresses:
            d['addresses'] = [x.to_dict() for x in self.addresses]
        if self.electronic_address_identifiers:
            d['electronic_address_identifiers'] = [x.to_dict() for x in self.electronic_address_identifiers]
        if self.free_text_lines:
            d['free_text_lines'] = [x.to_dict() for x in self.free_text_lines]
        if self.contact_numbers:
            d['contact_numbers'] = [x.to_dict() for x in self.contact_numbers]
        if self.nationalities:
            d['nationalities'] = [x.to_dict() for x in self.nationalities]
        return d
class Address(stix.Entity):
    """An xPIL Address: free-text lines plus optional country and admin area."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}Address" % _namespace

    def __init__(self, free_text_address=None, country=None, administrative_area=None):
        self.free_text_address = free_text_address
        self.country = country
        self.administrative_area = administrative_area

    @property
    def country(self):
        return self._country

    @country.setter
    def country(self, value):
        # _set_var (from stix.Entity) validates/coerces to Country.
        self._set_var(Country, country=value)

    @property
    def administrative_area(self):
        return self._administrative_area

    @administrative_area.setter
    def administrative_area(self, value):
        self._set_var(AdministrativeArea, administrative_area=value)

    @property
    def free_text_address(self):
        return self._free_text_address

    @free_text_address.setter
    def free_text_address(self, value):
        self._set_var(FreeTextAddress, free_text_address=value)

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an lxml Element; omits unset children."""
        super(Address, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            return_obj = et.Element(self.XML_TAG)

        if self.free_text_address:
            return_obj.append(self.free_text_address.to_obj(ns_info=ns_info))
        if self.country:
            return_obj.append(self.country.to_obj(ns_info=ns_info))
        if self.administrative_area:
            return_obj.append(self.administrative_area.to_obj(ns_info=ns_info))

        return return_obj

    def to_dict(self):
        d = {}
        if self.free_text_address:
            d['free_text_address'] = self.free_text_address.to_dict()
        if self.country:
            d['country'] = self.country.to_dict()
        if self.administrative_area:
            d['administrative_area'] = self.administrative_area.to_dict()
        return d

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element into an Address (None-safe).

        NOTE(review): child elements are looked up in the xAL namespace even
        though Address itself lives in xPIL - presumably per the CIQ schema;
        confirm against the xPIL schema before changing.
        """
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        free_text_address = obj.findall("{%s}FreeTextAddress" % XML_NS_XAL)
        if len(free_text_address) > 0:
            return_obj.free_text_address = FreeTextAddress.from_obj(free_text_address[0])

        country = obj.findall("{%s}Country" % XML_NS_XAL)
        if len(country) > 0:
            return_obj.country = Country.from_obj(country[0])

        administrative_area = obj.findall("{%s}AdministrativeArea" % XML_NS_XAL)
        if len(administrative_area) > 0:
            return_obj.administrative_area = AdministrativeArea.from_obj(administrative_area[0])

        return return_obj

    @classmethod
    def from_dict(cls, d, return_obj=None):
        if not d:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.free_text_address = FreeTextAddress.from_dict(d.get('free_text_address'))
        return_obj.country = Country.from_dict(d.get('country'))
        return_obj.administrative_area = AdministrativeArea.from_dict(d.get('administrative_area'))

        return return_obj
class AdministrativeArea(stix.Entity):
    """An xAL AdministrativeArea (state/province), holding NameElement children."""
    _namespace = XML_NS_XAL
    XML_TAG = "{%s}AdministrativeArea" % _namespace

    def __init__(self, name_elements=None):
        self.name_elements = name_elements

    @property
    def name_elements(self):
        return self._name_elements

    @name_elements.setter
    def name_elements(self, value):
        # Assigning replaces the whole list; strings are coerced via add_name_element.
        self._name_elements = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_name_element(v)
        else:
            self.add_name_element(value)

    def add_name_element(self, value):
        if not value:
            return
        elif isinstance(value, NameElement):
            self.name_elements.append(value)
        else:
            self.name_elements.append(NameElement(value))

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element into an AdministrativeArea (None-safe)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        name_elements = obj.findall(NameElement.XML_TAG)
        if name_elements:
            for name_element in name_elements:
                return_obj.name_elements.append(NameElement.from_obj(name_element))

        return return_obj

    def to_obj(self, return_obj=None, ns_info=None):
        super(AdministrativeArea, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            return_obj = et.Element(self.XML_TAG)

        for name_element in self.name_elements:
            return_obj.append(name_element.to_obj(ns_info=ns_info))

        return return_obj

    def to_dict(self):
        d = {}
        if self.name_elements:
            d['name_elements'] = [x.to_dict() for x in self.name_elements]
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        if not d:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.name_elements = [NameElement.from_dict(x) for x in d.get('name_elements', [])]
        return return_obj
class Country(stix.Entity):
    """An xAL Country, holding NameElement children.

    NOTE(review): structurally a duplicate of AdministrativeArea; a shared
    base class could remove the repetition.
    """
    _namespace = XML_NS_XAL
    XML_TAG = "{%s}Country" % _namespace

    def __init__(self, name_elements=None):
        self.name_elements = name_elements

    @property
    def name_elements(self):
        return self._name_elements

    @name_elements.setter
    def name_elements(self, value):
        # Assigning replaces the whole list; strings are coerced via add_name_element.
        self._name_elements = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_name_element(v)
        else:
            self.add_name_element(value)

    def add_name_element(self, value):
        if not value:
            return
        elif isinstance(value, NameElement):
            self.name_elements.append(value)
        else:
            self.name_elements.append(NameElement(value))

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element into a Country (None-safe)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        name_elements = obj.findall("{%s}NameElement" % XML_NS_XAL)
        if name_elements:
            for name_element in name_elements:
                return_obj.name_elements.append(NameElement.from_obj(name_element))

        return return_obj

    def to_obj(self, return_obj=None, ns_info=None):
        super(Country, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            return_obj = et.Element(self.XML_TAG)

        for name_element in self.name_elements:
            return_obj.append(name_element.to_obj(ns_info=ns_info))

        return return_obj

    def to_dict(self):
        d = {}
        if self.name_elements:
            d['name_elements'] = [x.to_dict() for x in self.name_elements]
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        if not d:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.name_elements = [NameElement.from_dict(x) for x in d.get('name_elements', [])]
        return return_obj
class NameElement(stix.Entity):
    """A single xAL NameElement: a free-text name string used inside
    Country/AdministrativeArea name lists."""
    _namespace = XML_NS_XAL
    # Built from _namespace for consistency with the sibling Entity classes
    # (same value as before: XML_NS_XAL).
    XML_TAG = "{%s}NameElement" % _namespace

    def __init__(self, value=None):
        self.value = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an lxml Element.

        Fix: honor a caller-supplied ``return_obj``. Previously a fresh
        element was always created, silently discarding the argument -
        inconsistent with every other Entity subclass in this module.
        """
        super(NameElement, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            return_obj = et.Element(self.XML_TAG)

        return_obj.text = self.value
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element into a NameElement (None-safe)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        d = {}
        if self.value:
            d['value'] = self.value
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        if not d:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.value = d.get('value')
        return return_obj
class FreeTextAddress(stix.Entity):
    """An xAL FreeTextAddress: an ordered list of address-line strings."""
    _namespace = XML_NS_XAL
    XML_TAG = "{%s}FreeTextAddress" % XML_NS_XAL

    def __init__(self, address_lines=None):
        self.address_lines = address_lines

    @property
    def address_lines(self):
        return self._address_lines

    @address_lines.setter
    def address_lines(self, value):
        # Accepts None/empty, a sequence of lines, or a single line.
        self._address_lines = []
        if value is None or len(value) == 0:
            return
        elif utils.is_sequence(value):
            self._address_lines = value
        else:
            self._address_lines.append(value)

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element; returns None for a missing or empty element."""
        if obj is None or len(obj) == 0:
            return None
        if not return_obj:
            return_obj = cls()

        address_line_tag = "{%s}AddressLine" % XML_NS_XAL
        address_lines = obj.findall(address_line_tag)
        if address_lines:
            for address_line in address_lines:
                return_obj.address_lines.append(address_line.text)

        return return_obj

    def to_obj(self, return_obj=None, ns_info=None):
        super(FreeTextAddress, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            return_obj = et.Element(self.XML_TAG)

        for address in self.address_lines:
            address_line = et.Element("{%s}AddressLine" % XML_NS_XAL)
            address_line.text = address
            return_obj.append(address_line)

        return return_obj

    @classmethod
    def from_dict(cls, d, return_obj=None):
        if not d:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.address_lines = d.get('address_lines', [])
        return return_obj

    def to_dict(self):
        d = {}
        if self.address_lines:
            d['address_lines'] = self.address_lines
        return d
class PartyName(stix.Entity):
    """An xPIL PartyName: name lines plus person and organisation names.

    The add_* helpers coerce plain strings into NameLine / PersonName /
    OrganisationName respectively.
    """
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}PartyName" % _namespace

    def __init__(self, name_lines=None, person_names=None, organisation_names=None):
        self.name_lines = []
        self.person_names = []
        self.organisation_names = []

        if name_lines:
            for value in name_lines:
                self.add_name_line(value)
        if person_names:
            for value in person_names:
                self.add_person_name(value)
        if organisation_names:
            for value in organisation_names:
                self.add_organisation_name(value)

    def add_name_line(self, value):
        """Append a NameLine; plain strings are wrapped automatically."""
        if isinstance(value, basestring):
            self.name_lines.append(NameLine(value))
        elif isinstance(value, NameLine):
            self.name_lines.append(value)
        else:
            raise ValueError('value must be a basestring or NameLine instance')

    def add_person_name(self, value):
        """Append a PersonName; plain strings become a single name element."""
        if isinstance(value, basestring):
            self.person_names.append(PersonName(name_elements=[value]))
        elif isinstance(value, PersonName):
            self.person_names.append(value)
        else:
            raise ValueError('value must be instance of PersonName or basestring')

    def add_organisation_name(self, value):
        """Append an OrganisationName; plain strings become a single name element."""
        if isinstance(value, basestring):
            self.organisation_names.append(OrganisationName(name_elements=[value]))
        elif isinstance(value, OrganisationName):
            self.organisation_names.append(value)
        else:
            raise ValueError('value must be instance of OrganisationName')

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an lxml Element: name lines, then persons, then orgs."""
        super(PartyName, self).to_obj(return_obj=return_obj, ns_info=ns_info)

        if not return_obj:
            root_tag = PartyName.XML_TAG
            return_obj = et.Element(root_tag)

        for name_line in self.name_lines:
            return_obj.append(name_line.to_obj(ns_info=ns_info))
        for person_name in self.person_names:
            return_obj.append(person_name.to_obj(ns_info=ns_info))
        for organisation_name in self.organisation_names:
            return_obj.append(organisation_name.to_obj(ns_info=ns_info))

        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an lxml Element into a PartyName (None-safe)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()

        name_lines = obj.findall(NameLine.XML_TAG)
        if name_lines:
            for name_line_obj in name_lines:
                name_line = NameLine.from_obj(name_line_obj)
                return_obj.add_name_line(name_line)

        person_names = obj.findall(PersonName.XML_TAG)
        if person_names:
            for person_name_obj in person_names:
                person_name = PersonName.from_obj(person_name_obj)
                return_obj.add_person_name(person_name)

        org_names = obj.findall(OrganisationName.XML_TAG)
        if org_names:
            for organisation_name_obj in org_names:
                org_name = OrganisationName.from_obj(organisation_name_obj)
                return_obj.add_organisation_name(org_name)

        return return_obj

    def to_dict(self):
        d = {}
        if self.name_lines:
            for name_line in self.name_lines:
                d.setdefault('name_lines', []).append(name_line.to_dict())
        if self.organisation_names:
            for on in self.organisation_names:
                d.setdefault('organisation_names', []).append(on.to_dict())
        if self.person_names:
            for pn in self.person_names:
                d.setdefault('person_names', []).append(pn.to_dict())
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        if not dict_repr:
            return None
        if not return_obj:
            return_obj = cls()

        nl_dicts = dict_repr.get('name_lines', [])
        on_dicts = dict_repr.get('organisation_names', [])
        pn_dicts = dict_repr.get('person_names', [])

        for nl in nl_dicts:
            name_line = NameLine.from_dict(nl)
            return_obj.add_name_line(name_line)
        for on in on_dicts:
            organisation_name = OrganisationName.from_dict(on)
            return_obj.add_organisation_name(organisation_name)
        for pn in pn_dicts:
            person_name = PersonName.from_dict(pn)
            return_obj.add_person_name(person_name)

        return return_obj
class NameLine(stix.Entity):
    """A free-form xNL NameLine with an optional Type attribute."""
    _namespace = XML_NS_XNL
    XML_TAG = "{%s}NameLine" % _namespace

    def __init__(self, value=None, type_=None):
        self.value = value
        self.type = type_

    @property
    def value(self):
        # The text of the name line (a string or None).
        return self._value

    @value.setter
    def value(self, value):
        if value and not isinstance(value, basestring):
            raise ValueError('value must be instance of basestring')
        self._value = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element."""
        super(NameLine, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(NameLine.XML_TAG)
        if self.type:
            return_obj.attrib['Type'] = self.type
        if self.value:
            return_obj.text = self.value
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element into a NameLine (returns None for None)."""
        if obj is None:
            return None
        return_obj = return_obj or cls()
        return_obj.value = obj.text
        return_obj.type = obj.get('Type')
        return return_obj

    def to_dict(self):
        """Dict representation; 'type' is omitted when unset."""
        d = {'value': self.value}
        if self.type:
            d['type'] = self.type
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build a NameLine from its dict representation (None for falsy input)."""
        if not dict_repr:
            return None
        return_obj = return_obj or cls()
        return_obj.value = dict_repr.get('value', None)
        return_obj.type = dict_repr.get('type', None)
        return return_obj
class PersonName(stix.Entity):
    """An xNL PersonName: an ordered list of PersonNameElement children."""
    _namespace = XML_NS_XNL
    XML_TAG = "{%s}PersonName" % _namespace

    def __init__(self, name_elements=None):
        self.name_elements = []
        for name_element in (name_elements or []):
            self.add_name_element(name_element)

    def add_name_element(self, value):
        """Append *value*; a plain string is wrapped in a PersonNameElement."""
        if isinstance(value, PersonNameElement):
            self.name_elements.append(value)
        elif isinstance(value, basestring):
            self.name_elements.append(PersonNameElement(value=value))
        else:
            raise ValueError('value must be instance of PersonNameElement')

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element."""
        super(PersonName, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(PersonName.XML_TAG)
        for name_element in self.name_elements:
            return_obj.append(name_element.to_obj(ns_info=ns_info))
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element into a PersonName (returns None for None)."""
        if obj is None:
            return None
        return_obj = return_obj or cls()
        for ne_obj in obj.findall(PersonNameElement.XML_TAG):
            return_obj.add_name_element(PersonNameElement.from_obj(ne_obj))
        return return_obj

    def to_dict(self):
        """Dict representation; an empty element list is omitted."""
        d = {}
        if self.name_elements:
            d['name_elements'] = [ne.to_dict() for ne in self.name_elements]
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build a PersonName from its dict representation (None for falsy input)."""
        if not dict_repr:
            return None
        return_obj = return_obj or cls()
        for ne_dict in dict_repr.get('name_elements', []):
            return_obj.add_name_element(PersonNameElement.from_dict(ne_dict))
        return return_obj
class OrganisationName(stix.Entity):
    """An xNL OrganisationName: name elements plus optional subdivision
    names and a Type attribute."""
    _namespace = XML_NS_XNL
    XML_TAG = "{%s}OrganisationName" % _namespace
    def __init__(self, name_elements=None, subdivision_names=None, type_=None):
        self.type_ = type_
        # Both setters below normalize None / single item / sequence input.
        self.name_elements = name_elements
        self.subdivision_names = subdivision_names
    @property
    def name_elements(self):
        # List of OrganisationNameElement instances.
        return self._name_elements
    @name_elements.setter
    def name_elements(self, value):
        # Assigning always resets the list first.
        self._name_elements = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_organisation_name_element(v)
        else:
            self.add_organisation_name_element(value)
    def add_organisation_name_element(self, value):
        """Append *value*; plain strings are wrapped in OrganisationNameElement."""
        if isinstance(value, basestring):
            self.name_elements.append(OrganisationNameElement(value=value))
        elif isinstance(value, OrganisationNameElement):
            self.name_elements.append(value)
        else:
            raise ValueError('value must be instance of OrganisationNameElement')
    @property
    def subdivision_names(self):
        # List of SubDivisionName instances.
        return self._subdivision_names
    @subdivision_names.setter
    def subdivision_names(self, value):
        # Assigning always resets the list first.
        self._subdivision_names = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_subdivision_name(v)
        else:
            self.add_subdivision_name(value)
    def add_subdivision_name(self, value):
        """Append a SubDivisionName; unlike name elements, strings are NOT coerced."""
        if not isinstance(value, SubDivisionName):
            raise ValueError('value must be instance of SubDivisionName')
        self.subdivision_names.append(value)
    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element (name elements first, then subdivisions)."""
        super(OrganisationName, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            root_tag = OrganisationName.XML_TAG
            return_obj = et.Element(root_tag)
        if self.type_:
            # Type is a namespace-qualified attribute in the xNL namespace.
            return_obj.attrib['{%s}Type' % XML_NS_XNL] = self.type_
        for name_element in self.name_elements:
            return_obj.append(name_element.to_obj(ns_info=ns_info))
        for subdivision_name in self.subdivision_names:
            return_obj.append(subdivision_name.to_obj(ns_info=ns_info))
        return return_obj
    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.type_ = obj.attrib.get('{%s}Type' % XML_NS_XNL)
        name_elements = obj.findall(OrganisationNameElement.XML_TAG)
        if name_elements:
            for name_element_obj in name_elements:
                name_element = OrganisationNameElement.from_obj(name_element_obj)
                return_obj.add_organisation_name_element(name_element)
        sub_division_names = obj.findall(SubDivisionName.XML_TAG)
        if sub_division_names:
            for sub_division_name_obj in sub_division_names:
                sub_division_name = SubDivisionName.from_obj(sub_division_name_obj)
                return_obj.add_subdivision_name(sub_division_name)
        return return_obj
    def to_dict(self):
        """Dict representation; unset/empty fields are omitted."""
        d = {}
        if self.type_:
            d['type'] = self.type_
        if self.name_elements:
            for ne in self.name_elements:
                d.setdefault('name_elements', []).append(ne.to_dict())
        if self.subdivision_names:
            for sn in self.subdivision_names:
                d.setdefault('subdivision_names', []).append(sn.to_dict())
        return d
    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build from a dict representation (returns None for falsy input)."""
        if not dict_repr:
            return None
        if not return_obj:
            return_obj = cls()
        ne_dicts = dict_repr.get('name_elements', [])
        sn_dicts = dict_repr.get('subdivision_names', [])
        return_obj.type_ = dict_repr.get('type')
        for ne_dict in ne_dicts:
            return_obj.add_organisation_name_element(OrganisationNameElement.from_dict(ne_dict))
        for sn_dict in sn_dicts:
            return_obj.add_subdivision_name(SubDivisionName.from_dict(sn_dict))
        return return_obj
class _BaseNameElement(stix.Entity):
    """Do not instantiate directly: use PersonNameElement or
    OrganisationNameElement
    """
    def __init__(self, value=None):
        self.value = value
    @property
    def value(self):
        # The element's text content; no type restriction is enforced.
        return self._value
    @value.setter
    def value(self, value):
        # if not value:
        #     raise ValueError('value cannot be None')
        self._value = value
    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Populate *return_obj* (a subclass instance) from a binding object.

        Unlike most from_obj methods in this module, the subclass MUST
        create and pass return_obj.
        """
        if not return_obj:
            raise ValueError("Must supply return_obj")
        # NOTE(review): reads obj.valueOf_ (a generateDS-style binding
        # attribute) rather than the etree '.text' used elsewhere in this
        # module - confirm what object type is actually passed here.
        return_obj.value = obj.valueOf_
        return return_obj
    def to_obj(self, return_obj=None, ns_info=None):
        # Assumes the subclass created return_obj before delegating here;
        # this base only sets the element text.
        super(_BaseNameElement, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        return_obj.text = self.value
        return return_obj
    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Populate *return_obj* (a subclass instance) from a dict."""
        if not return_obj:
            raise ValueError("Must supply return_obj")
        return_obj.value = dict_repr.get('value', None)
        return return_obj
    def to_dict(self):
        """Dict representation (always includes 'value', even when None)."""
        return dict(value=self.value)
class PersonNameElement(_BaseNameElement):
    """One typed component (title, first name, alias, ...) of a PersonName."""
    _namespace = XML_NS_XNL
    XML_TAG = "{%s}NameElement" % _namespace

    TYPE_TITLE = 'Title'
    TYPE_PRECEDING_TITLE = 'PrecedingTitle'
    TYPE_FIRST_NAME = 'FirstName'
    TYPE_MIDDLE_NAME = 'MiddleName'
    TYPE_LAST_NAME = 'LastName'
    TYPE_OTHER_NAME = 'OtherName'
    TYPE_ALIAS = 'Alias'
    TYPE_GENERATION_IDENTIFIER = 'GenerationIdentifier'
    TYPE_DEGREE = 'Degree'
    TYPES = (
        TYPE_TITLE, TYPE_PRECEDING_TITLE, TYPE_FIRST_NAME, TYPE_LAST_NAME,
        TYPE_MIDDLE_NAME, TYPE_OTHER_NAME, TYPE_ALIAS,
        TYPE_GENERATION_IDENTIFIER, TYPE_DEGREE
    )

    def __init__(self, value=None, element_type=None):
        super(PersonNameElement, self).__init__(value)
        self.element_type = element_type

    @property
    def element_type(self):
        # One of TYPES, or None.
        return self._element_type

    @element_type.setter
    def element_type(self, value):
        if value and value not in self.TYPES:
            raise ValueError('value must be one of %s: ' % (self.TYPES,))
        self._element_type = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element (element created before base fills text)."""
        if not return_obj:
            return_obj = et.Element(PersonNameElement.XML_TAG)
        super(PersonNameElement, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if self.element_type:
            return_obj.attrib['ElementType'] = self.element_type
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        return_obj = return_obj or cls()
        return_obj.element_type = obj.get('ElementType')
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; 'element_type' is omitted when unset."""
        d = {'value': self.value}
        if self.element_type:
            d['element_type'] = self.element_type
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not dict_repr:
            return None
        return_obj = return_obj or cls()
        return_obj.value = dict_repr.get('value', None)
        return_obj.element_type = dict_repr.get('element_type', None)
        return return_obj
class OrganisationNameElement(_BaseNameElement):
    """One typed element of an OrganisationName."""
    _namespace = XML_NS_XNL
    XML_TAG = "{%s}NameElement" % _namespace

    TYPE_NAME_ONLY = "NameOnly"
    TYPE_TYPE_ONLY = "TypeOnly"
    TYPE_FULL_NAME = "FullName"
    TYPES = (TYPE_NAME_ONLY, TYPE_TYPE_ONLY, TYPE_FULL_NAME)

    def __init__(self, value=None, element_type=None):
        # Fix: the base constructor already stores value; the redundant
        # duplicate 'self.value = value' assignment was removed.
        super(OrganisationNameElement, self).__init__(value)
        self.element_type = element_type

    @property
    def element_type(self):
        # One of TYPES, or None.
        return self._element_type

    @element_type.setter
    def element_type(self, value):
        if value and value not in self.TYPES:
            raise ValueError('value must be one of: %s ' % (self.TYPES,))
        self._element_type = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element (element created before base fills text)."""
        if not return_obj:
            return_obj = et.Element(OrganisationNameElement.XML_TAG)
        super(OrganisationNameElement, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if self.element_type:
            # ElementType is namespace-qualified, unlike PersonNameElement's.
            return_obj.attrib['{%s}ElementType' % XML_NS_XNL] = self.element_type
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.element_type = obj.get('{%s}ElementType' % XML_NS_XNL)
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; unset fields are omitted."""
        d = {}
        if self.element_type:
            d['element_type'] = self.element_type
        if self.value:
            d['value'] = self.value
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not dict_repr:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.value = dict_repr.get('value')
        return_obj.element_type = dict_repr.get('element_type')
        return return_obj
class SubDivisionName(stix.Entity):
    """An xNL SubDivisionName with a constrained Type attribute."""
    _namespace = XML_NS_XNL
    XML_TAG = "{%s}SubDivisionName" % _namespace

    TYPE_DEPARTMENT = 'Department'
    TYPE_DIVISION = 'Division'
    TYPE_BRANCH = 'Branch'
    TYPE_BUSINESS_UNIT = 'BusinessUnit'
    TYPE_SCHOOL = 'School'
    TYPE_SECTION = 'Section'
    TYPES = (
        TYPE_DEPARTMENT, TYPE_DIVISION, TYPE_BRANCH, TYPE_BUSINESS_UNIT,
        TYPE_SCHOOL, TYPE_SECTION
    )

    def __init__(self, value=None, type_=None):
        self.value = value
        self.type = type_

    @property
    def type(self):
        # One of TYPES, or None.
        return self._type

    @type.setter
    def type(self, value):
        if value and value not in self.TYPES:
            raise ValueError('value must be one of: %s' % (self.TYPES,))
        self._type = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element."""
        super(SubDivisionName, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(SubDivisionName.XML_TAG)
        if self.type:
            return_obj.attrib['{%s}Type' % XML_NS_XNL] = self.type
        return_obj.text = self.value
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        return_obj = return_obj or cls()
        return_obj.type = obj.get('{%s}Type' % XML_NS_XNL)
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; 'type' is omitted when unset."""
        d = {'value': self.value}
        if self.type:
            d['type'] = self.type
        return d

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not dict_repr:
            return None
        return_obj = return_obj or cls()
        return_obj.value = dict_repr.get('value')
        return_obj.type = dict_repr.get('type')
        return return_obj
class Language(stix.Entity):
    """A single xPIL Language value."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}Language" % _namespace

    def __init__(self, value=None):
        self.value = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element.

        Bug fix: a caller-supplied *return_obj* used to be silently replaced
        with a fresh element; it is now honored, matching the sibling
        entity classes in this module.
        """
        super(Language, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(self.XML_TAG)
        return_obj.text = self.value
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None).

        The optional *return_obj* keyword is new (default None) and
        backward compatible; it brings this class in line with siblings.
        """
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; unset value is omitted."""
        d = {}
        if self.value:
            d['value'] = self.value
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        """Build from a dict representation (None for falsy input).

        The optional *return_obj* keyword is new and backward compatible.
        """
        if not d:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.value = d.get('value')
        return return_obj
class ElectronicAddressIdentifier(stix.Entity):
    """An xPIL ElectronicAddressIdentifier with an optional Type attribute."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}ElectronicAddressIdentifier" % _namespace

    def __init__(self, value=None, type_=None):
        self.type_ = type_
        self.value = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element.

        Bug fix: a caller-supplied *return_obj* used to be silently replaced
        with a fresh element; it is now honored.
        """
        super(ElectronicAddressIdentifier, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(self.XML_TAG)
        return_obj.text = self.value
        if self.type_:
            return_obj.attrib['{%s}Type' % XML_NS_XPIL] = self.type_
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.type_ = obj.attrib.get('{%s}Type' % XML_NS_XPIL)
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; unset fields are omitted."""
        d = {}
        if self.value:
            d['value'] = self.value
        if self.type_:
            d['type'] = self.type_
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not d:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.value = d.get('value')
        return_obj.type_ = d.get('type')
        return return_obj
class OrganisationInfo(stix.Entity):
    """Organisation metadata; currently only the IndustryType attribute."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}OrganisationInfo" % _namespace

    def __init__(self, industry_type=None):
        self.industry_type = industry_type

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element.

        Bug fix: a caller-supplied *return_obj* used to be silently replaced
        with a fresh element; it is now honored.
        """
        super(OrganisationInfo, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(self.XML_TAG)
        if self.industry_type:
            return_obj.attrib['{%s}IndustryType' % self._namespace] = self.industry_type
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.industry_type = obj.get('{%s}IndustryType' % cls._namespace)
        return return_obj

    def to_dict(self):
        """Dict representation; unset fields are omitted."""
        d = {}
        if self.industry_type:
            d['industry_type'] = self.industry_type
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not d:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.industry_type = d.get('industry_type')
        return return_obj
class FreeTextLine(stix.Entity):
    """A free-text line with an optional Type attribute."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}FreeTextLine" % _namespace

    def __init__(self, value=None, type_=None):
        self.value = value
        self.type_ = type_

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element.

        Bug fix: a caller-supplied *return_obj* used to be silently replaced
        with a fresh element; it is now honored.
        """
        super(FreeTextLine, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(self.XML_TAG)
        if self.type_:
            return_obj.attrib['{%s}Type' % self._namespace] = self.type_
        if self.value:
            return_obj.text = self.value
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.type_ = obj.get('{%s}Type' % cls._namespace)
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; unset fields are omitted."""
        d = {}
        if self.type_:
            d['type'] = self.type_
        if self.value:
            d['value'] = self.value
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not d:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.type_ = d.get('type')
        return_obj.value = d.get('value')
        return return_obj
class ContactNumber(stix.Entity):
    """An xPIL ContactNumber: a media type plus ContactNumberElement parts."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}ContactNumber" % _namespace

    COM_MEDIA_TYPE_CELLPHONE = "Cellphone"
    COM_MEDIA_TYPE_FAX = "Fax"
    COM_MEDIA_TYPE_PAGER = "Pager"
    COM_MEDIA_TYPE_TELEPHONE = "Telephone"
    COM_MEDIA_TYPE_VOIP = "VOIP"
    ALLOWED_COM_MEDIA_TYPES = (
        COM_MEDIA_TYPE_CELLPHONE, COM_MEDIA_TYPE_FAX, COM_MEDIA_TYPE_PAGER,
        COM_MEDIA_TYPE_TELEPHONE, COM_MEDIA_TYPE_VOIP
    )

    def __init__(self, contact_number_elements=None, communication_media_type=None):
        self.communication_media_type = communication_media_type
        self.contact_number_elements = contact_number_elements

    @property
    def contact_number_elements(self):
        # List of ContactNumberElement instances.
        return self._contact_number_elements

    @contact_number_elements.setter
    def contact_number_elements(self, value):
        # Assigning resets the list; accepts None, one element, or a sequence.
        self._contact_number_elements = []
        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_contact_number_element(v)
        else:
            self.add_contact_number_element(value)

    def add_contact_number_element(self, value):
        """Append *value*; non-ContactNumberElement values are wrapped."""
        if not value:
            return
        elif isinstance(value, ContactNumberElement):
            self.contact_number_elements.append(value)
        else:
            self.contact_number_elements.append(ContactNumberElement(value))

    @property
    def communication_media_type(self):
        # One of ALLOWED_COM_MEDIA_TYPES, or None.
        return self._communication_media_type

    @communication_media_type.setter
    def communication_media_type(self, value):
        if not value:
            self._communication_media_type = None
        elif value not in self.ALLOWED_COM_MEDIA_TYPES:
            raise ValueError('value must be one of %s' % (self.ALLOWED_COM_MEDIA_TYPES,))
        else:
            self._communication_media_type = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element.

        Bug fix: this class previously skipped the superclass to_obj hook
        and ignored a caller-supplied *return_obj*; both now behave like
        every sibling entity class in this module.
        """
        super(ContactNumber, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(self.XML_TAG)
        if self.communication_media_type:
            return_obj.attrib['{%s}CommunicationMediaType' % self._namespace] = self.communication_media_type
        if self.contact_number_elements:
            for contact_number_element in self.contact_number_elements:
                return_obj.append(contact_number_element.to_obj(ns_info=ns_info))
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.communication_media_type = obj.get('{%s}CommunicationMediaType' % cls._namespace)
        contact_number_elements = obj.findall("{%s}ContactNumberElement" % XML_NS_XPIL)
        if contact_number_elements is not None and len(contact_number_elements) > 0:
            return_obj.contact_number_elements = [ContactNumberElement.from_obj(x) for x in contact_number_elements]
        return return_obj

    def to_dict(self):
        """Dict representation; unset fields are omitted."""
        d = {}
        if self.communication_media_type:
            d['communication_media_type'] = self.communication_media_type
        if self.contact_number_elements:
            d['contact_number_elements'] = [x.to_dict() for x in self.contact_number_elements]
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not d:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.communication_media_type = d.get('communication_media_type')
        return_obj.contact_number_elements = [ContactNumberElement.from_dict(x) for x in d.get('contact_number_elements', [])]
        return return_obj
class ContactNumberElement(stix.Entity):
    """One component of a ContactNumber (area code, local number, ...)."""
    _namespace = XML_NS_XPIL
    XML_TAG = "{%s}ContactNumberElement" % _namespace

    TYPE_COUNTRY_CODE = "CountryCode"
    TYPE_AREA_CODE = "AreaCode"
    TYPE_LOCAL_NUMBER = "LocalNumber"
    TYPE_EXTENSION = "Extension"
    TYPE_PIN = "Pin"
    TYPE_SEPARATOR = "Separator"
    TYPE_NATIONAL_NUMBER = "NationalNumber"
    TYPE_INTERNATIONAL_NUMBER = "InternationalNumber"
    ALLOWED_TYPES = (
        TYPE_AREA_CODE, TYPE_COUNTRY_CODE, TYPE_EXTENSION,
        TYPE_INTERNATIONAL_NUMBER, TYPE_LOCAL_NUMBER, TYPE_NATIONAL_NUMBER,
        TYPE_SEPARATOR, TYPE_PIN
    )

    def __init__(self, value=None, type_=None):
        self.value = value
        self.type_ = type_

    @property
    def type_(self):
        # One of ALLOWED_TYPES, or None.
        return self._type

    @type_.setter
    def type_(self, value):
        if not value:
            self._type = None
        elif value not in self.ALLOWED_TYPES:
            raise ValueError('value must be one of %s' % (self.ALLOWED_TYPES,))
        else:
            self._type = value

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize to an etree Element.

        Bug fix: a caller-supplied *return_obj* used to be silently replaced
        with a fresh element; it is now honored.
        """
        super(ContactNumberElement, self).to_obj(return_obj=return_obj, ns_info=ns_info)
        if not return_obj:
            return_obj = et.Element(self.XML_TAG)
        if self.type_:
            return_obj.attrib['{%s}Type' % self._namespace] = self.type_
        if self.value:
            return_obj.text = self.value
        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Parse an etree Element (returns None for None)."""
        if obj is None:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.type_ = obj.get('{%s}Type' % cls._namespace)
        return_obj.value = obj.text
        return return_obj

    def to_dict(self):
        """Dict representation; unset fields are omitted."""
        d = {}
        if self.type_:
            d['type'] = self.type_
        if self.value:
            d['value'] = self.value
        return d

    @classmethod
    def from_dict(cls, d, return_obj=None):
        """Build from a dict representation (None for falsy input)."""
        if not d:
            return None
        if not return_obj:
            return_obj = cls()
        return_obj.type_ = d.get('type')
        return_obj.value = d.get('value')
        return return_obj
| |
#!/usr/bin/env python
import os
import os.path
import subprocess
import struct
import sys
import unittest
from elftools.elf.elffile import ELFFile
# All fixture ELF/BIN files are addressed relative to the elf2image dir.
TEST_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "elf2image")
os.chdir(TEST_DIR)
try:
    # Allow CI to point at a specific esptool.py under test via env var...
    ESPTOOL_PY = os.environ["ESPTOOL_PY"]
except KeyError:
    # ...otherwise default to the in-tree copy two directories up.
    ESPTOOL_PY = os.path.join(TEST_DIR, "../..", "esptool.py")
# import the version of esptool we are testing with
sys.path.append(os.path.dirname(ESPTOOL_PY))
import esptool
def try_delete(path):
    """Best-effort removal of *path*; a missing file is silently ignored."""
    try:
        os.unlink(path)
    except OSError:
        pass  # already gone (or never created) - nothing to clean up
def segment_matches_section(segment, section):
    """ segment is an ImageSegment from an esptool binary.
    section is an elftools ELF section
    Returns True if they match
    """
    # ELF section lengths are padded up to a 4-byte boundary before comparing.
    padded_size = (section.header.sh_size + 0x3) & ~3
    if section.header.sh_addr != segment.addr:
        return False
    return padded_size == len(segment.data)
class BaseTestCase(unittest.TestCase):
    """Shared helpers for the elf2image image-verification test cases."""

    def assertEqualHex(self, expected, actual, message=None):
        """assertEqual on hex() of both args; single characters are first
        converted via ord() so byte-string indexing works on py2 and py3."""
        try:
            expected = hex(expected)
        except TypeError:  # if expected is character
            expected = hex(ord(expected))
        try:
            actual = hex(actual)
        except TypeError:  # if actual is character
            actual = hex(ord(actual))
        self.assertEqual(expected, actual, message)

    def assertImageContainsSection(self, image, elf, section_name):
        """
        Assert an esptool binary image object contains
        the data for a particular ELF section.
        """
        with open(elf, "rb") as f:
            e = ELFFile(f)
            section = e.get_section_by_name(section_name)
            self.assertTrue(section, "%s should be in the ELF" % section_name)
            sh_addr = section.header.sh_addr
            data = section.data()
            # section contents may be smeared across multiple image segments,
            # so look through each segment and remove it from ELF section 'data'
            # as we find it in the image segments. When we're done 'data' should
            # all be accounted for
            for seg in sorted(image.segments, key=lambda s: s.addr):
                print("comparing seg 0x%x sec 0x%x len 0x%x" % (seg.addr, sh_addr, len(data)))
                if seg.addr == sh_addr:
                    overlap_len = min(len(seg.data), len(data))
                    self.assertEqual(data[:overlap_len], seg.data[:overlap_len],
                                     "ELF '%s' section has mis-matching binary image data" % section_name)
                    sh_addr += overlap_len
                    data = data[overlap_len:]
            # no bytes in 'data' should be left unmatched
            self.assertEqual(0, len(data),
                             "ELF %s section '%s' has no encompassing segment(s) in binary image (image segments: %s)"
                             % (elf, section_name, image.segments))

    def assertImageInfo(self, binpath, chip="esp8266"):
        """
        Run esptool.py image_info on a binary file,
        assert no red flags about contents.
        """
        cmd = [sys.executable, ESPTOOL_PY, "--chip", chip, "image_info", binpath]
        try:
            output = subprocess.check_output(cmd).decode("utf-8")
            print(output)
        except subprocess.CalledProcessError as e:
            print(e.output)
            raise
        self.assertFalse("invalid" in output, "Checksum calculation should be valid")
        self.assertFalse("warning" in output.lower(), "Should be no warnings in image_info output")

    def run_elf2image(self, chip, elf_path, version=None, extra_args=None):
        """ Run elf2image on elf_path """
        # Bug fix: extra_args was a mutable default ([]) shared between all
        # calls; use the None sentinel instead.
        if extra_args is None:
            extra_args = []
        cmd = [sys.executable, ESPTOOL_PY, "--chip", chip, "elf2image"]
        if version is not None:
            cmd += ["--version", str(version)]
        cmd += [elf_path] + extra_args
        print("Executing %s" % (" ".join(cmd)))
        try:
            output = str(subprocess.check_output(cmd))
            print(output)
            self.assertFalse("warning" in output.lower(), "elf2image should not output warnings")
        except subprocess.CalledProcessError as e:
            print(e.output)
            raise
class ESP8266V1ImageTests(BaseTestCase):
    """elf2image v1 output checks for a NONOS SDK 2.0 IoT demo ELF."""
    ELF = "esp8266-nonosssdk20-iotdemo.elf"
    BIN_LOAD = "esp8266-nonosssdk20-iotdemo.elf-0x00000.bin"
    BIN_IROM = "esp8266-nonosssdk20-iotdemo.elf-0x10000.bin"

    def setUp(self):
        self.run_elf2image("esp8266", self.ELF, 1)

    def tearDown(self):
        for produced in (self.BIN_LOAD, self.BIN_IROM):
            try_delete(produced)

    def test_irom_bin(self):
        with open(self.ELF, "rb") as f:
            irom_section = ELFFile(f).get_section_by_name(".irom0.text")
            self.assertEqual(irom_section.header.sh_size,
                             os.stat(self.BIN_IROM).st_size,
                             "IROM raw binary file should be same length as .irom0.text section")

    def test_loaded_sections(self):
        image = esptool.LoadFirmwareImage("esp8266", self.BIN_LOAD)
        self.assertEqual(3, len(image.segments))
        for section in (".data", ".text", ".rodata"):
            self.assertImageContainsSection(image, self.ELF, section)
class ESP8266V12SectionHeaderNotAtEnd(BaseTestCase):
    """ Ref https://github.com/espressif/esptool/issues/197 -
    this ELF image has the section header not at the end of the file """
    ELF = "esp8266-nonossdkv12-example.elf"
    BIN = ELF + "-0x00000.bin"

    def tearDown(self):
        try_delete(self.BIN)

    def test_elf_section_header_not_at_end(self):
        self.run_elf2image("esp8266", self.ELF)
        image = esptool.LoadFirmwareImage("esp8266", self.BIN)
        self.assertEqual(3, len(image.segments))
        for section in (".data", ".text", ".rodata"):
            self.assertImageContainsSection(image, self.ELF, section)
class ESP8266V2ImageTests(BaseTestCase):
    # V2 ("SDK bootloader") image tests: convert the fixture ELF, then check
    # segment layout, IROM padding, the trailing CRC32 and image_info output.
    def _test_elf2image(self, elfpath, binpath):
        try:
            self.run_elf2image("esp8266", elfpath, 2)
            image = esptool.LoadFirmwareImage("esp8266", binpath)
            self.assertEqual(4, len(image.segments))
            self.assertImageContainsSection(image, elfpath, ".data")
            self.assertImageContainsSection(image, elfpath, ".text")
            self.assertImageContainsSection(image, elfpath, ".rodata")
            irom_segment = image.segments[0]
            self.assertEqual(0, irom_segment.addr,
                             "IROM segment 'load address' should be zero")
            with open(elfpath, "rb") as f:
                e = ELFFile(f)
                # IROM is padded up to a 16-byte boundary in the image
                sh_size = (e.get_section_by_name(".irom0.text").header.sh_size + 15) & ~15
                self.assertEqual(len(irom_segment.data), sh_size, "irom segment (0x%x) should be same size (16 padded) as .irom0.text section (0x%x)" % (len(irom_segment.data), sh_size))
            # check V2 CRC (for ESP8266 SDK bootloader)
            with open(binpath, "rb") as f:
                # CRC32 is stored little-endian in the final 4 bytes and is
                # computed over everything that precedes it.
                f.seek(-4, os.SEEK_END)
                image_len = f.tell()
                crc_stored = struct.unpack("<I", f.read(4))[0]
                f.seek(0)
                crc_calc = esptool.esp8266_crc32(f.read(image_len))
                self.assertEqual(crc_stored, crc_calc)
            # test imageinfo doesn't fail
            self.assertImageInfo(binpath)
        finally:
            # always remove the generated binary, even if an assertion failed
            try_delete(binpath)
    def test_nonossdkimage(self):
        ELF="esp8266-nonossdkv20-at-v2.elf"
        BIN="esp8266-nonossdkv20-at-v2-0x01000.bin"
        self._test_elf2image(ELF, BIN)
    def test_espopenrtosimage(self):
        ELF="esp8266-openrtos-blink-v2.elf"
        BIN="esp8266-openrtos-blink-v2-0x02000.bin"
        self._test_elf2image(ELF, BIN)
class ESP32ImageTests(BaseTestCase):
    """elf2image checks for ESP32 bootloader and app-template images."""

    def _test_elf2image(self, elfpath, binpath):
        # Convert, load and sanity-check the image; always delete the
        # generated binary, even on failure.
        try:
            self.run_elf2image("esp32", elfpath)
            image = esptool.LoadFirmwareImage("esp32", binpath)
            self.assertImageInfo(binpath, "esp32")
            return image
        finally:
            try_delete(binpath)

    def test_bootloader(self):
        image = self._test_elf2image("esp32-bootloader.elf", "esp32-bootloader.bin")
        self.assertEqual(4, len(image.segments))
        for section in (".iram1.text", ".iram_pool_1.text",
                        ".dram0.data", ".dram0.rodata"):
            self.assertImageContainsSection(image, "esp32-bootloader.elf", section)

    def test_app_template(self):
        image = self._test_elf2image("esp32-app-template.elf", "esp32-app-template.bin")
        self.assertEqual(8, len(image.segments))
        # the other two segments are padding segments
        for section in (".iram0.text", ".iram0.vectors",
                        ".dram0.data", ".flash.rodata",
                        ".flash.text", ".rtc.text"):
            self.assertImageContainsSection(image, "esp32-app-template.elf", section)
class ESP8266FlashHeaderTests(BaseTestCase):
    """Check flash mode/size encoding in the ESP8266 V2 image header."""

    def test_2mb(self):
        elf = "esp8266-nonossdkv20-at-v2.elf"
        bin_path = "esp8266-nonossdkv20-at-v2-0x01000.bin"
        try:
            self.run_elf2image("esp8266", elf, version=2,
                               extra_args=["--flash_size", "2MB", "--flash_mode", "dio"])
            with open(bin_path, "rb") as f:
                header = f.read(4)
                print("header %r" % header)
                self.assertEqualHex(0xea, header[0])
                self.assertEqualHex(0x02, header[2])
                self.assertEqualHex(0x30, header[3])
        finally:
            try_delete(bin_path)
class ESP32FlashHeaderTests(BaseTestCase):
    """Check flash mode/size encoding in the ESP32 image header."""

    def test_16mb(self):
        elf = "esp32-app-template.elf"
        bin_path = "esp32-app-template.bin"
        try:
            self.run_elf2image("esp32", elf,
                               extra_args=["--flash_size", "16MB", "--flash_mode", "dio"])
            with open(bin_path, "rb") as f:
                header = f.read(4)
                self.assertEqualHex(0xe9, header[0])
                self.assertEqualHex(0x02, header[2])
                self.assertEqualHex(0x40, header[3])
        finally:
            try_delete(bin_path)
if __name__ == '__main__':
    print("Running image generation tests...")
    # buffer=True captures stdout/stderr of passing tests so only failing
    # tests print their (verbose) output.
    unittest.main(buffer=True)
| |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfbase/pdfmetrics.py
#$Header $
__version__=''' $Id: pdfmetrics.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""This provides a database of font metric information and
defines Font, Encoding and TypeFace classes aimed at end users.
There are counterparts to some of these in pdfbase/pdfdoc.py, but
the latter focus on constructing the right PDF objects. These
classes are declarative and focus on letting the user construct
and query font objects.
The module maintains a registry of font objects at run time.
It is independent of the canvas or any particular context. It keeps
a registry of Font, TypeFace and Encoding objects. Ideally these
would be pre-loaded, but due to a nasty circularity problem we
trap attempts to access them and do it on first access.
"""
import string, os, sys
from reportlab.pdfbase import _fontdata
from reportlab.lib.logger import warnOnce
from reportlab.lib.utils import rl_isfile, rl_glob, rl_isdir, open_and_read, open_and_readlines, findInPaths, isSeqType, isStrType, isUnicodeType
from reportlab.rl_config import defaultEncoding, T1SearchPath
from reportlab.pdfbase import rl_codecs
_notdefChar = b'n'  # byte emitted for characters no available font can encode
rl_codecs.RL_Codecs.register()
standardFonts = _fontdata.standardFonts
standardEncodings = _fontdata.standardEncodings
# Run-time registries, filled lazily on first access (see module docstring).
_typefaces = {}
_encodings = {}
_fonts = {}
def _py_unicode2T1(utext,fonts):
    '''return a list of (font,string) pairs representing the unicode text'''
    # Greedy encoder: try to encode all of utext in the first font's
    # encoding; on failure, split at the first unencodable character and
    # recurse into the remaining fonts for that span.
    R = []
    font, fonts = fonts[0], fonts[1:]
    enc = font.encName
    if 'UCS-2' in enc:
        enc = 'UTF16'
    while utext:
        try:
            if isUnicodeType(utext):
                s = utext.encode(enc)
            else:
                s = utext  # already bytes: pass through untouched
            R.append((font,s))
            break
        except UnicodeEncodeError as e:
            # e.args[2:4] hold the [start, end) indices of the unencodable run.
            i0, il = e.args[2:4]
            if i0:
                # Everything before the bad run encodes in this font.
                R.append((font,utext[:i0].encode(enc)))
            if fonts:
                R.extend(_py_unicode2T1(utext[i0:il],fonts))
            else:
                # No fallback font left: substitute the notdef glyph once
                # per character.
                # NOTE(review): relies on module-global _notdefFont, which
                # must be defined elsewhere in this module.
                R.append((_notdefFont,_notdefChar*(il-i0)))
            utext = utext[il:]
    return R
try:
from _rl_accel import unicode2T1
except ImportError:
unicode2T1 = _py_unicode2T1
class FontError(Exception):
    """Raised for font-related errors in this module."""
    pass
class FontNotFoundError(Exception):
    """Raised when a requested font cannot be located."""
    pass
def parseAFMFile(afmFileName):
    """Quick and dirty AFM parser.

    Returns (topLevel, glyphLevel): topLevel is a dictionary of header
    key/value pairs (values converted to int where possible), glyphLevel
    is a list of (code, width, glyphName) tuples taken from the
    StartCharMetrics section.  Just enough for embedding; a better parser
    would accept options for what data you wanted, and preserve the
    order."""
    lines = open_and_readlines(afmFileName, 'r')
    if len(lines)<=1:
        #likely to be a MAC file - resplit on carriage returns
        if lines: lines = lines[0].split('\r')
        if len(lines)<=1:
            raise ValueError('AFM file %s hasn\'t enough data' % afmFileName)
    topLevel = {}
    glyphLevel = []

    lines = [l.strip() for l in lines]
    # drop comment lines up front so neither pass sees them
    lines = [l for l in lines if not l.lower().startswith('comment')]

    #pass 1 - get the widths
    inMetrics = 0   # 1 while between StartCharMetrics and EndCharMetrics
    for line in lines:
        if line[0:16] == 'StartCharMetrics':
            inMetrics = 1
        elif line[0:14] == 'EndCharMetrics':
            inMetrics = 0
        elif inMetrics:
            # a metrics line is ';'-separated, e.g. "C 65 ; WX 722 ; N A ; ..."
            chunks = line.split(';')
            chunks = [chunk.strip() for chunk in chunks]
            cidChunk, widthChunk, nameChunk = chunks[0:3]
            # character ID
            l, r = cidChunk.split()
            assert l == 'C', 'bad line in font file %s' % line
            cid = int(r)
            # width
            l, r = widthChunk.split()
            assert l == 'WX', 'bad line in font file %s' % line
            width = int(r)
            # name
            l, r = nameChunk.split()
            assert l == 'N', 'bad line in font file %s' % line
            name = r
            glyphLevel.append((cid, width, name))

    # pass 2 font info
    # NOTE(review): the StartFontMetrics line itself falls through to the
    # elif below (if/if/elif) and is stored in topLevel - confirm intended.
    inHeader = 0
    for line in lines:
        if line[0:16] == 'StartFontMetrics':
            inHeader = 1
        if line[0:16] == 'StartCharMetrics':
            inHeader = 0
        elif inHeader:
            if line[0:7] == 'Comment': pass   # dead branch: comments filtered above
            try:
                left, right = line.split(' ',1)
            except:
                raise ValueError("Header information error in afm %s: line='%s'" % (afmFileName, line))
            try:
                right = int(right)   # numeric values become ints; others stay strings
            except:
                pass
            topLevel[left] = right
    return (topLevel, glyphLevel)
class TypeFace:
    """A named collection of glyph metrics (widths, ascent/descent).

    Standard-14 faces load their metrics from _fontdata at construction;
    anything else starts empty (subclasses such as EmbeddedType1Face fill
    the data in from font files)."""

    def __init__(self, name):
        self.name = name
        self.glyphNames = []
        self.glyphWidths = {}
        self.ascent = 0
        self.descent = 0

        # all typefaces of whatever class should have these 3 attributes.
        # these are the basis for family detection.
        self.familyName = None  # should set on load/construction if possible
        self.bold = 0           # bold faces should set this
        self.italic = 0         # italic faces should set this

        # Symbol and ZapfDingbats only make sense in their own encodings
        if name == 'ZapfDingbats':
            self.requiredEncoding = 'ZapfDingbatsEncoding'
        elif name == 'Symbol':
            self.requiredEncoding = 'SymbolEncoding'
        else:
            self.requiredEncoding = None

        if name in standardFonts:
            self.builtIn = 1
            self._loadBuiltInData(name)
        else:
            self.builtIn = 0

    def _loadBuiltInData(self, name):
        """Called for the built in 14 fonts.  Gets their glyph data.
        We presume they never change so this can be a shared reference."""
        name = str(name)  # needed for pycanvas&jython/2.1 compatibility
        self.glyphWidths = _fontdata.widthsByFontGlyph[name]
        self.glyphNames = list(self.glyphWidths.keys())
        self.ascent,self.descent = _fontdata.ascent_descent[name]

    def getFontFiles(self):
        "Info function, return list of the font files this depends on."
        return []

    def findT1File(self, ext='.pfb'):
        """Return the name of the Type 1 file (.pfb by default) for this
        face, or None (after a one-time warning) when it cannot be found."""
        possible_exts = (ext.lower(), ext.upper())
        if hasattr(self,'pfbFileName'):
            r_basename = os.path.splitext(self.pfbFileName)[0]
            for e in possible_exts:
                if rl_isfile(r_basename + e):
                    return r_basename + e
        r = None  # FIX: r could previously be unbound when nothing was found
        try:
            r = _fontdata.findT1File(self.name)
        except:
            afm = bruteForceSearchForAFM(self.name)
            if afm:
                if ext.lower() == '.pfb':
                    for e in possible_exts:
                        pfb = os.path.splitext(afm)[0] + e
                        if rl_isfile(pfb):
                            r = pfb
                            break  # FIX: keep the first hit; a later miss used to overwrite it with None
                elif ext.lower() == '.afm':
                    r = afm
        if r is None:
            warnOnce("Can't find %s for face '%s'" % (ext, self.name))
        return r
def bruteForceSearchForFile(fn,searchPath=None):
    """Return a usable path for fn, searching searchPath if necessary.

    The name is tried as given first; otherwise its basename is looked up
    in each existing directory of searchPath (default: T1SearchPath).
    When nothing matches, fn is handed back unchanged."""
    if searchPath is None:
        from reportlab.rl_config import T1SearchPath as searchPath
    if rl_isfile(fn):
        return fn
    base = os.path.basename(fn)
    candidates = (os.path.join(d, base) for d in searchPath if rl_isdir(d))
    for candidate in candidates:
        if rl_isfile(candidate):
            return candidate
    return fn
def bruteForceSearchForAFM(faceName):
    """Looks in all AFM files on path for face with given name.
    Returns AFM file name or None. Ouch!"""
    from reportlab.rl_config import T1SearchPath

    for dirname in T1SearchPath:
        if not rl_isdir(dirname): continue
        possibles = rl_glob(dirname + os.sep + '*.[aA][fF][mM]')
        for possible in possibles:
            try:
                topDict, glyphDict = parseAFMFile(possible)
                if topDict['FontName'] == faceName:
                    return possible
            except:
                # re-raise the original error with the face name appended
                # to its message so the caller can see what was searched for
                t,v,b=sys.exc_info()
                v.args = (' '.join(map(str,v.args))+', while looking for faceName=%r' % faceName,)
                raise
    # falls through: implicit None when no AFM on the search path matches
#for faceName in standardFonts:
# registerTypeFace(TypeFace(faceName))
class Encoding:
    """Object to help you create and refer to encodings.

    An encoding is a 256-entry vector of glyph names (None for unused
    code points), built from a standard encoding name, a registered base
    encoding, another Encoding instance, or an explicit vector."""

    def __init__(self, name, base=None):
        self.name = name
        self.frozen = 0
        if name in standardEncodings:
            assert base is None, "Can't have a base encoding for a standard encoding"
            self.baseEncodingName = name
            self.vector = _fontdata.encodings[name]
        elif base == None:
            # assume based on the usual one
            self.baseEncodingName = defaultEncoding
            self.vector = _fontdata.encodings[defaultEncoding]
        elif isStrType(base):
            # name of a (possibly lazily constructed) registered encoding
            baseEnc = getEncoding(base)
            self.baseEncodingName = baseEnc.name
            self.vector = baseEnc.vector[:]
        elif isSeqType(base):
            # an explicit 256-entry vector of glyph names
            self.baseEncodingName = defaultEncoding
            self.vector = base[:]
        elif isinstance(base, Encoding):
            # copy another Encoding object
            self.baseEncodingName = base.name
            self.vector = base.vector[:]

    def __getitem__(self, index):
        "Return glyph name for that code point, or None"
        # THIS SHOULD BE INLINED FOR SPEED
        return self.vector[index]

    def __setitem__(self, index, value):
        """Assign a glyph name to one code point; no-op if unchanged."""
        assert self.frozen == 0, 'Cannot modify a frozen encoding'
        if self.vector[index]!=value:
            L = list(self.vector)
            L[index] = value
            self.vector = tuple(L)

    def freeze(self):
        "Make the encoding immutable (enforced by the assertions above)."
        self.vector = tuple(self.vector)
        self.frozen = 1

    def isEqual(self, other):
        "True when both the name and the entire glyph vector match."
        return self.name==other.name and tuple(self.vector)==tuple(other.vector)

    def modifyRange(self, base, newNames):
        """Set a group of character names starting at the code point 'base'.

        NOTE(review): assigns into self.vector item-by-item, so the vector
        must still be a list (unfrozen, sequence-based) - confirm callers."""
        assert self.frozen == 0, 'Cannot modify a frozen encoding'
        idx = base
        for name in newNames:
            self.vector[idx] = name
            idx = idx + 1

    def getDifferences(self, otherEnc):
        """
        Return a compact list of the code points differing between two encodings

        This is in the Adobe format: list of
            [[b1, name1, name2, name3],
             [b2, name4]]
        where b1...bn is the starting code point, and the glyph names following
        are assigned consecutive code points.
        """
        ranges = []
        curRange = None
        for i in range(len(self.vector)):
            glyph = self.vector[i]
            if glyph==otherEnc.vector[i]:
                # difference run (if any) ends here
                if curRange:
                    ranges.append(curRange)
                    curRange = []
            else:
                if curRange:
                    curRange.append(glyph)
                elif glyph:
                    curRange = [i, glyph]
        if curRange:
            ranges.append(curRange)
        return ranges

    def makePDFObject(self):
        "Returns a PDF Object representing self"
        # avoid circular imports - this cannot go at module level
        from reportlab.pdfbase import pdfdoc

        D = {}
        baseEnc = getEncoding(self.baseEncodingName)
        differences = self.getDifferences(baseEnc) #[None] * 256)

        # if no differences, we just need the base name
        if differences == []:
            return pdfdoc.PDFName(self.baseEncodingName)
        else:
            # make up a dictionary describing the new encoding
            diffArray = []
            # FIX: the loop variable was previously named `range`,
            # shadowing the builtin within this method
            for diffRange in differences:
                diffArray.append(diffRange[0])  # numbers go 'as is'
                for glyphName in diffRange[1:]:
                    if glyphName is not None:
                        # there is no way to 'unset' a character in the base font.
                        diffArray.append('/' + glyphName)

            D["Differences"] = pdfdoc.PDFArray(diffArray)
            D["BaseEncoding"] = pdfdoc.PDFName(self.baseEncodingName)
            D["Type"] = pdfdoc.PDFName("Encoding")
            return pdfdoc.PDFDictionary(D)
#for encName in standardEncodings:
#    registerEncoding(Encoding(encName))

# fonts used to substitute glyphs missing from standard single-byte text
# fonts; populated near the bottom of this module once getFont exists
standardT1SubstitutionFonts = []
class Font:
    """Represents a font (i.e combination of face and encoding).

    Defines suitable machinery for single byte fonts.  This is a
    concrete class which can handle the basic built-in fonts; not clear
    yet if embedded ones need a new font class or just a new typeface
    class (which would do the job through composition)."""

    _multiByte = 0      # do not want our own stringwidth
    _dynamicFont = 0    # do not want dynamic subsetting

    def __init__(self, name, faceName, encName):
        """Build a font from a known/registered face name and encoding name."""
        self.fontName = name
        face = self.face = getTypeFace(faceName)
        self.encoding = getEncoding(encName)
        self.encName = encName
        # built-in faces with a free choice of encoding may borrow missing
        # glyphs from the standard substitution fonts (Symbol/ZapfDingbats)
        if face.builtIn and face.requiredEncoding is None:
            _ = standardT1SubstitutionFonts
        else:
            _ = []
        self.substitutionFonts = _
        self._calcWidths()
        self._notdefChar = _notdefChar
        # ZapfDingbats acts as its own notdef font; others use the module's
        self._notdefFont = name=='ZapfDingbats' and self or _notdefFont

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.face.name)

    def _calcWidths(self):
        """Vector of widths for stringWidth function"""
        # synthesized once at construction: map each of the 256 code points
        # through the encoding vector to the face's glyph width
        w = [0] * 256
        gw = self.face.glyphWidths
        vec = self.encoding.vector
        for i in range(256):
            glyphName = vec[i]
            if glyphName is not None:
                try:
                    w[i] = gw[glyphName]
                except KeyError:
                    import reportlab.rl_config
                    if reportlab.rl_config.warnOnMissingFontGlyphs:
                        print('typeface "%s" does not have a glyph "%s", bad font!' % (self.face.name, glyphName))
        self.widths = w

    def _py_stringWidth(self, text, size, encoding='utf8'):
        """This is the "purist" approach to width.  The practical approach
        is to use the stringWidth function, which may be swapped in for one
        written in C."""
        if not isUnicodeType(text):
            text = text.decode(encoding)
        w = 0
        # unicode2T1 yields (font, bytes) runs, substituting fallback fonts
        # for characters this font's encoding cannot represent
        for f, t in unicode2T1(text,[self]+self.substitutionFonts):
            if sys.version_info[0] == 3:
                w += sum([f.widths[c] for c in t])       # py3: bytes iterate as ints
            else:
                w += sum([f.widths[ord(c)] for c in t])  # py2: iterate as chars
        return w*0.001*size
    stringWidth = _py_stringWidth

    def _formatWidths(self):
        "returns a pretty block in PDF Array format to aid inspection"
        # FIX: the old code concatenated int widths directly onto bytes
        # (TypeError), assigned the closing bracket to a misspelled name
        # `test`, and returned the text without the closing bracket.
        chunks = [b'[']
        for i in range(256):
            chunks.append(b' ' + str(self.widths[i]).encode('ascii'))
            if i % 16 == 15:
                chunks.append(b'\n')
        chunks.append(b' ]')
        return b''.join(chunks)

    def addObjects(self, doc):
        """Makes and returns one or more PDF objects to be added
        to the document.  The caller supplies the internal name
        to be used (typically F1, F2... in sequence)"""
        # avoid circular imports - this cannot go at module level
        from reportlab.pdfbase import pdfdoc

        # construct a Type 1 Font internal object
        internalName = 'F' + repr(len(doc.fontMapping)+1)
        pdfFont = pdfdoc.PDFType1Font()
        pdfFont.Name = internalName
        pdfFont.BaseFont = self.face.name
        pdfFont.__Comment__ = 'Font %s' % self.fontName
        pdfFont.Encoding = self.encoding.makePDFObject()

        # is it a built-in one?  if not, need more stuff.
        if not self.face.name in standardFonts:
            pdfFont.FirstChar = 0
            pdfFont.LastChar = 255
            pdfFont.Widths = pdfdoc.PDFArray(self.widths)
            pdfFont.FontDescriptor = self.face.addObjects(doc)

        # now link it in
        ref = doc.Reference(pdfFont, internalName)
        # also refer to it in the BasicFonts dictionary
        fontDict = doc.idToObject['BasicFonts'].dict
        fontDict[internalName] = pdfFont
        # and in the font mappings
        doc.fontMapping[self.fontName] = '/' + internalName
PFB_MARKER=chr(0x80)
PFB_ASCII=chr(1)
PFB_BINARY=chr(2)
PFB_EOF=chr(3)
if sys.version_info[0] == 3:
def _pfbCheck(p,d,m,fn):
if chr(d[p])!=PFB_MARKER or chr(d[p+1])!=m:
raise ValueError('Bad pfb file\'%s\' expected chr(%d)chr(%d) at char %d, got chr(%d)chr(%d)' % (fn,ord(PFB_MARKER),ord(m),p,d[p],d[p+1]))
if m==PFB_EOF: return
p = p + 2
l = (((((d[p+3])<<8)|(d[p+2])<<8)|(d[p+1]))<<8)|(d[p])
p = p + 4
if p+l>len(d):
raise ValueError('Bad pfb file\'%s\' needed %d+%d bytes have only %d!' % (fn,p,l,len(d)))
return p, p+l
else:
def _pfbCheck(p,d,m,fn):
if d[p]!=PFB_MARKER or d[p+1]!=m:
raise ValueError('Bad pfb file\'%s\' expected chr(%d)chr(%d) at char %d, got chr(%d)chr(%d)' % (fn,ord(PFB_MARKER),ord(m),p,ord(d[p]),ord(d[p+1])))
if m==PFB_EOF: return
p = p + 2
l = ((((ord(d[p+3])<<8)|ord(d[p+2])<<8)|ord(d[p+1]))<<8)|ord(d[p])
p = p + 4
if p+l>len(d):
raise ValueError('Bad pfb file\'%s\' needed %d+%d bytes have only %d!' % (fn,p,l,len(d)))
return p, p+l
class EmbeddedType1Face(TypeFace):
    """A Type 1 font other than one of the basic 14.
    Its glyph data will be embedded in the PDF file."""

    def __init__(self, afmFileName, pfbFileName):
        """Load metrics from the AFM file and binary glyphs from the PFB.
        The real face name is supplied by the AFM parse in _loadMetrics."""
        # None is a hack; the name is filled in lower down in this __init__
        TypeFace.__init__(self, None)
        afmFileName = findInPaths(afmFileName,T1SearchPath)
        pfbFileName = findInPaths(pfbFileName,T1SearchPath)
        self.afmFileName = os.path.abspath(afmFileName)
        self.pfbFileName = os.path.abspath(pfbFileName)
        self.requiredEncoding = None
        self._loadGlyphs(pfbFileName)
        self._loadMetrics(afmFileName)

    def getFontFiles(self):
        "Info function, return list of the font files this depends on."
        return [self.afmFileName, self.pfbFileName]

    def _loadGlyphs(self, pfbFileName):
        """Loads in binary glyph data, and finds the four length
        measurements needed for the font descriptor"""
        pfbFileName = bruteForceSearchForFile(pfbFileName)
        assert rl_isfile(pfbFileName), 'file %s not found' % pfbFileName
        d = open_and_read(pfbFileName, 'b')
        # a PFB is ascii, binary, ascii segments followed by an EOF marker
        s1, l1 = _pfbCheck(0,d,PFB_ASCII,pfbFileName)
        s2, l2 = _pfbCheck(l1,d,PFB_BINARY,pfbFileName)
        s3, l3 = _pfbCheck(l2,d,PFB_ASCII,pfbFileName)
        _pfbCheck(l3,d,PFB_EOF,pfbFileName)
        self._binaryData = d[s1:l1]+d[s2:l2]+d[s3:l3]
        self._length = len(self._binaryData)
        self._length1 = l1-s1
        self._length2 = l2-s2
        self._length3 = l3-s3

    def _loadMetrics(self, afmFileName):
        """Loads in and parses font metrics"""
        afmFileName = bruteForceSearchForFile(afmFileName)
        (topLevel, glyphData) = parseAFMFile(afmFileName)

        self.name = topLevel['FontName']
        self.familyName = topLevel['FamilyName']
        self.ascent = topLevel.get('Ascender', 1000)
        self.descent = topLevel.get('Descender', 0)
        self.capHeight = topLevel.get('CapHeight', 1000)
        self.italicAngle = topLevel.get('ItalicAngle', 0)
        self.stemV = topLevel.get('stemV', 0)
        self.xHeight = topLevel.get('XHeight', 1000)

        # FIX: the fallback default is a list, but the AFM value is a
        # string; the old code unconditionally called .split() and
        # crashed whenever FontBBox was absent from the file.
        bbox = topLevel.get('FontBBox', [0, 0, 1000, 1000])
        if isStrType(bbox):
            bbox = bbox.split()
        self.bbox = [int(tok) for tok in bbox]

        glyphWidths = {}
        for (cid, width, name) in glyphData:
            glyphWidths[name] = width
        self.glyphWidths = glyphWidths
        self.glyphNames = list(glyphWidths.keys())
        self.glyphNames.sort()

        # for font-specific encodings like Symbol, Dingbats, Carta we
        # need to make a new encoding as well....
        if topLevel.get('EncodingScheme', None) == 'FontSpecific':
            names = [None] * 256
            for (code, width, name) in glyphData:
                if code >=0 and code <=255:
                    names[code] = name
            encName = self.name + 'Encoding'
            self.requiredEncoding = encName
            registerEncoding(Encoding(encName, names))

    def addObjects(self, doc):
        """Add whatever needed to PDF file, and return a FontDescriptor reference"""
        from reportlab.pdfbase import pdfdoc

        fontFile = pdfdoc.PDFStream()
        fontFile.content = self._binaryData
        #fontFile.dictionary['Length'] = self._length
        fontFile.dictionary['Length1'] = self._length1
        fontFile.dictionary['Length2'] = self._length2
        fontFile.dictionary['Length3'] = self._length3
        #fontFile.filters = [pdfdoc.PDFZCompress]
        fontFileRef = doc.Reference(fontFile, 'fontFile:' + self.pfbFileName)

        fontDescriptor = pdfdoc.PDFDictionary({
            'Type': '/FontDescriptor',
            'Ascent':self.ascent,
            'CapHeight':self.capHeight,
            'Descent':self.descent,
            'Flags': 34,
            'FontBBox':pdfdoc.PDFArray(self.bbox),
            'FontName':pdfdoc.PDFName(self.name),
            'ItalicAngle':self.italicAngle,
            'StemV':self.stemV,
            'XHeight':self.xHeight,
            'FontFile': fontFileRef,
            })
        return doc.Reference(fontDescriptor, 'fontDescriptor:' + self.name)
def registerTypeFace(face):
    """Add the TypeFace to the registry; non-standard faces also get a
    font family mapping so bold/italic markup resolves to something."""
    assert isinstance(face, TypeFace), 'Not a TypeFace: %s' % face
    _typefaces[face.name] = face
    if not face.name in standardFonts:
        # HACK - bold/italic do not apply for type 1, so register
        # all combinations of mappings.
        registerFontFamily(face.name)
def registerEncoding(enc):
    """Add the Encoding to the registry and freeze it.

    Re-registering the same name is allowed only when the vectors match;
    otherwise a FontError is raised."""
    assert isinstance(enc, Encoding), 'Not an Encoding: %s' % enc
    if enc.name in _encodings:
        # already got one, complain if they are not the same
        if enc.isEqual(_encodings[enc.name]):
            enc.freeze()
        else:
            raise FontError('Encoding "%s" already registered with a different name vector!' % enc.name)
    else:
        _encodings[enc.name] = enc
        enc.freeze()
    # have not yet dealt with immutability!
def registerFontFamily(family,normal=None,bold=None,italic=None,boldItalic=None):
    """Map the four bold/italic variants of a family to concrete font names.

    Missing variants fall back: boldItalic -> italic or bold or normal;
    bold/italic -> normal; normal -> the family name itself.  Note that
    boldItalic must be resolved BEFORE bold/italic are defaulted, or it
    would always collapse to normal."""
    from reportlab.lib import fonts
    if not normal: normal = family
    family = family.lower()
    if not boldItalic: boldItalic = italic or bold or normal
    if not bold: bold = normal
    if not italic: italic = normal
    fonts.addMapping(family, 0, 0, normal)
    fonts.addMapping(family, 1, 0, bold)
    fonts.addMapping(family, 0, 1, italic)
    fonts.addMapping(family, 1, 1, boldItalic)
def registerFont(font):
    "Registers a font, including setting up info for accelerated stringWidth"
    #assert isinstance(font, Font), 'Not a Font: %s' % font
    fontName = font.fontName
    _fonts[fontName] = font
    if font._multiByte:
        # CID fonts don't need to have typeface registered.
        #need to set mappings so it can go in a paragraph even if within
        # bold tags
        # NOTE(review): only multi-byte fonts get an automatic family
        # mapping here; single-byte fonts rely on registerTypeFace.
        registerFontFamily(font.fontName)
def getTypeFace(faceName):
    """Lazily construct known typefaces if not found.

    Standard-14 names are built from the built-in metrics tables; other
    names trigger a brute-force search of the T1 search path for a
    matching AFM/PFB pair, which is embedded.  Re-raises the KeyError
    when the face cannot be found or built."""
    try:
        return _typefaces[faceName]
    except KeyError:
        # not found, construct it if known
        if faceName in standardFonts:
            face = TypeFace(faceName)
            (face.familyName, face.bold, face.italic) = _fontdata.standardFontAttributes[faceName]
            registerTypeFace(face)
##            print 'auto-constructing type face %s with family=%s, bold=%d, italic=%d' % (
##                face.name, face.familyName, face.bold, face.italic)
            return face
        else:
            #try a brute force search
            afm = bruteForceSearchForAFM(faceName)
            if afm:
                # prefer a .pfb next to the AFM, either case
                for e in ('.pfb', '.PFB'):
                    pfb = os.path.splitext(afm)[0] + e
                    if rl_isfile(pfb): break
                assert rl_isfile(pfb), 'file %s not found!' % pfb
                face = EmbeddedType1Face(afm, pfb)
                registerTypeFace(face)
                return face
            else:
                # nothing we can do: propagate the original KeyError
                raise
def getEncoding(encName):
    """Lazily construct known (standard) encodings if not found.

    Re-raises the KeyError for names neither registered nor standard."""
    try:
        return _encodings[encName]
    except KeyError:
        if encName in standardEncodings:
            enc = Encoding(encName)
            registerEncoding(enc)
            #print 'auto-constructing encoding %s' % encName
            return enc
        else:
            raise
def findFontAndRegister(fontName):
    '''search for and register a font given its name'''
    assert type(fontName) is str
    # it might have a font-specific encoding e.g. Symbol
    # or Dingbats.  If not, take the default.
    face = getTypeFace(fontName)
    if face.requiredEncoding:
        font = Font(fontName, fontName, face.requiredEncoding)
    else:
        font = Font(fontName, fontName, defaultEncoding)
    registerFont(font)
    return font
def getFont(fontName):
    """Lazily constructs known fonts if not found.

    Names of form 'face-encoding' will be built if face and encoding
    are known.  Also if the name is just one of the standard 14, it
    will make up a font in the default encoding."""
    font = _fonts.get(fontName)
    if font is None:
        font = findFontAndRegister(fontName)
    return font
# module initialisation: the notdef fallback font and the substitution
# fonts must exist before any text can be measured
_notdefFont = getFont('ZapfDingbats')
standardT1SubstitutionFonts.extend([getFont('Symbol'),_notdefFont])
def getAscentDescent(fontName,fontSize=None):
    """Return (ascent, descent) for fontName.

    Some fonts carry the metrics themselves; otherwise they come from
    the typeface.  With a fontSize, values are scaled from the 1000-unit
    em square to points; without one, the raw values are returned."""
    font = getFont(fontName)
    try:
        ascent = font.ascent
        descent = font.descent
    except AttributeError:
        # FIX: was a bare `except:`, which also swallowed unrelated errors;
        # only a missing attribute should fall back to the typeface metrics.
        ascent = font.face.ascent
        descent = font.face.descent
    if fontSize:
        norm = fontSize/1000.
        return ascent*norm, descent*norm
    else:
        return ascent, descent
def getAscent(fontName,fontSize=None):
    """Return the ascent for fontName (scaled to fontSize when given)."""
    return getAscentDescent(fontName,fontSize)[0]
def getDescent(fontName,fontSize=None):
    """Return the descent for fontName (scaled to fontSize when given)."""
    return getAscentDescent(fontName,fontSize)[1]
def getRegisteredFontNames():
    "Returns the sorted names of all currently registered fonts."
    return sorted(_fonts)
def stringWidth(text, fontName, fontSize, encoding='utf8'):
    """Compute width of string in points;
    not accelerated as fast enough because of _instanceStringWidthU.

    `encoding` only matters when `text` is a byte string; the font is
    looked up (and lazily registered) by name on every call."""
    return getFont(fontName).stringWidth(text, fontSize, encoding=encoding)
try:
from _rl_accel import _instanceStringWidthU
import new
Font.stringWidth = new.instancemethod(_instanceStringWidthU,None,Font)
except ImportError:
pass
def dumpFontData():
    """Print the registered encodings, typefaces and fonts to stdout."""
    print('Registered Encodings:')
    for encName in sorted(_encodings):
        print(' ',encName)
    print()
    print('Registered Typefaces:')
    for faceName in sorted(_typefaces):
        print(' ',faceName)
    print()
    print('Registered Fonts:')
    for key in sorted(_fonts):
        font = _fonts[key]
        print(' %s (%s/%s)' % (font.fontName, font.face.name, font.encoding.name))
def test3widths(texts):
    # checks all 3 algorithms give same answer, note speed
    import time
    for fontName in standardFonts[0:1]:
##        t0 = time.time()
##        for text in texts:
##            l1 = stringWidth(text, fontName, 10)
##        t1 = time.time()
##        print 'fast stringWidth took %0.4f' % (t1 - t0)

        # time the raw per-character width-table lookup
        t0 = time.time()
        w = getFont(fontName).widths
        for text in texts:
            l2 = 0
            for ch in text:
                l2 = l2 + w[ord(ch)]
        t1 = time.time()
        print('slow stringWidth took %0.4f' % (t1 - t0))

        # time the public stringWidth path, including font lookup each call
        t0 = time.time()
        for text in texts:
            l3 = getFont(fontName).stringWidth(text, 10)
        t1 = time.time()
        print('class lookup and stringWidth took %0.4f' % (t1 - t0))
        print()
def testStringWidthAlgorithms():
    """Benchmark the stringWidth implementations on a large document.

    NOTE(review): relies on a hard-coded relative path outside this
    package; only runnable from the original development tree."""
    rawdata = open('../../rlextra/rml2pdf/doc/rml_user_guide.prep').read()
    print('rawdata length %d' % len(rawdata))
    print('test one huge string...')
    test3widths([rawdata])
    print()
    words = rawdata.split()
    print('test %d shorter strings (average length %0.2f chars)...' % (len(words), 1.0*len(rawdata)/len(words)))
    test3widths(words)
def test():
    # smoke test: dump glyph data for a built-in face and a made-up one
    helv = TypeFace('Helvetica')
    registerTypeFace(helv)
    print(helv.glyphNames[0:30])

    wombat = TypeFace('Wombat')   # unknown face: starts with no glyphs
    print(wombat.glyphNames)
    registerTypeFace(wombat)

    dumpFontData()
#preserve the initial values here
def _reset(
        initial_dicts = dict(
            _typefaces = _typefaces.copy(),
            _encodings = _encodings.copy(),
            _fonts = _fonts.copy(),
            )
        ):
    """Restore the module registries to their import-time contents.

    The snapshot is taken once, at definition time, via the (deliberate)
    mutable default argument; registered with rl_config so global font
    state can be reset between documents/tests."""
    for k,v in initial_dicts.items():
        d=globals()[k]
        d.clear()
        d.update(v)

from reportlab.rl_config import register_reset
register_reset(_reset)
del register_reset
if __name__=='__main__':
    # manual smoke tests / benchmarks; not run on import
    test()
    testStringWidthAlgorithms()
| |
# -*- coding: utf-8 -*-
"""
scikit-MDR was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Tuan Nguyen (tnguyen4@swarthmore.edu)
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
from collections import defaultdict
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.metrics import accuracy_score
class MDRBase(BaseEstimator):
    """Base Multifactor Dimensionality Reduction (MDR) functions.

    MDR can take categorical features and binary endpoints as input, and
    outputs a binary constructed feature or prediction."""

    def __init__(self, tie_break=1, default_label=0):
        """Sets up the MDR algorithm for feature construction.

        Parameters
        ----------
        tie_break: int (default: 1)
            Label assigned to a cell whose class fraction exactly ties
            with the overall class fraction
        default_label: int (default: 0)
            Label assigned at prediction time to feature combinations
            never seen during fit

        Returns
        -------
        None
        """
        self.tie_break = tie_break
        self.default_label = default_label
        self.class_count_matrix = None   # {feature tuple: {label: count}}
        self.feature_map = None          # {feature tuple: assigned label}

    def fit(self, features, class_labels):
        """Constructs the MDR feature map from the provided training data.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix
        class_labels: array-like {n_samples}
            List of true class labels

        Returns
        -------
        self: A copy of the fitted model
        """
        # Robustness fix: accept any array-like input; plain lists used to
        # crash on the .shape / .size accesses below.
        features = np.asarray(features)
        class_labels = np.asarray(class_labels)

        unique_labels = sorted(np.unique(class_labels))
        if len(unique_labels) != 2:
            raise ValueError('MDR only supports binary endpoints.')

        # Count the distribution of classes that fall into each MDR grid cell
        self.class_count_matrix = defaultdict(lambda: defaultdict(int))
        for row_i in range(features.shape[0]):
            feature_instance = tuple(features[row_i])
            self.class_count_matrix[feature_instance][class_labels[row_i]] += 1
        self.class_count_matrix = dict(self.class_count_matrix)

        # Only applies to binary classification
        overall_class_fraction = float(sum(class_labels == unique_labels[0])) / class_labels.size

        # If one class is more abundant in a MDR grid cell than it is overall,
        # then assign the cell to that class
        self.feature_map = {}
        for feature_instance in self.class_count_matrix:
            counts = self.class_count_matrix[feature_instance]
            fraction = float(counts[unique_labels[0]]) / np.sum(list(counts.values()))
            if fraction > overall_class_fraction:
                self.feature_map[feature_instance] = unique_labels[0]
            elif fraction == overall_class_fraction:
                self.feature_map[feature_instance] = self.tie_break
            else:
                self.feature_map[feature_instance] = unique_labels[1]

        return self
class MDR(MDRBase, TransformerMixin):
    """Multifactor Dimensionality Reduction (MDR) for feature construction in binary classification problems.

    MDR can take categorical features and binary endpoints as input, and outputs a binary constructed feature."""

    def transform(self, features):
        """Uses the MDR feature map to construct a new feature from the provided features.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix to transform

        Returns
        ----------
        array-like: {n_samples, 1}
            Constructed features from the provided feature matrix
        """
        if self.feature_map is None:
            raise ValueError('The MDR model must be fit before transform can be called.')

        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        new_feature = np.zeros(features.shape[0], dtype=int)

        for row_i in range(features.shape[0]):
            feature_instance = tuple(features[row_i])
            # unseen feature combinations get the configured default label
            new_feature[row_i] = self.feature_map.get(feature_instance, self.default_label)

        return new_feature.reshape(features.shape[0], 1)

    def fit_transform(self, features, class_labels):
        """Convenience function that fits the provided data then constructs a new feature from the provided features.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix
        class_labels: array-like {n_samples}
            List of true class labels

        Returns
        ----------
        array-like: {n_samples, 1}
            Constructed features from the provided feature matrix
        """
        self.fit(features, class_labels)
        return self.transform(features)
class MDRClassifier(MDRBase, ClassifierMixin):
    """Multifactor Dimensionality Reduction (MDR) for binary classification problems.

    MDR can take categorical features and binary endpoints as input, and outputs a binary prediction."""

    def predict(self, features):
        """Uses the MDR feature map to construct predictions from the provided features.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix to predict from

        Returns
        ----------
        array-like: {n_samples}
            Predicted class labels for the provided feature matrix
        """
        if self.feature_map is None:
            raise ValueError('The MDR model must be fit before predict can be called.')

        # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        new_feature = np.zeros(features.shape[0], dtype=int)

        for row_i in range(features.shape[0]):
            feature_instance = tuple(features[row_i])
            # unseen feature combinations get the configured default label
            new_feature[row_i] = self.feature_map.get(feature_instance, self.default_label)

        return new_feature

    def fit_predict(self, features, class_labels):
        """Convenience function that fits the provided data then constructs predictions from the provided features.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix
        class_labels: array-like {n_samples}
            List of true class labels

        Returns
        ----------
        array-like: {n_samples}
            Predicted class labels for the provided feature matrix
        """
        self.fit(features, class_labels)
        return self.predict(features)

    def score(self, features, class_labels, scoring_function=None, **scoring_function_kwargs):
        """Estimates the accuracy of the predictions from the constructed feature.

        Parameters
        ----------
        features: array-like {n_samples, n_features}
            Feature matrix to predict from
        class_labels: array-like {n_samples}
            List of true class labels
        scoring_function: callable, optional (default: sklearn accuracy_score)
            Alternative scoring function called as
            scoring_function(class_labels, predictions, **scoring_function_kwargs)

        Returns
        -------
        accuracy_score: float
            The estimated accuracy based on the constructed feature
        """
        if self.feature_map is None:
            raise ValueError('The MDR model must be fit before score can be called.')

        predictions = self.predict(features)
        if scoring_function is None:
            return accuracy_score(class_labels, predictions)
        return scoring_function(class_labels, predictions, **scoring_function_kwargs)
| |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import json
import os
import sys
import re
import time
import tempfile
import itertools
import datetime
import pstats
import socket
import struct
import threading
import traceback
import six
from .console import log
from .results import Results, format_benchmark_result
from . import statistics
from . import util
WIN = (os.name == "nt")
# Can't use benchmark.__file__, because that points to the compiled
# file, so it can't be run by another version of Python.
BENCHMARK_RUN_SCRIPT = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "benchmark.py")
JSON_ERROR_RETCODE = -257
BenchmarkResult = util.namedtuple_with_doc(
'BenchmarkResult',
['result', 'samples', 'number', 'errcode', 'stderr', 'profile'],
"""
Postprocessed benchmark result
Attributes
----------
result : list of object
List of numeric values of the benchmarks (one for each parameter
combination).
Values are `None` if benchmark failed or NaN if it was skipped.
samples : list of {list, None}
List of lists of sampled raw data points (or Nones if
no sampling done).
number : list of {dict, None}
List of actual repeat counts for each sample (or Nones if
no sampling done).
errcode : int
Process exit code
stderr : str
Process stdout/stderr output
profile : bytes
If `profile` is `True` and run was at least partially successful,
this key will be a byte string containing the cProfile data.
Otherwise, None.
""")
def skip_benchmarks(benchmarks, env, results=None):
    """
    Mark benchmarks as skipped.

    Parameters
    ----------
    benchmarks : Benchmarks
        Set of benchmarks to skip
    env : Environment
        Environment to skip them in
    results : Results, optional
        Where to store the results.
        If omitted, stored to a new unnamed Results object.

    Returns
    -------
    results : Results
        Benchmark results.
    """
    if results is None:
        results = Results.unnamed()

    log.warning("Skipping {0}".format(env.name))
    with log.indent():
        for name, benchmark in six.iteritems(benchmarks):
            log.step()
            log.warning('{0} skipped'.format(name))

            # record a failure result stamped with the current time
            started_at = datetime.datetime.utcnow()
            failure = fail_benchmark(benchmark)
            selected = benchmarks.benchmark_selection.get(name)
            results.add_result(benchmark, failure,
                               selected_idx=selected,
                               started_at=started_at)

    return results
def run_benchmarks(benchmarks, env, results=None,
                   show_stderr=False, quick=False, profile=False,
                   extra_params=None,
                   record_samples=False, append_samples=False,
                   run_rounds=None,
                   launch_method=None):
    """
    Run all of the benchmarks in the given `Environment`.

    Parameters
    ----------
    benchmarks : Benchmarks
        Benchmarks to run
    env : Environment object
        Environment in which to run the benchmarks.
    results : Results, optional
        Where to store the results.
        If omitted, stored to a new unnamed Results object.
    show_stderr : bool, optional
        When `True`, display any stderr emitted by the benchmark.
    quick : bool, optional
        When `True`, run each benchmark function exactly once.
        This is useful to quickly find errors in the benchmark
        functions, without taking the time necessary to get
        accurate timings.
    profile : bool, optional
        When `True`, run the benchmark through the `cProfile`
        profiler.
    extra_params : dict, optional
        Override values for benchmark attributes.
    record_samples : bool, optional
        Whether to retain result samples or discard them.
    append_samples : bool, optional
        Whether to retain any previously measured result samples
        and use them in statistics computations.
    run_rounds : sequence of int, optional
        Run rounds for benchmarks with multiple processes.
        If None, run all rounds.
    launch_method : {'auto', 'spawn', 'forkserver'}, optional
        Benchmark launching method to use.

    Returns
    -------
    results : Results
        Benchmark results.
    """
    if extra_params is None:
        extra_params = {}
    else:
        # Copy so the overrides applied below don't mutate the caller's dict
        extra_params = dict(extra_params)

    if quick:
        extra_params['number'] = 1
        extra_params['repeat'] = 1
        extra_params['warmup_time'] = 0
        extra_params['processes'] = 1

    if results is None:
        results = Results.unnamed()

    # Find all setup_cache routines needed
    setup_cache_timeout = {}
    benchmark_order = {}
    cache_users = {}
    max_processes = 0

    def get_processes(benchmark):
        """Get number of processes to use for a job"""
        if 'processes' in extra_params:
            return int(extra_params['processes'])
        else:
            return int(benchmark.get('processes', 1))

    # Group benchmarks by their setup_cache key; a cache is built once and
    # shared by every benchmark in its group.
    for name, benchmark in sorted(six.iteritems(benchmarks)):
        key = benchmark.get('setup_cache_key')
        setup_cache_timeout[key] = max(benchmark.get('setup_cache_timeout',
                                                     benchmark['timeout']),
                                       setup_cache_timeout.get(key, 0))
        benchmark_order.setdefault(key, []).append((name, benchmark))
        max_processes = max(max_processes, get_processes(benchmark))
        cache_users.setdefault(key, set()).add(name)

    if run_rounds is None:
        run_rounds = list(range(1, max_processes + 1))

    # Interleave benchmark runs, in setup_cache order
    existing_results = results.get_result_keys(benchmarks)

    def iter_run_items():
        # Yields (name, benchmark, setup_cache_key, is_final); rounds are
        # iterated highest-first so round 1 (the final round) comes last.
        for run_round in run_rounds[::-1]:
            for setup_cache_key, benchmark_set in six.iteritems(benchmark_order):
                for name, benchmark in benchmark_set:
                    log.step()

                    processes = get_processes(benchmark)

                    if run_round > processes:
                        if (not append_samples and
                                run_round == run_rounds[-1] and
                                name in existing_results):
                            # We need to remove samples here so that
                            # append_samples=False has an effect on all
                            # benchmarks regardless of whether they were
                            # run this round.
                            selected_idx = benchmarks.benchmark_selection.get(name)
                            results.remove_samples(name, selected_idx)
                        continue

                    is_final = (run_round == 1)
                    yield name, benchmark, setup_cache_key, is_final

    # Run benchmarks in order
    cache_dirs = {None: None}
    failed_benchmarks = set()
    failed_setup_cache = {}

    if append_samples:
        previous_result_keys = existing_results
    else:
        previous_result_keys = set()

    # Accumulated wall-clock time per benchmark across all rounds
    benchmark_durations = {}

    log.info("Benchmarking {0}".format(env.name))

    partial_info_time = None
    indent = log.indent()
    indent.__enter__()

    spawner = get_spawner(env, benchmarks.benchmark_dir,
                          launch_method=launch_method)

    try:
        # Preimport benchmark suite (if using forkserver)
        success, out = spawner.preimport()

        if success:
            if show_stderr and out:
                log.info("Importing benchmark suite produced output:")
                with log.indent():
                    log.error(out.rstrip())
        else:
            # The suite itself failed to import: record every selected
            # benchmark as failed and bail out early.
            log.warning("Importing benchmark suite failed (skipping all benchmarks).")
            if show_stderr and out:
                with log.indent():
                    log.error(out)

            stderr = 'asv: benchmark suite import failed'
            for name, benchmark, setup_cache_key, is_final in iter_run_items():
                if name in failed_benchmarks:
                    continue
                selected_idx = benchmarks.benchmark_selection.get(name)
                started_at = datetime.datetime.utcnow()
                res = fail_benchmark(benchmark, stderr=stderr)
                results.add_result(benchmark, res,
                                   selected_idx=selected_idx,
                                   started_at=started_at,
                                   record_samples=record_samples)
                failed_benchmarks.add(name)
            return results

        # Run benchmarks
        for name, benchmark, setup_cache_key, is_final in iter_run_items():
            selected_idx = benchmarks.benchmark_selection.get(name)
            started_at = datetime.datetime.utcnow()

            # Don't try to rerun failed benchmarks
            if name in failed_benchmarks:
                if is_final:
                    partial_info_time = None
                    log.info(name, reserve_space=True)
                    log_benchmark_result(results, benchmark,
                                         show_stderr=show_stderr)
                continue

            # Setup cache first, if needed
            if setup_cache_key is None:
                cache_dir = None
            elif setup_cache_key in cache_dirs:
                # Cache already built in an earlier round/benchmark
                cache_dir = cache_dirs[setup_cache_key]
            elif setup_cache_key not in failed_setup_cache:
                partial_info_time = None
                log.info("Setting up {0}".format(setup_cache_key), reserve_space=True)
                params_str = json.dumps({'cpu_affinity': extra_params.get('cpu_affinity')})
                cache_dir, stderr = spawner.create_setup_cache(
                    name, setup_cache_timeout[setup_cache_key], params_str)
                if cache_dir is not None:
                    log.add_padded('ok')
                    cache_dirs[setup_cache_key] = cache_dir
                else:
                    log.add_padded('failed')
                    if stderr and show_stderr:
                        with log.indent():
                            log.error(stderr)
                    failed_setup_cache[setup_cache_key] = stderr
                duration = (datetime.datetime.utcnow() - started_at).total_seconds()
                results.set_setup_cache_duration(setup_cache_key, duration)
                # Don't charge setup_cache time to the benchmark itself
                started_at = datetime.datetime.utcnow()

            if setup_cache_key in failed_setup_cache:
                # Mark benchmark as failed
                partial_info_time = None
                log.warning('{0} skipped (setup_cache failed)'.format(name))
                stderr = 'asv: setup_cache failed\n\n{}'.format(failed_setup_cache[setup_cache_key])
                res = fail_benchmark(benchmark, stderr=stderr)
                results.add_result(benchmark, res,
                                   selected_idx=selected_idx,
                                   started_at=started_at,
                                   record_samples=record_samples)
                failed_benchmarks.add(name)
                continue

            # If appending to previous results, make sure to use the
            # same value for 'number' attribute.
            cur_extra_params = extra_params
            if name in previous_result_keys:
                # Per-parameter-combination override list
                cur_extra_params = []
                prev_stats = results.get_result_stats(name, benchmark['params'])
                for s in prev_stats:
                    if s is None or 'number' not in s:
                        p = extra_params
                    else:
                        p = dict(extra_params)
                        p['number'] = s['number']
                    cur_extra_params.append(p)

            # Run benchmark
            if is_final:
                partial_info_time = None
                log.info(name, reserve_space=True)
            elif partial_info_time is None or time.time() > partial_info_time + 30:
                # Periodic progress message (at most every 30s) for
                # non-final rounds
                partial_info_time = time.time()
                log.info('Running ({0}--)'.format(name))

            res = run_benchmark(benchmark, spawner,
                                profile=profile,
                                selected_idx=selected_idx,
                                extra_params=cur_extra_params,
                                cwd=cache_dir)

            # Retain runtime durations
            ended_at = datetime.datetime.utcnow()
            if name in benchmark_durations:
                benchmark_durations[name] += (ended_at - started_at).total_seconds()
            else:
                benchmark_durations[name] = (ended_at - started_at).total_seconds()

            # Save result; samples are kept between rounds and only
            # dropped on the final round unless record_samples is set.
            results.add_result(benchmark, res,
                               selected_idx=selected_idx,
                               started_at=started_at,
                               duration=benchmark_durations[name],
                               record_samples=(not is_final or record_samples),
                               append_samples=(name in previous_result_keys))

            previous_result_keys.add(name)

            if all(r is None for r in res.result):
                failed_benchmarks.add(name)

            # Log result
            if is_final:
                partial_info_time = None
                log_benchmark_result(results, benchmark,
                                     show_stderr=show_stderr)
            else:
                log.add('.')

            # Cleanup setup cache, if no users left
            if cache_dir is not None and is_final:
                cache_users[setup_cache_key].remove(name)
                if not cache_users[setup_cache_key]:
                    # No users of this cache left, perform cleanup
                    util.long_path_rmtree(cache_dir, True)
                    del cache_dirs[setup_cache_key]
    finally:
        # Cleanup any dangling caches
        for cache_dir in cache_dirs.values():
            if cache_dir is not None:
                util.long_path_rmtree(cache_dir, True)
        indent.__exit__(None, None, None)
        spawner.close()

    return results
def get_spawner(env, benchmark_dir, launch_method):
    """
    Select and construct the process spawner for running benchmarks.

    'auto' (or None) resolves to 'forkserver' when fork + AF_UNIX are
    available (but never on OSX), otherwise to 'spawn'.

    Raises
    ------
    util.UserError
        If 'forkserver' was requested on a platform without fork support.
    ValueError
        If `launch_method` is not a recognized method name.
    """
    fork_available = hasattr(os, 'fork') and hasattr(socket, 'AF_UNIX')

    method = launch_method
    if method in (None, 'auto'):
        # Don't use ForkServer as default on OSX, because many Apple
        # things are not fork-safe
        if fork_available and sys.platform not in ('darwin',):
            method = "forkserver"
        else:
            method = "spawn"

    if method == "spawn":
        return Spawner(env, benchmark_dir)

    if method == "forkserver":
        if not fork_available:
            raise util.UserError("'forkserver' launch method not available "
                                 "on this platform")
        return ForkServer(env, benchmark_dir)

    raise ValueError("Invalid launch_method: {}".format(method))
def log_benchmark_result(results, benchmark, show_stderr=False):
    """Log the formatted outcome of one benchmark, plus any program output."""
    summary, details = format_benchmark_result(results, benchmark)
    log.add_padded(summary)
    if details:
        log.info(details, color='default')

    # Dump program output
    bench_name = benchmark['name']
    output = results.stderr.get(bench_name)
    exit_code = results.errcode.get(bench_name)

    # Exit codes with dedicated handling elsewhere are not re-reported here
    expected_codes = (None, 0, util.TIMEOUT_RETCODE, JSON_ERROR_RETCODE)
    if exit_code not in expected_codes:
        # Display also error code
        output = (output + "\n") if output else ""
        output += "asv: benchmark failed (exit status {})".format(exit_code)

    if output and show_stderr:
        with log.indent():
            log.error(output)
def fail_benchmark(benchmark, stderr='', errcode=1):
    """
    Return a BenchmarkResult describing a failed benchmark.

    For a parameterized benchmark there is one `None` result slot per
    point of the parameter grid; otherwise a single slot.
    """
    if benchmark['params']:
        # Mark only selected parameter combinations skipped; count the
        # grid points without keeping the product around
        n_slots = sum(1 for _ in itertools.product(*benchmark['params']))
    else:
        n_slots = 1

    empty = [None] * n_slots
    return BenchmarkResult(result=list(empty),
                           samples=list(empty),
                           number=list(empty),
                           errcode=errcode,
                           stderr=stderr,
                           profile=None)
def run_benchmark(benchmark, spawner, profile,
                  selected_idx=None,
                  extra_params=None,
                  cwd=None,
                  prev_result=None):
    """
    Run a benchmark (all of its parameter combinations, one subprocess each).

    Parameters
    ----------
    benchmark : dict
        Benchmark object dict
    spawner : Spawner
        Benchmark process spawner
    profile : bool
        Whether to run with profile
    selected_idx : set, optional
        Set of parameter indices to run for.
    extra_params : {dict, list}, optional
        Additional parameters to pass to the benchmark.
        If a list, each entry should correspond to a benchmark
        parameter combination.
    cwd : str, optional
        Working directory to run the benchmark in.
        If None, run in a temporary directory.
    prev_result : optional
        Unused; kept for interface compatibility.

    Returns
    -------
    result : BenchmarkResult
        Result data.
    """
    if extra_params is None:
        extra_params = {}

    all_results = []
    all_samples = []
    all_numbers = []
    all_profiles = []
    combined_stderr = ''
    final_errcode = 0

    if benchmark['params']:
        combo_iter = enumerate(itertools.product(*benchmark['params']))
    else:
        combo_iter = [(0, None)]

    for combo_idx, _combo in combo_iter:
        if selected_idx is not None and combo_idx not in selected_idx:
            # Deselected combination: record it as skipped (NaN result)
            all_results.append(util.nan)
            all_samples.append(None)
            all_numbers.append(None)
            all_profiles.append(None)
            continue

        if isinstance(extra_params, list):
            combo_params = extra_params[combo_idx]
        else:
            combo_params = extra_params

        single = _run_benchmark_single_param(
            benchmark, spawner, combo_idx,
            extra_params=combo_params, profile=profile,
            cwd=cwd)

        all_results.extend(single.result)
        all_samples.extend(single.samples)
        all_numbers.extend(single.number)
        all_profiles.append(single.profile)
        if single.stderr:
            combined_stderr += "\n\n"
            combined_stderr += single.stderr
        if single.errcode != 0:
            # Remember the most recent nonzero exit code
            final_errcode = single.errcode

    return BenchmarkResult(
        result=all_results,
        samples=all_samples,
        number=all_numbers,
        errcode=final_errcode,
        stderr=combined_stderr.strip(),
        profile=_combine_profile_data(all_profiles)
    )
def _run_benchmark_single_param(benchmark, spawner, param_idx,
                                profile, extra_params, cwd):
    """
    Run a benchmark, for single parameter combination index in case it
    is parameterized

    Parameters
    ----------
    benchmark : dict
        Benchmark object dict
    spawner : Spawner
        Benchmark process spawner
    param_idx : {int, None}
        Parameter index to run benchmark for
    profile : bool
        Whether to run with profile
    extra_params : dict
        Additional parameters to pass to the benchmark
    cwd : {str, None}
        Working directory to run the benchmark in.
        If None, run in a temporary directory.

    Returns
    -------
    result : BenchmarkResult
        Result data.
    """
    name = benchmark['name']
    if benchmark['params']:
        # Parameterized benchmarks are addressed as "<name>-<index>"
        name += '-%d' % (param_idx,)

    if profile:
        profile_fd, profile_path = tempfile.mkstemp()
        os.close(profile_fd)
    else:
        # The literal string 'None' is passed on the command line to
        # signal "no profiling" (presumably parsed by benchmark.py —
        # confirm against BENCHMARK_RUN_SCRIPT)
        profile_path = 'None'

    params_str = json.dumps(extra_params)

    if cwd is None:
        real_cwd = tempfile.mkdtemp()
    else:
        real_cwd = cwd

    # Results come back through a temp file written by the subprocess
    result_file = tempfile.NamedTemporaryFile(delete=False)
    try:
        result_file.close()

        out, errcode = spawner.run(
            name=name, params_str=params_str, profile_path=profile_path,
            result_file_name=result_file.name,
            timeout=benchmark['timeout'],
            cwd=real_cwd)

        if errcode != 0:
            # Subprocess failed; no usable result
            if errcode == util.TIMEOUT_RETCODE:
                out += "\n\nasv: benchmark timed out (timeout {0}s)\n".format(benchmark['timeout'])

            result = None
            samples = None
            number = None
        else:
            with open(result_file.name, 'r') as stream:
                data = stream.read()

            try:
                data = json.loads(data)
            except ValueError as exc:
                # Unparseable output is flagged with a sentinel exit code
                data = None
                errcode = JSON_ERROR_RETCODE
                out += "\n\nasv: failed to parse benchmark result: {0}\n".format(exc)

            # Special parsing for timing benchmark results
            if isinstance(data, dict) and 'samples' in data and 'number' in data:
                result = True
                samples = data['samples']
                number = data['number']
            else:
                result = data
                samples = None
                number = None

        if benchmark['params'] and out:
            # Prefix any output with the parameter combination it came from
            params, = itertools.islice(itertools.product(*benchmark['params']),
                                       param_idx, param_idx + 1)
            out = "For parameters: {0}\n{1}".format(", ".join(params), out)

        if profile:
            with io.open(profile_path, 'rb') as profile_fd:
                profile_data = profile_fd.read()
            # Empty profile file means no profile was produced
            profile_data = profile_data if profile_data else None
        else:
            profile_data = None

        return BenchmarkResult(
            result=[result],
            samples=[samples],
            number=[number],
            errcode=errcode,
            stderr=out.strip(),
            profile=profile_data)

    except KeyboardInterrupt:
        spawner.interrupt()
        raise util.UserError("Interrupted.")
    finally:
        # Always clean up the result file, profile dump, and any
        # temporary working directory we created
        os.remove(result_file.name)
        if profile:
            os.remove(profile_path)
        if cwd is None:
            util.long_path_rmtree(real_cwd, True)
class Spawner(object):
    """
    Manage launching individual benchmark.py commands
    """

    def __init__(self, env, benchmark_dir):
        self.env = env
        self.benchmark_dir = os.path.abspath(benchmark_dir)
        self.interrupted = False

    def interrupt(self):
        # Remember that the user aborted, so later cleanup can stay quiet
        self.interrupted = True

    def create_setup_cache(self, benchmark_id, timeout, params_str):
        """Run benchmark.py 'setup_cache' in a fresh temporary directory.

        Returns (cache_dir, None) on success, (None, error_output) on failure.
        """
        cache_dir = tempfile.mkdtemp()

        run_env = dict(os.environ)
        run_env.update(self.env.env_vars)

        out, _, errcode = self.env.run(
            [BENCHMARK_RUN_SCRIPT, 'setup_cache',
             os.path.abspath(self.benchmark_dir),
             benchmark_id, params_str],
            dots=False, display_error=False,
            return_stderr=True, valid_return_codes=None,
            redirect_stderr=True,
            cwd=cache_dir,
            timeout=timeout,
            env=run_env)

        if errcode == 0:
            return cache_dir, None

        # Failed: discard the half-built cache and report the output
        util.long_path_rmtree(cache_dir, True)
        out += '\nasv: setup_cache failed (exit status {})'.format(errcode)
        return None, out.strip()

    def run(self, name, params_str, profile_path, result_file_name, timeout, cwd):
        """Run benchmark.py 'run' for one benchmark; return (output, errcode)."""
        run_env = dict(os.environ)
        run_env.update(self.env.env_vars)

        out, _, errcode = self.env.run(
            [BENCHMARK_RUN_SCRIPT, 'run', os.path.abspath(self.benchmark_dir),
             name, params_str, profile_path, result_file_name],
            dots=False, timeout=timeout,
            display_error=False, return_stderr=True, redirect_stderr=True,
            valid_return_codes=None, cwd=cwd,
            env=run_env)
        return out, errcode

    def preimport(self):
        # Plain spawner has no persistent worker, so there is nothing to
        # preimport; report success with no output.
        return True, ""

    def close(self):
        # No persistent resources to release.
        pass
class ForkServer(Spawner):
    """Spawner that keeps a persistent server process running inside the
    benchmark environment and talks to it over a Unix domain socket.

    POSIX-only (requires ``os.fork`` and ``os.setpgid``).
    """

    def __init__(self, env, root):
        super(ForkServer, self).__init__(env, root)

        if not (hasattr(os, 'fork') and hasattr(os, 'setpgid')):
            raise RuntimeError("ForkServer only available on POSIX")

        self.tmp_dir = tempfile.mkdtemp(prefix='asv-forkserver-')
        self.socket_name = os.path.join(self.tmp_dir, 'socket')

        env_vars = dict(os.environ)
        env_vars.update(env.env_vars)

        # Long-lived server process; individual runs are requested from it
        # via _send_command rather than spawning a fresh interpreter.
        self.server_proc = env.run(
            [BENCHMARK_RUN_SCRIPT, 'run_server', self.benchmark_dir, self.socket_name],
            return_popen=True,
            redirect_stderr=True,
            env=env_vars)

        self._server_output = None
        # Drain the server's stdout in the background so its pipe never
        # fills up; output is reported on close().
        self.stdout_reader_thread = threading.Thread(target=self._stdout_reader)
        self.stdout_reader_thread.start()

        # Wait for the socket to appear
        while self.stdout_reader_thread.is_alive():
            if os.path.exists(self.socket_name):
                break
            time.sleep(0.05)

        if not os.path.exists(self.socket_name):
            # Server died (or never bound its socket) before startup finished
            os.rmdir(self.tmp_dir)
            raise RuntimeError("Failed to start server thread")

    def _stdout_reader(self):
        # Background thread: block until the server closes its stdout,
        # then stash everything it printed.
        try:
            out = self.server_proc.stdout.read()
            self.server_proc.stdout.close()
            out = out.decode('utf-8', 'replace')
        except Exception as exc:
            import traceback
            out = traceback.format_exc()
        self._server_output = out

    def run(self, name, params_str, profile_path, result_file_name, timeout, cwd):
        """Ask the server to run one benchmark; return (output, errcode)."""
        msg = {'action': 'run',
               'benchmark_id': name,
               'params_str': params_str,
               'profile_path': profile_path,
               'result_file': result_file_name,
               'timeout': timeout,
               'cwd': cwd}
        result = self._send_command(msg)
        return result['out'], result['errcode']

    def preimport(self):
        """Ask the server to import the benchmark suite up front.

        Returns (success, output); a crash of the runner is reported as
        failure with the traceback in the output.
        """
        success = True
        out = ""
        try:
            out = self._send_command({'action': 'preimport'})
        except Exception as exc:
            success = False
            out = "asv: benchmark runner crashed\n"
            if isinstance(exc, util.UserError):
                out += str(exc)
            else:
                out += traceback.format_exc()
        out = out.rstrip()
        return success, out

    def _send_command(self, msg):
        # Wire format: 8-byte little-endian length prefix + JSON payload,
        # same framing in both directions.
        msg = json.dumps(msg)
        if sys.version_info[0] >= 3:
            msg = msg.encode('utf-8')

        # Connect (with wait+retry)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        for retry in range(5, 0, -1):
            try:
                s.connect(self.socket_name)
                break
            except socket.error:
                if retry > 1:
                    time.sleep(0.2)
                else:
                    raise

        # Send command
        try:
            s.sendall(struct.pack('<Q', len(msg)))
            s.sendall(msg)

            # Read result
            read_size, = struct.unpack('<Q', util.recvall(s, 8))
            result_text = util.recvall(s, read_size)
            if sys.version_info[0] >= 3:
                result_text = result_text.decode('utf-8')
            result = json.loads(result_text)
        except Exception:
            # If the server died, surface its exit code instead of the
            # (less informative) socket error
            exitcode = self.server_proc.poll()
            if exitcode is not None:
                raise util.UserError("Process exited with code {0}".format(exitcode))
            raise
        finally:
            s.close()

        return result

    def close(self):
        import signal

        # Check for termination: escalate SIGINT -> short wait -> SIGKILL
        if self.server_proc.poll() is None:
            util._killpg_safe(self.server_proc.pid, signal.SIGINT)
        if self.server_proc.poll() is None:
            time.sleep(0.1)
        if self.server_proc.poll() is None:
            # Kill process group
            util._killpg_safe(self.server_proc.pid, signal.SIGKILL)

        self.server_proc.wait()
        self.stdout_reader_thread.join()

        # Report captured server output, unless the user interrupted
        if self._server_output and not self.interrupted:
            with log.indent():
                log.error("asv: forkserver:")
                log.error(self._server_output)

        util.long_path_rmtree(self.tmp_dir)
def _combine_profile_data(datasets):
"""
Combine a list of profile data to a single profile
"""
datasets = [data for data in datasets if data is not None]
if not datasets:
return None
elif len(datasets) == 1:
return datasets[0]
# Load and combine stats
stats = None
while datasets:
data = datasets.pop(0)
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.write(data)
f.close()
if stats is None:
stats = pstats.Stats(f.name)
else:
stats.add(f.name)
finally:
os.remove(f.name)
# Write combined stats out
f = tempfile.NamedTemporaryFile(delete=False)
try:
f.close()
stats.dump_stats(f.name)
with open(f.name, 'rb') as fp:
return fp.read()
finally:
os.remove(f.name)
| |
#
# This file is part of pySMT.
#
# Copyright 2015 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from math import log, ceil
from pysmt.exceptions import SolverAPINotFound
try:
import pyboolector
except ImportError:
raise SolverAPINotFound
from pysmt.solvers.solver import (IncrementalTrackingSolver, UnsatCoreSolver,
Converter, SolverOptions)
from pysmt.solvers.smtlib import SmtLibBasicSolver, SmtLibIgnoreMixin
from pysmt.solvers.eager import EagerModel
from pysmt.walkers import DagWalker
from pysmt.exceptions import (SolverReturnedUnknownResultError,
ConvertExpressionError, PysmtValueError)
from pysmt.decorators import clear_pending_pop, catch_conversion_error
from pysmt.logics import QF_BV, QF_UFBV, QF_ABV, QF_AUFBV, QF_AX
from pysmt.constants import to_python_integer
class BoolectorOptions(SolverOptions):
    """Options handler for the Boolector solver.

    Validates pySMT-level options in ``__init__`` and applies them to a
    Boolector instance when called (``__call__``).
    """

    def __init__(self, **base_options):
        SolverOptions.__init__(self, **base_options)
        # Boolector exposes no seed control through this wrapper
        if self.random_seed is not None:
            raise PysmtValueError("BTOR Does not support Random Seed setting.")

        # Disabling Incremental usage is not allowed.
        # This needs to be set to 1
        self.incrementality = True
        # All pyboolector option codes this wrapper knows how to set;
        # _set_option maps their long names back to these codes.
        self.internal_options = [pyboolector.BTOR_OPT_MODEL_GEN,
                                 pyboolector.BTOR_OPT_INCREMENTAL,
                                 pyboolector.BTOR_OPT_INCREMENTAL_SMT1,
                                 pyboolector.BTOR_OPT_INPUT_FORMAT,
                                 pyboolector.BTOR_OPT_OUTPUT_NUMBER_FORMAT,
                                 pyboolector.BTOR_OPT_OUTPUT_FORMAT,
                                 pyboolector.BTOR_OPT_ENGINE,
                                 pyboolector.BTOR_OPT_SAT_ENGINE,
                                 pyboolector.BTOR_OPT_AUTO_CLEANUP,
                                 pyboolector.BTOR_OPT_PRETTY_PRINT,
                                 pyboolector.BTOR_OPT_EXIT_CODES,
                                 pyboolector.BTOR_OPT_SEED,
                                 pyboolector.BTOR_OPT_VERBOSITY,
                                 pyboolector.BTOR_OPT_LOGLEVEL,
                                 pyboolector.BTOR_OPT_REWRITE_LEVEL,
                                 pyboolector.BTOR_OPT_SKELETON_PREPROC,
                                 pyboolector.BTOR_OPT_ACKERMANN,
                                 pyboolector.BTOR_OPT_BETA_REDUCE,
                                 pyboolector.BTOR_OPT_ELIMINATE_SLICES,
                                 pyboolector.BTOR_OPT_VAR_SUBST,
                                 pyboolector.BTOR_OPT_UCOPT,
                                 pyboolector.BTOR_OPT_MERGE_LAMBDAS,
                                 pyboolector.BTOR_OPT_EXTRACT_LAMBDAS,
                                 pyboolector.BTOR_OPT_NORMALIZE,
                                 pyboolector.BTOR_OPT_NORMALIZE_ADD,
                                 pyboolector.BTOR_OPT_FUN_PREPROP,
                                 pyboolector.BTOR_OPT_FUN_PRESLS,
                                 pyboolector.BTOR_OPT_FUN_DUAL_PROP,
                                 pyboolector.BTOR_OPT_FUN_DUAL_PROP_QSORT,
                                 pyboolector.BTOR_OPT_FUN_JUST,
                                 pyboolector.BTOR_OPT_FUN_JUST_HEURISTIC,
                                 pyboolector.BTOR_OPT_FUN_LAZY_SYNTHESIZE,
                                 pyboolector.BTOR_OPT_FUN_EAGER_LEMMAS,
                                 pyboolector.BTOR_OPT_FUN_STORE_LAMBDAS,
                                 pyboolector.BTOR_OPT_SLS_NFLIPS,
                                 pyboolector.BTOR_OPT_SLS_STRATEGY,
                                 pyboolector.BTOR_OPT_SLS_JUST,
                                 pyboolector.BTOR_OPT_SLS_MOVE_GW,
                                 pyboolector.BTOR_OPT_SLS_MOVE_RANGE,
                                 pyboolector.BTOR_OPT_SLS_MOVE_SEGMENT,
                                 pyboolector.BTOR_OPT_SLS_MOVE_RAND_WALK,
                                 pyboolector.BTOR_OPT_SLS_PROB_MOVE_RAND_WALK,
                                 pyboolector.BTOR_OPT_SLS_MOVE_RAND_ALL,
                                 pyboolector.BTOR_OPT_SLS_MOVE_RAND_RANGE,
                                 pyboolector.BTOR_OPT_SLS_MOVE_PROP,
                                 pyboolector.BTOR_OPT_SLS_MOVE_PROP_N_PROP,
                                 pyboolector.BTOR_OPT_SLS_MOVE_PROP_N_SLS,
                                 pyboolector.BTOR_OPT_SLS_MOVE_PROP_FORCE_RW,
                                 pyboolector.BTOR_OPT_SLS_MOVE_INC_MOVE_TEST,
                                 pyboolector.BTOR_OPT_SLS_USE_RESTARTS,
                                 pyboolector.BTOR_OPT_SLS_USE_BANDIT,
                                 pyboolector.BTOR_OPT_PROP_NPROPS,
                                 pyboolector.BTOR_OPT_PROP_USE_RESTARTS,
                                 pyboolector.BTOR_OPT_PROP_USE_BANDIT,
                                 pyboolector.BTOR_OPT_PROP_PATH_SEL,
                                 pyboolector.BTOR_OPT_PROP_PROB_USE_INV_VALUE,
                                 pyboolector.BTOR_OPT_PROP_PROB_FLIP_COND,
                                 pyboolector.BTOR_OPT_PROP_PROB_FLIP_COND_CONST,
                                 pyboolector.BTOR_OPT_PROP_FLIP_COND_CONST_DELTA,
                                 pyboolector.BTOR_OPT_PROP_FLIP_COND_CONST_NPATHSEL,
                                 pyboolector.BTOR_OPT_PROP_PROB_SLICE_KEEP_DC,
                                 pyboolector.BTOR_OPT_PROP_PROB_CONC_FLIP,
                                 pyboolector.BTOR_OPT_PROP_PROB_SLICE_FLIP,
                                 pyboolector.BTOR_OPT_PROP_PROB_EQ_FLIP,
                                 pyboolector.BTOR_OPT_PROP_PROB_AND_FLIP,
                                 pyboolector.BTOR_OPT_PROP_NO_MOVE_ON_CONFLICT,
                                 pyboolector.BTOR_OPT_AIGPROP_USE_RESTARTS,
                                 pyboolector.BTOR_OPT_AIGPROP_USE_BANDIT,
                                 pyboolector.BTOR_OPT_QUANT_SYNTH,
                                 pyboolector.BTOR_OPT_QUANT_DUAL_SOLVER,
                                 pyboolector.BTOR_OPT_QUANT_SYNTH_LIMIT,
                                 pyboolector.BTOR_OPT_QUANT_SYNTH_QI,
                                 pyboolector.BTOR_OPT_QUANT_DER,
                                 pyboolector.BTOR_OPT_QUANT_CER,
                                 pyboolector.BTOR_OPT_QUANT_MINISCOPE,
                                 pyboolector.BTOR_OPT_SORT_EXP,
                                 pyboolector.BTOR_OPT_SORT_AIG,
                                 pyboolector.BTOR_OPT_SORT_AIGVEC,
                                 pyboolector.BTOR_OPT_AUTO_CLEANUP_INTERNAL,
                                 pyboolector.BTOR_OPT_SIMPLIFY_CONSTRAINTS,
                                 pyboolector.BTOR_OPT_CHK_FAILED_ASSUMPTIONS,
                                 pyboolector.BTOR_OPT_CHK_MODEL,
                                 pyboolector.BTOR_OPT_CHK_UNCONSTRAINED,
                                 pyboolector.BTOR_OPT_PARSE_INTERACTIVE,
                                 pyboolector.BTOR_OPT_SAT_ENGINE_LGL_FORK,
                                 pyboolector.BTOR_OPT_SAT_ENGINE_CADICAL_FREEZE,
                                 pyboolector.BTOR_OPT_SAT_ENGINE_N_THREADS,
                                 pyboolector.BTOR_OPT_SIMP_NORMAMLIZE_ADDERS,
                                 pyboolector.BTOR_OPT_DECLSORT_BV_WIDTH,
                                 pyboolector.BTOR_OPT_QUANT_SYNTH_ITE_COMPLETE,
                                 pyboolector.BTOR_OPT_QUANT_FIXSYNTH,
                                 pyboolector.BTOR_OPT_RW_ZERO_LOWER_SLICE,
                                 pyboolector.BTOR_OPT_NONDESTR_SUBST]

    def _set_option(self, btor, name, value):
        # Map each option's long name (e.g. "model-gen") to its code
        available_options = {pyboolector.BoolectorOpt(btor, io).lng : io
                             for io in self.internal_options}
        try:
            btor.Set_opt(available_options[name], value)
        except TypeError:
            raise PysmtValueError("Error setting the option '%s=%s'" \
                                  % (name,value))
        except pyboolector.BoolectorException:
            raise PysmtValueError("Error setting the option '%s=%s'" \
                                  % (name,value))
        except KeyError:
            # NOTE(review): message below has a doubled word ("options
            # options"); left unchanged since it is a runtime string.
            raise PysmtValueError("Unable to set non-existing option '%s'. "
                                  "The accepted options options are: %s" \
                                  % (name, ", ".join(pyboolector.BoolectorOpt(btor, io).lng
                                                     for io in self.internal_options)))

    def __call__(self, solver):
        # Apply stored settings to the solver's Boolector instance
        if self.generate_models:
            self._set_option(solver.btor, "model-gen", 1)
        else:
            self._set_option(solver.btor, "model-gen", 0)
        if self.incrementality:
            self._set_option(solver.btor, "incremental", 1)

        for k,v in self.solver_options.items():
            # Note: Options values in btor are mostly integers
            self._set_option(solver.btor, str(k), v)

# EOC BoolectorOptions
class BoolectorSolver(IncrementalTrackingSolver, UnsatCoreSolver,
                      SmtLibBasicSolver, SmtLibIgnoreMixin):
    """pySMT solver wrapper around Boolector (pyboolector).

    Supports the quantifier-free bit-vector and array logics.  When unsat
    cores are requested, formulae are added as Boolector *assumptions*
    rather than permanent assertions so that ``Failed`` can later report
    which ones contributed to unsatisfiability (see ``_add_assertion``
    and ``_solve``).
    """

    LOGICS = [QF_BV, QF_UFBV, QF_ABV, QF_AUFBV, QF_AX]
    OptionsClass = BoolectorOptions

    def __init__(self, environment, logic, **options):
        IncrementalTrackingSolver.__init__(self,
                                           environment=environment,
                                           logic=logic,
                                           **options)
        self.btor = pyboolector.Boolector()
        # Apply the validated BoolectorOptions to the fresh instance
        self.options(self)
        self.converter = BTORConverter(environment, self.btor)
        self.mgr = environment.formula_manager
        # NOTE: declared symbols are tracked by the converter
        # (converter.declared_vars); this dict is kept for interface
        # compatibility with other pySMT solvers but is never populated
        # here (declare_variable is NotImplemented).
        self.declarations = {}
        # Maps asserted formula -> user-provided name (named unsat cores)
        self._named_assertions = {}
        # (removed dead code: stray `return`, mislabeled
        # "# EOC BoolectorOptions" comment and a no-op `pass`)

    @clear_pending_pop
    def _reset_assertions(self):
        # Boolector has no way to retract assertions: start over with a
        # brand-new solver instance and converter.
        self.btor = pyboolector.Boolector()
        self.options(self)
        self.converter = BTORConverter(self.environment, self.btor)
        self.declarations = {}

    @clear_pending_pop
    def declare_variable(self, var):
        # Symbols are declared lazily during conversion (walk_symbol)
        raise NotImplementedError

    @clear_pending_pop
    def _add_assertion(self, formula, named=None):
        """Assert `formula`; in unsat-core mode it is added as an assumption."""
        self._assert_is_boolean(formula)
        term = self.converter.convert(formula)
        if self.options.unsat_cores_mode is None:
            self.btor.Assert(term)
        else:
            if self.options.unsat_cores_mode == "named" and named is not None:
                self._named_assertions[formula] = named
            # need to use assumptions to get unsat cores
            self.btor.Assume(term)
        return formula

    def get_model(self):
        """Return an EagerModel with the value of every declared symbol."""
        assignment = {}
        for sym in self.converter.declared_vars:
            assignment[sym] = self.get_value(sym)
        return EagerModel(assignment=assignment, environment=self.environment)

    @clear_pending_pop
    def _solve(self, assumptions=None):
        """Check satisfiability; True/False, or raise on an unknown answer."""
        if assumptions is not None:
            btor_assumptions = [self.converter.convert(a) for a in assumptions]
            self.btor.Assume(*btor_assumptions)

        res = self.btor.Sat()

        # need to re-add assumptions if in unsat-core mode
        # which uses Assume instead of Assert (assumptions are
        # consumed by the Sat() call)
        if self.options.unsat_cores_mode is not None:
            for a in self._assertion_stack:
                self._add_assertion(a)

        if res == self.btor.SAT:
            return True
        elif res == self.btor.UNSAT:
            return False
        else:
            raise SolverReturnedUnknownResultError

    def get_unsat_core(self):
        """After a call to solve() yielding UNSAT, returns the unsat core as a
        set of formulae"""
        self._check_unsat_core_config()
        if self.options.unsat_cores_mode == 'all':
            unsat_core = set()
            # relies on this assertion stack being ordered
            assert isinstance(self._assertion_stack, list)
            btor_assertions = [self.converter.convert(a)
                               for a in self._assertion_stack]
            in_unsat_core = self.btor.Failed(*btor_assertions)
            for a, in_core in zip(self._assertion_stack, in_unsat_core):
                if in_core:
                    unsat_core.add(a)
            return unsat_core
        else:
            return self.get_named_unsat_core().values()

    def get_named_unsat_core(self):
        """After a call to solve() yielding UNSAT, returns the unsat core as a
        dict of names to formulae"""
        self._check_unsat_core_config()
        if self.options.unsat_cores_mode == "named":
            unsat_core = {}
            # relies on this assertion stack being ordered
            assert isinstance(self._assertion_stack, list)
            btor_named_assertions = [self.converter.convert(a)
                                     for a in self._named_assertions.keys()]
            in_unsat_core = self.btor.Failed(*btor_named_assertions)
            for a, in_core in zip(self._assertion_stack, in_unsat_core):
                if in_core:
                    name = self._named_assertions[a]
                    unsat_core[name] = a
            return unsat_core
        else:
            # Fall back to synthetic names over the plain unsat core
            return dict(("_a%d" % i, f)
                        for i, f in enumerate(self.get_unsat_core()))

    @clear_pending_pop
    def _push(self, levels=1):
        self.btor.Push(levels)

    @clear_pending_pop
    def _pop(self, levels=1):
        self.btor.Pop(levels)

    def print_model(self, name_filter=None):
        """Print symbol assignments; `name_filter` EXCLUDES symbols whose
        name starts with it (original filtering semantics preserved)."""
        # Bug fix: iterate the converter's declared_vars.  The original
        # iterated self.declarations, which nothing in this class ever
        # populates (declare_variable raises NotImplementedError), so the
        # loop printed nothing at all.
        for var in self.converter.declared_vars:
            if name_filter is None or not var.symbol_name().startswith(name_filter):
                print("%s = %s" % (var.symbol_name(), self.get_value(var)))

    def get_value(self, item):
        """Evaluate `item` (of BV, Bool or Array(BV, BV) type) in the model."""
        self._assert_no_function_type(item)
        itype = item.get_type()
        titem = self.converter.convert(item)
        if itype.is_bv_type():
            return self.mgr.BV(titem.assignment, item.bv_width())
        elif itype.is_bool_type():
            return self.mgr.Bool(bool(int(titem.assignment)))
        else:
            assert itype.is_array_type()
            assert itype.index_type.is_bv_type()
            assert itype.elem_type.is_bv_type()
            idx_width = itype.index_type.width
            val_width = itype.elem_type.width
            assign = {}
            for (idx, val) in titem.assignment:
                assign[self.mgr.BV(idx, idx_width)] = self.mgr.BV(val, val_width)
            # Rebuild the array as "default 0" plus the explicit writes
            # reported by Boolector
            return self.mgr.Array(itype.index_type,
                                  self.mgr.BV(0, val_width), assign)

    def _exit(self):
        # Drop the reference so pyboolector can release the solver
        del self.btor

# EOC BoolectorSolver
class BTORConverter(Converter, DagWalker):
    """Converts pySMT formulae into Boolector terms via a memoized DAG walk."""

    def __init__(self, environment, btor):
        DagWalker.__init__(self, environment)
        self.mgr = environment.formula_manager
        self._get_type = environment.stc.get_type
        self._back_memoization = {}
        self._btor = btor
        # pySMT symbol -> BTOR node, filled lazily by walk_symbol
        self.declared_vars = {}
        # pySMT function symbol -> BTOR UF, filled lazily by declare_function
        self.function_declarations = {}
        return
    @catch_conversion_error
    def convert(self, formula):
        """Convert a pySMT formula into a Boolector term (memoized DAG walk)."""
        return self.walk(formula)

    def back(self, expr):
        # Conversion from Boolector terms back to pySMT is not supported
        raise NotImplementedError
def declare_function(self, formula):
name = formula.function_name()
if name not in self.function_declarations:
tp = self._type_to_btor(name.symbol_type())
decl = self._btor.UF(tp)
self.function_declarations[name] = decl
else:
decl = self.function_declarations[name]
return decl
    def walk_and(self, formula, args, **kwargs):
        # n-ary conjunction, folded left-associatively into binary And
        # (at least two operands expected, as asserted)
        assert len(args) >= 2
        res = self._btor.And(args[0], args[1])
        for conj in args[2:]:
            res = self._btor.And(res, conj)
        return res

    def walk_or(self, formula, args, **kwargs):
        # n-ary disjunction, folded left-associatively into binary Or
        assert len(args) >= 2
        res = self._btor.Or(args[0], args[1])
        for disj in args[2:]:
            res = self._btor.Or(res, disj)
        return res

    def walk_not(self, formula, args, **kwargs):
        return self._btor.Not(args[0])
def walk_symbol(self, formula, **kwargs):
symbol_type = formula.symbol_type()
if symbol_type.is_bool_type():
res = self._btor.Var(self._btor.BitVecSort(1), formula.symbol_name())
elif symbol_type.is_real_type():
raise ConvertExpressionError
elif symbol_type.is_int_type():
raise ConvertExpressionError
elif symbol_type.is_array_type():
# BTOR supports only Arrays of Type (BV, BV)
index_type = symbol_type.index_type
elem_type = symbol_type.elem_type
if not (index_type.is_bv_type() and elem_type.is_bv_type()):
raise ConvertExpressionError("BTOR supports only Array(BV,BV). "\
"Type '%s' was given." % str(symbol_type))
res = self._btor.Array(self._btor.ArraySort(self._btor.BitVecSort(index_type.width),
self._btor.BitVecSort(elem_type.width)),
formula.symbol_name())
elif symbol_type.is_bv_type():
res = self._btor.Var(self._btor.BitVecSort(formula.bv_width()),
formula.symbol_name())
else:
raise SolverReturnedUnknownResultError("Unknown type for BTOR")
self.declared_vars[formula] = res
return res
    def walk_iff(self, formula, args, **kwargs):
        return self._btor.Iff(*args)

    def walk_implies(self, formula, args, **kwargs):
        return self._btor.Implies(*args)

    def walk_ite(self, formula, args, **kwargs):
        # args are (condition, then-branch, else-branch)
        return self._btor.Cond(*args)

    def walk_bool_constant(self, formula, **kwargs):
        return self._btor.Const(formula.constant_value())

    def walk_equals(self, formula, args, **kwargs):
        # _extend_bv_equal_width (defined later in the class) presumably
        # pads both operands to a common width before comparison
        args = self._extend_bv_equal_width(*args)
        return self._btor.Eq(*args)

    def walk_function(self, formula, args, **kwargs):
        # Apply the (lazily declared) uninterpreted function to its args
        _uf = self.declare_function(formula)
        return _uf(*args)
def walk_bv_constant(self, formula, **kwargs):
    """Bit-vector literal: BTOR constant of the formula's declared width."""
    bv_value = to_python_integer(formula.constant_value())
    bv_width = formula.bv_width()
    return self._btor.Const(bv_value, bv_width)
def walk_bv_ult(self, formula, args, **kwargs):
    """Unsigned less-than."""
    lhs, rhs = args[0], args[1]
    return self._btor.Ult(lhs, rhs)

def walk_bv_ule(self, formula, args, **kwargs):
    """Unsigned less-than-or-equal."""
    lhs, rhs = args[0], args[1]
    return self._btor.Ulte(lhs, rhs)

def walk_bv_concat(self, formula, args, **kwargs):
    """Concatenation, high part first."""
    high_part, low_part = args[0], args[1]
    return self._btor.Concat(high_part, low_part)

def walk_bv_extract(self, formula, args, **kwargs):
    """Bit slice; BTOR's Slice takes (node, high-bit, low-bit)."""
    low_bit = formula.bv_extract_start()
    high_bit = formula.bv_extract_end()
    return self._btor.Slice(args[0], high_bit, low_bit)
def walk_bv_or(self, formula, args, **kwargs):
    """Bitwise OR reuses the n-ary boolean OR lowering."""
    return self.walk_or(formula, args, **kwargs)

def walk_bv_not(self, formula, args, **kwargs):
    """Bitwise NOT reuses the boolean NOT lowering."""
    return self.walk_not(formula, args, **kwargs)

def walk_bv_and(self, formula, args, **kwargs):
    """Bitwise AND reuses the n-ary boolean AND lowering."""
    return self.walk_and(formula, args, **kwargs)

def walk_bv_xor(self, formula, args, **kwargs):
    """Bitwise XOR of the two operands."""
    lhs, rhs = args[0], args[1]
    return self._btor.Xor(lhs, rhs)
def walk_bv_add(self, formula, args, **kwargs):
    """Modular BV addition."""
    lhs, rhs = args[0], args[1]
    return self._btor.Add(lhs, rhs)

def walk_bv_sub(self, formula, args, **kwargs):
    """Modular BV subtraction."""
    lhs, rhs = args[0], args[1]
    return self._btor.Sub(lhs, rhs)

def walk_bv_neg(self, formula, args, **kwargs):
    """Two's-complement negation via the node's unary minus."""
    operand = args[0]
    return -operand

def walk_bv_mul(self, formula, args, **kwargs):
    """Modular BV multiplication via the node's * operator."""
    factor_a, factor_b = args[0], args[1]
    return factor_a * factor_b

def walk_bv_udiv(self, formula, args, **kwargs):
    """Unsigned BV division via the node's / operator."""
    dividend, divisor = args[0], args[1]
    return dividend / divisor

def walk_bv_urem(self, formula, args, **kwargs):
    """Unsigned BV remainder."""
    dividend, divisor = args[0], args[1]
    return self._btor.Urem(dividend, divisor)
def walk_bv_lshl(self, formula, args, **kwargs):
    """Logical shift left."""
    value, shift_amount = args[0], args[1]
    return self._btor.Sll(value, shift_amount)

def walk_bv_lshr(self, formula, args, **kwargs):
    """Logical shift right."""
    value, shift_amount = args[0], args[1]
    return self._btor.Srl(value, shift_amount)

def walk_bv_rol(self, formula, args, **kwargs):
    """Rotate left by the constant step stored on the formula."""
    steps = formula.bv_rotation_step()
    return self._btor.Rol(args[0], steps)

def walk_bv_ror(self, formula, args, **kwargs):
    """Rotate right by the constant step stored on the formula."""
    steps = formula.bv_rotation_step()
    return self._btor.Ror(args[0], steps)

def walk_bv_zext(self, formula, args, **kwargs):
    """Zero-extension by the formula's extend step."""
    extra_bits = formula.bv_extend_step()
    return self._btor.Uext(args[0], extra_bits)

def walk_bv_sext(self, formula, args, **kwargs):
    """Sign-extension by the formula's extend step."""
    extra_bits = formula.bv_extend_step()
    return self._btor.Sext(args[0], extra_bits)
def walk_bv_slt(self, formula, args, **kwargs):
    """Signed less-than."""
    lhs, rhs = args[0], args[1]
    return self._btor.Slt(lhs, rhs)

def walk_bv_sle(self, formula, args, **kwargs):
    """Signed less-than-or-equal."""
    lhs, rhs = args[0], args[1]
    return self._btor.Slte(lhs, rhs)

def walk_bv_comp(self, formula, args, **kwargs):
    """BV comparison (bvcomp): 1-bit equality of the operands."""
    lhs, rhs = args[0], args[1]
    return self._btor.Eq(lhs, rhs)

def walk_bv_sdiv(self, formula, args, **kwargs):
    """Signed BV division."""
    dividend, divisor = args[0], args[1]
    return self._btor.Sdiv(dividend, divisor)

def walk_bv_srem(self, formula, args, **kwargs):
    """Signed BV remainder."""
    dividend, divisor = args[0], args[1]
    return self._btor.Srem(dividend, divisor)

def walk_bv_ashr (self, formula, args, **kwargs):
    """Arithmetic shift right."""
    value, shift_amount = args[0], args[1]
    return self._btor.Sra(value, shift_amount)
def walk_array_store(self, formula, args, **kwargs):
    """Array update: write value at index."""
    array_node, index_node, value_node = args[0], args[1], args[2]
    return self._btor.Write(array_node, index_node, value_node)

def walk_array_select(self, formula, args, **kwargs):
    """Array read at index."""
    array_node, index_node = args[0], args[1]
    return self._btor.Read(array_node, index_node)

def walk_array_value(self, formula, args, **kwargs):
    """Constant arrays cannot be expressed in BTOR."""
    raise ConvertExpressionError("btor does not support constant arrays")
def _type_to_btor(self, tp):
    """Map a pySMT type to the corresponding BTOR sort.

    Bool, BV and function types are supported; Real, Int and Array
    types cannot be declared this way and raise ConvertExpressionError.
    """
    if tp.is_bool_type():
        return self._btor.BoolSort()
    if tp.is_real_type() or tp.is_int_type():
        # Unbounded arithmetic is not expressible in BTOR.
        raise ConvertExpressionError
    if tp.is_bv_type():
        return self._btor.BitVecSort(tp.width)
    if tp.is_array_type():
        raise ConvertExpressionError("Unsupported Array Type")
    assert tp.is_function_type() , "Unsupported type '%s'" % tp
    param_sorts = [self._type_to_btor(param) for param in tp.param_types]
    return_sort = self._type_to_btor(tp.return_type)
    return self._btor.FunSort(param_sorts, return_sort)
def _extend_bv_pow2(self, btor_formula, signed=False):
    """Extend *btor_formula* so that its width is a power of 2.

    BTOR requires that many operands have width that is a power of 2.
    The target width is computed with exact integer arithmetic
    (``1 << (w - 1).bit_length()``) instead of the previous
    ``2**int(ceil(log(w, 2)))``: ``log(w, 2)`` is evaluated in floating
    point and can land just above an integer for exact powers of two,
    which would needlessly double the width.

    :param btor_formula: BTOR node to widen (width assumed >= 1).
    :param signed: sign-extend when True, zero-extend otherwise.
    :returns: the node unchanged if its width is already a power of 2,
        otherwise an extended copy.
    """
    width = btor_formula.width
    # Smallest power of two >= width, computed exactly on integers.
    target_width = 1 << (width - 1).bit_length()
    # Skip if width is ok
    if target_width == width:
        return btor_formula
    extension = target_width - width
    if signed:
        return self._btor.Sext(btor_formula, extension)
    return self._btor.Uext(btor_formula, extension)
def _extend_bv_equal_width(self, arg1, arg2):
    """Zero-extend the narrower operand so both share the same width."""
    width_gap = arg1.width - arg2.width
    if width_gap == 0:
        # Already the same width: nothing to do.
        return (arg1, arg2)
    if width_gap > 0:
        return (arg1, self._btor.Uext(arg2, width_gap))
    return (self._btor.Uext(arg1, -width_gap), arg2)
| |
#import ddapp
from ddapp import cameraview
from ddapp import transformUtils
from ddapp import visualization as vis
from ddapp import objectmodel as om
from ddapp import ik
from ddapp.ikparameters import IkParameters
from ddapp.ikplanner import ConstraintSet
from ddapp import polarisplatformplanner
from ddapp import robotstate
from ddapp import segmentation
from ddapp import sitstandplanner
from ddapp.timercallback import TimerCallback
from ddapp import visualization as vis
from ddapp import planplayback
from ddapp import lcmUtils
from ddapp import affordancepanel
from ddapp.uuidutil import newUUID
import os
import functools
import numpy as np
import scipy.io
import vtkAll as vtk
import bot_core as lcmbotcore
from ddapp.tasks.taskuserpanel import TaskUserPanel
import ddapp.tasks.robottasks as rt
from ddapp import filterUtils
from ddapp import ioUtils
class PolarisModel(object):
    """Affordance model of the Polaris vehicle used for egress planning.

    Spawns the vehicle mesh affordance plus a set of hard-coded,
    hand-calibrated frames relative to it (left-foot waypoints, steering
    wheel, pedal, right-hand grab bar) and keeps them rigidly attached
    through a FrameSync.  The whole model is re-localized each time an
    AprilTag detection arrives on the 'APRIL_TAG_TO_CAMERA_LEFT' channel.
    """

    def __init__(self):
        # NOTE(review): attribute name 'aprilTagSubsciber' is a typo but is
        # part of the public attribute surface — kept as-is.
        self.aprilTagSubsciber = lcmUtils.addSubscriber('APRIL_TAG_TO_CAMERA_LEFT', lcmbotcore.rigid_transform_t, self.onAprilTag)
        # Vehicle mesh affordance at the identity pose; its child frame is the
        # model origin that everything below is parented to.
        pose = transformUtils.poseFromTransform(vtk.vtkTransform())
        desc = dict(classname='MeshAffordanceItem', Name='polaris',
                    Filename='software/models/polaris/polaris_cropped.vtp', pose=pose)
        self.pointcloudAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
        self.originFrame = self.pointcloudAffordance.getChildFrame()
        # Calibration offset between the model origin and the AprilTag frame.
        self.originToAprilTransform = transformUtils.transformFromPose(np.array([-0.038508 , -0.00282131, -0.01000079]),
                                                                       np.array([ 9.99997498e-01, -2.10472556e-03, -1.33815696e-04, 7.46246794e-04])) # offset for . . . who knows why
        # Left-foot waypoint frames for the egress swing, all hand-tuned.
        # t = transformUtils.transformFromPose(np.array([ 0.14376024, 0.95920689, 0.36655712]), np.array([ 0.28745842, 0.90741428, -0.28822068, 0.10438304]))
        t = transformUtils.transformFromPose(np.array([ 0.10873244, 0.93162364, 0.40509084]),
                                             np.array([ 0.32997378, 0.88498408, -0.31780588, 0.08318602]))
        self.leftFootEgressStartFrame = vis.updateFrame(t, 'left foot start', scale=0.2,visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([ 0.265, 0.874, 0.274]),
                                             np.array([ 0.35290731, 0.93443693, -0.04181263, 0.02314636]))
        self.leftFootEgressMidFrame = vis.updateFrame(t, 'left foot mid', scale=0.2,visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([ 0.54339115, 0.89436275, 0.26681047]),
                                             np.array([ 0.34635985, 0.93680077, -0.04152008, 0.02674412]))
        self.leftFootEgressOutsideFrame = vis.updateFrame(t, 'left foot outside', scale=0.2,visible=True, parent=self.pointcloudAffordance)
        # Steering wheel affordance; earlier calibrations kept for reference.
        # pose = [np.array([-0.78962299, 0.44284877, -0.29539116]), np.array([ 0.54812954, 0.44571517, -0.46063251, 0.53731713])] #old location
        # pose = [np.array([-0.78594663, 0.42026626, -0.23248139]), np.array([ 0.54812954, 0.44571517, -0.46063251, 0.53731713])] # updated location
        pose = [np.array([-0.78594663, 0.42026626, -0.23248139]), np.array([ 0.53047159, 0.46554963, -0.48086192, 0.52022615])] # update orientation
        desc = dict(classname='CapsuleRingAffordanceItem', Name='Steering Wheel', uuid=newUUID(), pose=pose,
                    Color=[1, 0, 0], Radius=float(0.18), Segments=20)
        self.steeringWheelAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
        # Accelerator pedal modeled as a simple box.
        pose = [np.array([-0.05907324, 0.80460545, 0.45439687]), np.array([ 0.14288327, 0.685944 , -0.703969 , 0.11615873])]
        desc = dict(classname='BoxAffordanceItem', Name='pedal', Dimensions=[0.12, 0.33, 0.04], pose=pose, Color=[0,1,0])
        self.pedalAffordance = segmentation.affordanceManager.newAffordanceFromDescription(desc)
        # Left-foot frames used while driving (swing onto/off the pedal).
        # t = transformUtils.transformFromPose(np.array([ 0.04045136, 0.96565326, 0.25810111]),
        #                                      np.array([ 0.26484648, 0.88360091, -0.37065556, -0.10825996]))
        # t = transformUtils.transformFromPose(np.array([ -4.34908919e-04, 9.24901627e-01, 2.65614116e-01]),
        #                                      np.array([ 0.25022251, 0.913271 , -0.32136359, -0.00708626]))
        t = transformUtils.transformFromPose(np.array([ 0.0384547 , 0.89273742, 0.24140762]),
                                             np.array([ 0.26331831, 0.915796 , -0.28055337, 0.11519963]))
        self.leftFootPedalSwingFrame = vis.updateFrame(t,'left foot pedal swing', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([-0.9012598 , -0.05709763, 0.34897024]),
                                             np.array([ 0.03879584, 0.98950919, 0.03820214, 0.13381721]))
        self.leftFootDrivingFrame = vis.updateFrame(t,'left foot driving', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        # t = transformUtils.transformFromPose(np.array([-0.12702725, 0.92068409, 0.27209386]),
        #                                      np.array([ 0.2062255 , 0.92155886, -0.30781119, 0.11598529]))
        # t = transformUtils.transformFromPose(np.array([-0.14940375, 0.90347275, 0.23812658]),
        #                                      np.array([ 0.27150909, 0.91398724, -0.28877386, 0.0867167 ]))
        # t = transformUtils.transformFromPose(np.array([-0.17331227, 0.87879312, 0.25645152]),
        #                                      np.array([ 0.26344489, 0.91567196, -0.28089824, 0.11505581]))
        # self.leftFootDrivingKneeInFrame = vis.updateFrame(t,'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([-0.12702725, 0.92068409, 0.27209386]),
                                             np.array([ 0.2062255 , 0.92155886, -0.30781119, 0.11598529]))
        self.leftFootDrivingKneeInFrame = vis.updateFrame(t,'left foot driving knee in', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([-0.13194951, 0.89871423, 0.24956246]),
                                             np.array([ 0.21589082, 0.91727326, -0.30088849, 0.14651633]))
        self.leftFootOnPedal = vis.updateFrame(t,'left foot on pedal', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([ 0.17712239, 0.87619935, 0.27001509]),
                                             np.array([ 0.33484372, 0.88280787, -0.31946488, 0.08044963]))
        self.leftFootUpFrame = vis.updateFrame(t,'left foot up frame', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        t = transformUtils.transformFromPose(np.array([ 0.47214939, -0.04856998, 0.01375837]),
                                             np.array([ 6.10521653e-03, 4.18621358e-04, 4.65520611e-01,
                                                        8.85015882e-01]))
        self.rightHandGrabFrame = vis.updateFrame(t,'right hand grab bar', scale=0.2, visible=True, parent=self.pointcloudAffordance)
        # Rigidly attach every frame/affordance above to the model origin so
        # that an AprilTag update moves the whole model together.
        self.frameSync = vis.FrameSync()
        self.frameSync.addFrame(self.originFrame)
        self.frameSync.addFrame(self.pointcloudAffordance.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootEgressStartFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootEgressMidFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootEgressOutsideFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.steeringWheelAffordance.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.pedalAffordance.getChildFrame(), ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootPedalSwingFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootDrivingFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootDrivingKneeInFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootOnPedal, ignoreIncoming=True)
        self.frameSync.addFrame(self.leftFootUpFrame, ignoreIncoming=True)
        self.frameSync.addFrame(self.rightHandGrabFrame, ignoreIncoming=True)

    def onAprilTag(self, msg):
        """Re-localize the model origin from an AprilTag detection message."""
        t = vtk.vtkTransform()
        # Look up tag->local at the message timestamp, then apply the fixed
        # origin-to-april offset to place the model origin.
        cameraview.imageManager.queue.getTransform('april_tag_car_beam', 'local', msg.utime, t)
        self.originFrame.copyFrame(transformUtils.concatenateTransforms([self.originToAprilTransform, t]))
class EgressPlanner(object):
    """Plans whole-body motions for egressing the Polaris vehicle.

    Produces keyframe manipulation plans — published together with explicit
    support sequences — that shift the robot's weight over its feet, stand
    it up, swing the left foot out of the car onto the running board, and
    re-center the weight.  Plans are accumulated in self.plans and committed
    via the robot system's manipulation planner.
    """

    def __init__(self, robotSystem):
        # Pelvis lift applied when standing up, in the pelvis frame
        # (x forward, z up) — presumably meters; TODO confirm units.
        self.pelvisLiftX = 0.0
        self.pelvisLiftZ = 0.05
        # Presumably degrees — not used in this class; verify against callers.
        self.legLiftAngle = 8
        # Cone tolerance for the torso gaze constraints.
        self.coneThreshold = np.radians(5)
        self.robotSystem = robotSystem
        self.polaris = None
        # Support-polygon shrink factor for quasi-static constraints.
        self.quasiStaticShrinkFactor = 0.1
        # Cartesian speed limits used when rescaling trajectories.
        self.maxFootTranslationSpeed = 0.05
        self.maxHandTranslationSpeed = 0.1
        self.plans = []

    def spawnPolaris(self):
        """Instantiate the Polaris affordance model (see PolarisModel)."""
        self.polaris = PolarisModel()

    def createLeftFootPoseConstraint(self, targetFrame, tspan=[-np.inf,np.inf]):
        """Position+orientation constraints pinning l_foot to targetFrame.

        NOTE(review): mutable default argument; harmless here because tspan
        is only read, never mutated.
        """
        positionConstraint, orientationConstraint = self.robotSystem.ikPlanner.createPositionOrientationConstraint('l_foot', targetFrame, vtk.vtkTransform())
        positionConstraint.tspan = tspan
        orientationConstraint.tspan = tspan
        return positionConstraint, orientationConstraint

    def createAllButLeftLegPostureConstraint(self, poseName):
        """Lock every joint except the left leg at the given stored pose."""
        # Negative lookahead: all joints whose name does not start with l_leg.
        joints = robotstate.matchJoints('^(?!l_leg)')
        return self.robotSystem.ikPlanner.createPostureConstraint(poseName, joints)

    def getPlanningStartPose(self):
        """Current estimated robot state, used as the planning seed."""
        return self.robotSystem.robotStateJointController.getPose('EST_ROBOT_STATE')

    def addPlan(self, plan):
        """Append a plan to the history (last entry is committed/chained)."""
        self.plans.append(plan)

    def commitManipPlan(self):
        """Execute the most recently added plan."""
        self.robotSystem.manipPlanner.commitManipPlan(self.plans[-1])

    def planEgressArms(self):
        """Posture plan moving the arms to the stored 'egress-arms' pose."""
        startPose = self.getPlanningStartPose()
        endPose = self.robotSystem.ikPlanner.getMergedPostureFromDatabase(startPose, 'driving', 'egress-arms')
        return self.robotSystem.ikPlanner.computePostureGoal(startPose, endPose)

    def planGetWeightOverFeet(self):
        """Shift the CoM over both feet while still seated (arms/base locked)."""
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        constraints = []
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=True, rightFootEnabled=True,
                                                    pelvisEnabled=False,
                                                    shrinkFactor=0.5))
        constraints.append(self.robotSystem.ikPlanner.createLockedBasePostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=False, maxDegreesPerSecond=10)
        constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal(feetOnGround=False)
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        # Still seated: pelvis counts as a support body.
        supportsList = [['r_foot', 'l_foot', 'pelvis']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan

    def planStandUp(self):
        """Lift the pelvis to standing while keeping both feet fixed."""
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        # Target pelvis frame: current pelvis raised by the configured lift.
        pelvisFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('pelvis', startPose)
        t = transformUtils.frameFromPositionAndRPY([self.pelvisLiftX, 0, self.pelvisLiftZ], [0, 0, 0])
        liftFrame = transformUtils.concatenateTransforms([t, pelvisFrame])
        constraints = []
        # NOTE(review): utorsoFrame is computed but unused below.
        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        # Only the second (world-up) gaze constraint is used here.
        g = self.createUtorsoGazeConstraints([1.0, 1.0])
        constraints.append(g[1])
        # Pelvis must reach at least the lift height (z bounds pin z exactly,
        # x is free forward).
        p = ik.PositionConstraint(linkName='pelvis', referenceFrame=liftFrame,
                                  lowerBound=np.array([0.0, -np.inf, 0.0]),
                                  upperBound=np.array([np.inf, np.inf, 0.0]))
        constraints.append(p)
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=True, rightFootEnabled=True, pelvisEnabled=False,
                                                    shrinkFactor=self.quasiStaticShrinkFactor))
        constraints.append(self.robotSystem.ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        constraints.extend(self.robotSystem.ikPlanner.createFixedFootConstraints(startPoseName))
        # Keep the knees bent within a workable range while rising.
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseMetersPerSecond=0.02)
        constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal(feetOnGround=True)
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot', 'l_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan

    def createUtorsoGazeConstraints(self, tspan):
        """Two torso gaze constraints: face the egress direction, stay upright.

        :param tspan: time span applied to both constraints.
        :returns: [facing-direction constraint, world-up constraint].
        """
        constraints = []
        # Torso x-axis aligned with the outside-foot frame's x-axis.
        g = ik.WorldGazeDirConstraint()
        g.linkName = 'utorso'
        g.targetFrame = vtk.vtkTransform()
        axes = transformUtils.getAxesFromTransform(self.polaris.leftFootEgressOutsideFrame.transform)
        g.targetAxis = axes[0]
        g.bodyAxis = [1,0,0]
        g.coneThreshold = self.coneThreshold
        g.tspan = tspan
        constraints.append(g)
        # Torso z-axis aligned with world up.
        g = ik.WorldGazeDirConstraint()
        g.linkName = 'utorso'
        g.targetFrame = vtk.vtkTransform()
        g.targetAxis = [0,0,1]
        g.bodyAxis = [0,0,1]
        g.coneThreshold = self.coneThreshold
        g.tspan = tspan
        constraints.append(g)
        return constraints

    def planFootEgress(self):
        """Full foot-egress sequence as ONE rescaled plan.

        Plans shift-weight-out, foot-out and center-weight separately, saves
        each raw trajectory on the MATLAB IK server, then concatenates and
        time-rescales them server-side and republishes with a four-phase
        support sequence.
        """
        def saveOriginalTraj(name):
            # Snapshot the server-side qtraj_orig under a new variable name.
            commands = ['%s = qtraj_orig;' % name]
            self.robotSystem.ikServer.comm.sendCommands(commands)

        def concatenateAndRescaleTrajectories(trajectoryNames, concatenatedTrajectoryName, junctionTimesName, ikParameters):
            # Build the MATLAB command list that concatenates the saved
            # trajectories, rescales them to the velocity limits, and
            # publishes the result; returns the phase junction times.
            commands = []
            commands.append('joint_v_max = repmat(%s*pi/180, r.getNumVelocities()-6, 1);' % ikParameters.maxDegreesPerSecond)
            commands.append('xyz_v_max = repmat(%s, 3, 1);' % ikParameters.maxBaseMetersPerSecond)
            commands.append('rpy_v_max = repmat(%s*pi/180, 3, 1);' % ikParameters.maxBaseRPYDegreesPerSecond)
            commands.append('v_max = [xyz_v_max; rpy_v_max; joint_v_max];')
            commands.append("max_body_translation_speed = %r;" % ikParameters.maxBodyTranslationSpeed)
            commands.append("max_body_rotation_speed = %r;" % ikParameters.maxBodyRotationSpeed)
            commands.append('rescale_body_ids = [%s];' % (','.join(['links.%s' % linkName for linkName in ikParameters.rescaleBodyNames])))
            commands.append('rescale_body_pts = reshape(%s, 3, []);' % ik.ConstraintBase.toColumnVectorString(ikParameters.rescaleBodyPts))
            commands.append("body_rescale_options = struct('body_id',rescale_body_ids,'pts',rescale_body_pts,'max_v',max_body_translation_speed,'max_theta',max_body_rotation_speed,'robot',r);")
            commands.append('trajectories = {};')
            for name in trajectoryNames:
                commands.append('trajectories{end+1} = %s;' % name)
            commands.append('[%s, %s] = concatAndRescaleTrajectories(trajectories, v_max, %s, %s, body_rescale_options);' % (concatenatedTrajectoryName, junctionTimesName, ikParameters.accelerationParam, ikParameters.accelerationFraction))
            commands.append('s.publishTraj(%s, 1);' % concatenatedTrajectoryName)
            self.robotSystem.ikServer.comm.sendCommands(commands)
            return self.robotSystem.ikServer.comm.getFloatArray(junctionTimesName)

        # Phase 1: lean onto the right foot.
        self.planShiftWeightOut()
        shiftWeightName = 'qtraj_shift_weight'
        saveOriginalTraj(shiftWeightName)
        # Phase 2: swing the left foot out, starting from phase 1's end state.
        nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan.plan[-1])
        self.planFootOut(startPose=nextStartPose, finalFootHeight=0.0)
        footOutName = 'qtraj_foot_out'
        saveOriginalTraj(footOutName)
        # Phase 3: bring the weight back over both feet.
        nextStartPose = robotstate.convertStateMessageToDrakePose(self.plans[-1].plan.plan[-1])
        self.planCenterWeight(startPose=nextStartPose)
        centerWeightName = 'qtraj_center_weight'
        saveOriginalTraj(centerWeightName)
        ikParameters = IkParameters(usePointwise=True, maxBaseRPYDegreesPerSecond=10,
                                    rescaleBodyNames=['l_foot'], rescaleBodyPts=[0.0, 0.0, 0.0],
                                    maxBodyTranslationSpeed=self.maxFootTranslationSpeed)
        ikParameters = self.robotSystem.ikPlanner.mergeWithDefaultIkParameters(ikParameters)
        # Listen for the plan the server publishes when rescaling finishes.
        listener = self.robotSystem.ikPlanner.getManipPlanListener()
        supportTimes = concatenateAndRescaleTrajectories([shiftWeightName, footOutName, centerWeightName],
                                                         'qtraj_foot_egress', 'ts', ikParameters)
        keyFramePlan = listener.waitForResponse()
        listener.finish()
        # Support phases: both feet, right only (swing), then both feet twice
        # (touch-down and final), matching the three junction times.
        supportsList = []
        supportsList.append(['l_foot', 'r_foot'])
        supportsList.append(['r_foot'])
        supportsList.append(['l_foot', 'r_foot'])
        supportsList.append(['l_foot', 'r_foot'])
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, supportTimes, True)
        self.addPlan(plan)

    def planShiftWeightOut(self, startPose=None):
        """Shift the CoM over the right foot only (prepares the foot swing)."""
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        constraints = []
        # NOTE(review): utorsoFrame is computed but unused below.
        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        constraints.extend(self.createUtorsoGazeConstraints([1.0, 1.0]))
        # Quasi-static support on the right foot only.
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=False, rightFootEnabled=True,
                                                    pelvisEnabled=False,
                                                    shrinkFactor=self.quasiStaticShrinkFactor))
        constraints.append(self.robotSystem.ikPlanner.createXYZMovingBasePostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'l_foot'))
        constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'r_foot'))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseMetersPerSecond=0.02)
        constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal(feetOnGround=False)
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot', 'l_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan

    def computeLeftFootOverPlatformFrame(self, startPose, height):
        """Target l_foot frame: beside the planned r_foot, offset by *height*.

        NOTE(review): the first lFoot2World assignment (from the egress
        outside frame) is immediately shadowed by the r_foot copy and has no
        effect — confirm which behavior is intended before cleaning up.
        """
        lFoot2World = transformUtils.copyFrame(self.polaris.leftFootEgressOutsideFrame.transform)
        rFoot2World = self.robotSystem.ikPlanner.getLinkFrameAtPose('r_foot', startPose)
        lFoot2World = transformUtils.copyFrame(rFoot2World)
        lFoot2World.PreMultiply()
        # Fixed lateral/forward offset from the right foot, plus the height.
        lFoot2World.Translate([0.05, 0.26, height])
        return lFoot2World

    def planFootOut(self, startPose=None, finalFootHeight=0.05):
        """Swing the left foot out of the car to hover over the platform.

        :param startPose: planning seed (defaults to the current state).
        :param finalFootHeight: hover height of l_foot above the platform.
        """
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_egress_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        # NOTE(review): utorsoFrame is computed but unused below.
        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        finalLeftFootFrame = self.computeLeftFootOverPlatformFrame(startPose, finalFootHeight)
        constraints = []
        constraints.extend(self.createUtorsoGazeConstraints([0.0, 1.0]))
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=False, rightFootEnabled=True,
                                                    pelvisEnabled=False, shrinkFactor=0.01))
        constraints.append(self.robotSystem.ikPlanner.createMovingBaseSafeLimitsConstraint())
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        #constraints.append(self.robotSystem.ikPlanner.createLockedBackPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'r_foot'))
        # End-of-trajectory foot placement (tspan [1,1]).
        constraints.extend(self.createLeftFootPoseConstraint(finalLeftFootFrame, tspan=[1,1]))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True, maxBaseRPYDegreesPerSecond=10,
                                                  rescaleBodyNames=['l_foot'],
                                                  rescaleBodyPts=[0.0, 0.0, 0.0],
                                                  maxBodyTranslationSpeed=self.maxFootTranslationSpeed)
        #constraintSet.seedPoseName = 'q_start'
        #constraintSet.nominalPoseName = 'q_start'
        # First solve the end pose, then add intermediate waypoints and plan
        # the full trajectory.
        constraintSet.runIk()
        # Lift frame: current foot pose raised to the outside frame's height.
        footFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('l_foot', startPose)
        t = transformUtils.frameFromPositionAndRPY([0, 0, self.polaris.leftFootEgressOutsideFrame.transform.GetPosition()[2]-footFrame.GetPosition()[2]], [0, 0, 0])
        liftFrame = transformUtils.concatenateTransforms([footFrame, t])
        vis.updateFrame(liftFrame, 'lift frame')
        # Keep the foot orientation fixed during the initial lift.
        c = ik.WorldFixedOrientConstraint()
        c.linkName = 'l_foot'
        c.tspan = [0.0, 0.1, 0.2]
        constraints.append(c)
        # Waypoints: lift (t=0.2), mid-swing (t=0.5), outside (t=0.8).
        constraints.extend(self.createLeftFootPoseConstraint(liftFrame, tspan=[0.2, 0.2]))
        constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressMidFrame, tspan=[0.5, 0.5]))
        constraints.extend(self.createLeftFootPoseConstraint(self.polaris.leftFootEgressOutsideFrame, tspan=[0.8, 0.8]))
        #plan = constraintSet.planEndPoseGoal(feetOnGround=False)
        keyFramePlan = constraintSet.runIkTraj()
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        # Single-support on the right foot for the whole swing.
        supportsList = [['r_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, False)
        self.addPlan(plan)
        return plan

    def planLeftFootDown(self):
        """Lower the hovering left foot onto the platform.

        :returns: (plan, endPose) so the follow-up center-weight plan can be
            seeded from the planned end state.
        """
        startPose = self.getPlanningStartPose()
        startPoseName = 'q_footdown_start'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_footdown_end'
        # NOTE(review): utorsoFrame is computed but unused below.
        utorsoFrame = self.robotSystem.ikPlanner.getLinkFrameAtPose('utorso', startPose)
        finalLeftFootFrame = self.computeLeftFootOverPlatformFrame(startPose, 0.0)
        constraints = []
        constraints.extend(self.createUtorsoGazeConstraints([0.0, 1.0]))
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=False, rightFootEnabled=True,
                                                    pelvisEnabled=False, shrinkFactor=0.01))
        constraints.append(self.robotSystem.ikPlanner.createMovingBaseSafeLimitsConstraint())
        constraints.append(self.robotSystem.ikPlanner.createLockedLeftArmPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createLockedRightArmPostureConstraint(startPoseName))
        #constraints.append(self.robotSystem.ikPlanner.createLockedBackPostureConstraint(startPoseName))
        constraints.append(self.robotSystem.ikPlanner.createFixedLinkConstraints(startPoseName, 'r_foot'))
        constraints.extend(self.createLeftFootPoseConstraint(finalLeftFootFrame, tspan=[1,1]))
        constraintSet = ConstraintSet(self.robotSystem.ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.ikParameters = IkParameters(usePointwise=True)
        #constraintSet.seedPoseName = 'q_start'
        #constraintSet.nominalPoseName = 'q_start'
        endPose, _ = constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal(feetOnGround=False)
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0], poseTimes[-1]]
        # Right foot only until touch-down, then double support.
        supportsList = [['r_foot'], ['r_foot','l_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan, endPose

    def planCenterWeight(self, startPose=None):
        """Re-center the CoM over both feet with feet fixed and arms locked."""
        ikPlanner = self.robotSystem.ikPlanner
        if startPose is None:
            startPose = self.getPlanningStartPose()
        startPoseName = 'q_lean_right'
        self.robotSystem.ikPlanner.addPose(startPose, startPoseName)
        endPoseName = 'q_egress_end'
        footFixedConstraints = ikPlanner.createFixedFootConstraints(startPoseName)
        backConstraint = ikPlanner.createMovingBackLimitedPostureConstraint()
        armsLocked = ikPlanner.createLockedArmsPostureConstraints(startPoseName)
        constraints = [backConstraint]
        constraints.extend(footFixedConstraints)
        constraints.extend(armsLocked)
        constraints.append(ik.QuasiStaticConstraint(leftFootEnabled=True, rightFootEnabled=True,
                                                    pelvisEnabled=False,
                                                    shrinkFactor=self.quasiStaticShrinkFactor))
        constraints.append(self.robotSystem.ikPlanner.createKneePostureConstraint([0.7, 2.5]))
        constraintSet = ConstraintSet(ikPlanner, constraints, endPoseName, startPoseName)
        constraintSet.seedPoseName = 'q_start'
        constraintSet.nominalPoseName = 'q_nom'
        # NOTE(review): planLeftFootDown unpacks runIk() as (endPose, _);
        # here the raw return is bound — endPose is unused afterwards, so
        # behavior is unaffected, but the inconsistency is worth confirming.
        endPose = constraintSet.runIk()
        keyFramePlan = constraintSet.planEndPoseGoal()
        poseTimes, poses = planplayback.PlanPlayback.getPlanPoses(keyFramePlan)
        ts = [poseTimes[0]]
        supportsList = [['r_foot','l_foot']]
        plan = self.publishPlanWithSupports(keyFramePlan, supportsList, ts, True)
        self.addPlan(plan)
        return plan

    def planFootDownAndCenterWeight(self):
        """Plan foot-down and center-weight, then splice them into one plan.

        The center-weight states/supports are time-shifted by the foot-down
        end time and appended onto the foot-down plan, which is republished
        as a single quasi-static plan with supports.
        """
        leftFootDownPlan, leftFootDownEndPose = self.planLeftFootDown()
        centerWeightPlan = self.planCenterWeight(startPose=leftFootDownEndPose)
        # now we need to combine these plans
        footDownEndTime = leftFootDownPlan.plan.plan[-1].utime
        robotPlan = leftFootDownPlan
        # plan_info may be a tuple; make it mutable before appending.
        robotPlan.plan.plan_info = list(robotPlan.plan.plan_info)
        for state, info in zip(centerWeightPlan.plan.plan, centerWeightPlan.plan.plan_info):
            state.utime += footDownEndTime
            robotPlan.plan.plan.append(state)
            robotPlan.plan.plan_info.append(info)
        robotPlan.plan.num_states = len(robotPlan.plan.plan)
        # make support sequence
        for support, t in zip(centerWeightPlan.support_sequence.supports, centerWeightPlan.support_sequence.ts):
            # Shift each support time into the combined plan's time base.
            t += footDownEndTime
            robotPlan.support_sequence.ts.append(t)
            robotPlan.support_sequence.supports.append(support)
        robotPlan.is_quasistatic = True
        self.addPlan(robotPlan)
        lcmUtils.publish('CANDIDATE_ROBOT_PLAN_WITH_SUPPORTS', robotPlan)
        return robotPlan

    def planArmsForward(self):
        """Bring both arms to the 'hands-forward' posture via a midpoint.

        The intermediate posture blends 25% of the way from the left-arm-only
        pose toward the final two-arm pose to smooth the motion.
        """
        q0 = self.getPlanningStartPose()
        q1 = self.robotSystem.ikPlanner.getMergedPostureFromDatabase(q0, 'General', 'hands-forward', side='left')
        q2 = self.robotSystem.ikPlanner.getMergedPostureFromDatabase(q1, 'General', 'hands-forward', side='right')
        a = 0.25
        q1 = (1-a)*q1 + a*np.array(q2)
        ikParameters = IkParameters(usePointwise=True, maxBaseRPYDegreesPerSecond=10,
                                    rescaleBodyNames=['l_hand', 'r_hand'],
                                    rescaleBodyPts=list(self.robotSystem.ikPlanner.getPalmPoint(side='left')) +
                                                   list(self.robotSystem.ikPlanner.getPalmPoint(side='right')),
                                    maxBodyTranslationSpeed=3*self.maxHandTranslationSpeed)
        plan = self.robotSystem.ikPlanner.computeMultiPostureGoal([q0, q1, q2], ikParameters=ikParameters)
        self.addPlan(plan)
        return plan

    def publishPlanWithSupports(self, keyFramePlan, supportsList, ts, isQuasistatic):
        """Convert a keyframe plan to a plan-with-supports and publish it.

        :param keyFramePlan: raw keyframe plan from the IK planner.
        :param supportsList: list of link-name lists, one per support phase.
        :param ts: phase start times matching supportsList.
        :param isQuasistatic: whether the controller should treat the plan
            as quasi-static.
        :returns: the published plan-with-supports message.
        """
        manipPlanner = self.robotSystem.manipPlanner
        msg_robot_plan_t = manipPlanner.convertKeyframePlan(keyFramePlan)
        supports = manipPlanner.getSupportLCMFromListOfSupports(supportsList,ts)
        msg_robot_plan_with_supports_t = manipPlanner.convertPlanToPlanWithSupports(msg_robot_plan_t, supports, ts, isQuasistatic)
        lcmUtils.publish('CANDIDATE_ROBOT_PLAN_WITH_SUPPORTS', msg_robot_plan_with_supports_t)
        return msg_robot_plan_with_supports_t

    def getFrameToOriginTransform(self, t):
        """Express *t* relative to the Polaris model origin (debug helper)."""
        tCopy = transformUtils.copyFrame(t)
        tCopy.PostMultiply()
        tCopy.Concatenate(self.polaris.originFrame.transform.GetLinearInverse())
        # Debug output of the relative pose (Python 2 print statement).
        print transformUtils.poseFromTransform(tCopy)
        return tCopy
class EgressPanel(TaskUserPanel):
def __init__(self, robotSystem):
    """Task panel combining the car-egress and platform step-down planners.

    :param robotSystem: shared ddapp robot system (IK server, planners, UI).
    """
    TaskUserPanel.__init__(self, windowTitle='Egress')
    self.robotSystem = robotSystem
    self.egressPlanner = EgressPlanner(robotSystem)
    self.platformPlanner = polarisplatformplanner.PolarisPlatformPlanner(robotSystem.ikServer, robotSystem)
    # Order matters: properties must exist before buttons/tasks reference them.
    self.addDefaultProperties()
    self.addButtons()
    self.addTasks()
def addButtons(self):
    """Register the manual-operation buttons (in on-screen order)."""
    # Get onto platform buttons
    self.addManualButton('Spawn Polaris', self.egressPlanner.spawnPolaris)
    self.addManualButton('Get weight over feet', self.egressPlanner.planGetWeightOverFeet)
    self.addManualButton('Stand up', self.egressPlanner.planStandUp)
    self.addManualButton('Step out', self.egressPlanner.planFootEgress)
    self.addManualButton('Arms forward', self.egressPlanner.planArmsForward)
    self.addManualSpacer()
    #sit/stand buttons
    self.addManualButton('Start', self.onStart)
    # polaris step down buttons
    self.addManualButton('Fit Platform Affordance', self.platformPlanner.fitRunningBoardAtFeet)
    self.addManualButton('Spawn Ground Affordance', self.platformPlanner.spawnGroundAffordance)
    self.addManualButton('Raycast Terrain', self.platformPlanner.requestRaycastTerrain)
    self.addManualButton('Update Affordance', self.platformPlanner.updateAffordance)
    self.addManualButton('Arms Up',self.onArmsUp)
    self.addManualButton('Plan Step Down', self.onPlanStepDown)
    self.addManualButton('Plan Step Off', self.onPlanStepOff)
def addDefaultProperties(self):
    """Declare the panel's user-editable properties."""
    direction_attributes = om.PropertyAttributes(enumNames=['Forwards','Sideways'])
    self.params.addProperty('Step Off Direction', 0, attributes=direction_attributes)
def _syncProperties(self):
    """Cache the panel properties as lowercase attributes for the planners."""
    direction = self.params.getPropertyEnumValue('Step Off Direction')
    self.stepOffDirection = direction.lower()
def onStart(self):
self._syncProperties()
print 'Egress Planner Ready'
def onUpdateAffordance(self):
if not self.platformPlanner.initializedFlag:
self.platformPlanner.initialize()
self.platformPlanner.updateAffordance()
def onPlan(self,planType):
self._syncProperties()
def onPlanTurn(self):
self._syncProperties()
self.platformPlanner.planTurn()
def onArmsUp(self):
self.platformPlanner.planArmsUp(self.stepOffDirection)
def onPropertyChanged(self, propertySet, propertyName):
self._syncProperties()
def onPlanStepDown(self):
self._syncProperties()
if self.stepOffDirection == 'forwards':
self.platformPlanner.planStepDownForwards()
else:
self.platformPlanner.planStepDown()
def onPlanWeightShift(self):
self._syncProperties()
if self.stepOffDirection == 'forwards':
self.platformPlanner.planWeightShiftForwards()
else:
self.platformPlanner.planWeightShift()
def onPlanStepOff(self):
self._syncProperties()
if self.stepOffDirection == 'forwards':
self.platformPlanner.planStepOffForwards()
else:
self.platformPlanner.planStepOff()
def addTasks(self):
# some helpers
self.folder = None
def addTask(task, parent=None):
parent = parent or self.folder
self.taskTree.onAddTask(task, copy=False, parent=parent)
def addFunc(func, name, parent=None):
addTask(rt.CallbackTask(callback=func, name=name), parent=parent)
def addFolder(name, parent=None):
self.folder = self.taskTree.addGroup(name, parent=parent)
return self.folder
def addManipTask(name, planFunc, userPrompt=False, planner=None):
if planner is None:
planner = self.platformPlanner
prevFolder = self.folder
addFolder(name, prevFolder)
addFunc(planFunc, 'plan')
if not userPrompt:
addTask(rt.CheckPlanInfo(name='check manip plan info'))
else:
addTask(rt.UserPromptTask(name='approve manip plan', message='Please approve manipulation plan.'))
addFunc(planner.commitManipPlan, name='execute manip plan')
addTask(rt.WaitForManipulationPlanExecution(name='wait for manip execution'))
self.folder = prevFolder
pp = self.platformPlanner
ep = self.egressPlanner
stepPrep = addFolder('Prep')
self.folder = stepPrep
addTask(rt.UserPromptTask(name="Verify SE processes", message="Please confirm that all SE processes have started"))
addTask(rt.UserPromptTask(name="Run Init Nav", message='Please click "Init Nav"'))
addTask(rt.UserPromptTask(name="Stop April Tags", message='Please stop the "April Tags" process'))
addTask(rt.UserPromptTask(name="Confirm pressure", message='Set high pressure for egress'))
addTask(rt.UserPromptTask(name="Disable recovery and bracing", message="Please disable recovery and bracing"))
addTask(rt.SetNeckPitch(name='set neck position', angle=60))
stepOut = addFolder('Step out of car')
self.folder = stepOut
addManipTask('Get weight over feet', ep.planGetWeightOverFeet, userPrompt=True, planner=ep)
addManipTask('Stand up', ep.planStandUp, userPrompt=True, planner=ep)
addManipTask('Step out', ep.planFootEgress, userPrompt=True, planner=ep)
addManipTask('Move arms up for walking', ep.planArmsForward, userPrompt=True, planner=ep)
prep = addFolder('Step down prep')
addFunc(self.onStart, 'start')
addFunc(pp.switchToPolarisPlatformParameters, "Switch walking params to 'Polaris Platform")
addTask(rt.UserPromptTask(name="wait for lidar", message="Please wait for next lidar sweep"))
self.folder = prep
addFunc(pp.fitRunningBoardAtFeet, 'fit running board')
addTask(rt.FindAffordance(name='confirm running board affordance', affordanceName='running board'))
addFunc(pp.spawnGroundAffordance, 'spawn ground affordance')
addFunc(pp.requestRaycastTerrain, 'raycast terrain')
addTask(rt.UserPromptTask(name="wait for raycast terrain", message="wait for raycast terrain"))
folder = addFolder('Step Down')
addFunc(self.onPlanStepDown, 'plan step down')
addTask(rt.UserPromptTask(name="approve footsteps, set support contact group",
message="Please approve/modify footsteps."))
addFunc(self.robotSystem.footstepsDriver.onExecClicked, 'commit footstep plan')
addTask(rt.WaitForWalkExecution(name='wait for walking'))
folder = addFolder('Step Off')
# addTask(rt.UserPromptTask(name="wait for lidar sweep", message="wait for lidar sweep before spawning ground affordance"))
addFunc(pp.spawnFootplaneGroundAffordance, 'spawn footplane ground affordance')
addFunc(pp.requestRaycastTerrain, 'raycast terrain')
addTask(rt.UserPromptTask(name="wait for raycast terrain", message="wait for raycast terrain"))
addFunc(self.onPlanStepOff, 'plan step off')
addTask(rt.UserPromptTask(name="approve footsteps", message="Please approve footsteps, modify if necessary"))
addFunc(self.robotSystem.footstepsDriver.onExecClicked, 'commit footstep plan')
addTask(rt.WaitForWalkExecution(name='wait for walking'))
addManipTask('plan nominal', pp.planNominal, userPrompt=True)
addTask(rt.UserPromptTask(name="reset walking parameters", message="Please set walking parameters to drake nominal"))
addTask(rt.SetNeckPitch(name='set neck position', angle=20))
| |
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# [scheduler] option group: configuration for the scheduler service itself
# (driver selection, periodic task cadence, retry limits, cell host discovery).
scheduler_group = cfg.OptGroup(name="scheduler",
title="Scheduler configuration")
# Options registered under the [scheduler] group.  Most carry a
# deprecated_name/deprecated_group pair mapping from the old flat
# [DEFAULT] option names.
scheduler_opts = [
cfg.StrOpt("host_manager",
default="host_manager",
choices=("host_manager", "ironic_host_manager"),
deprecated_name="scheduler_host_manager",
deprecated_group="DEFAULT",
help="""
The scheduler host manager to use.
The host manager manages the in-memory picture of the hosts that the scheduler
uses. The options values are chosen from the entry points under the namespace
'nova.scheduler.host_manager' in 'setup.cfg'.
"""),
cfg.StrOpt("driver",
default="filter_scheduler",
deprecated_name="scheduler_driver",
deprecated_group="DEFAULT",
help="""
The class of the driver used by the scheduler. This should be chosen from one
of the entrypoints under the namespace 'nova.scheduler.driver' of file
'setup.cfg'. If nothing is specified in this option, the 'filter_scheduler' is
used.
Other options are:
* 'caching_scheduler' which aggressively caches the system state for better
individual scheduler performance at the risk of more retries when running
multiple schedulers. [DEPRECATED]
* 'chance_scheduler' which simply picks a host at random. [DEPRECATED]
* 'fake_scheduler' which is used for testing.
Possible values:
* Any of the drivers included in Nova:
** filter_scheduler
** caching_scheduler
** chance_scheduler
** fake_scheduler
* You may also set this to the entry point name of a custom scheduler driver,
but you will be responsible for creating and maintaining it in your setup.cfg
file.
"""),
cfg.IntOpt("periodic_task_interval",
default=60,
deprecated_name="scheduler_driver_task_period",
deprecated_group="DEFAULT",
help="""
Periodic task interval.
This value controls how often (in seconds) to run periodic tasks in the
scheduler. The specific tasks that are run for each period are determined by
the particular scheduler being used.
If this is larger than the nova-service 'service_down_time' setting, Nova may
report the scheduler service as down. This is because the scheduler driver is
responsible for sending a heartbeat and it will only do that as often as this
option allows. As each scheduler can work a little differently than the others,
be sure to test this with your selected scheduler.
Possible values:
* An integer, where the integer corresponds to periodic task interval in
seconds. 0 uses the default interval (60 seconds). A negative value disables
periodic tasks.
Related options:
* ``nova-service service_down_time``
"""),
cfg.IntOpt("max_attempts",
default=3,
min=1,
deprecated_name="scheduler_max_attempts",
deprecated_group="DEFAULT",
help="""
Maximum number of schedule attempts for a chosen host.
This is the maximum number of attempts that will be made to schedule an
instance before it is assumed that the failures aren't due to normal occasional
race conflicts, but rather some other problem. When this is reached a
MaxRetriesExceeded exception is raised, and the instance is set to an error
state.
Possible values:
* A positive integer, where the integer corresponds to the max number of
attempts that can be made when scheduling an instance.
"""),
cfg.IntOpt("discover_hosts_in_cells_interval",
default=-1,
min=-1,
help="""
Periodic task interval.
This value controls how often (in seconds) the scheduler should attempt
to discover new hosts that have been added to cells. If negative (the
default), no automatic discovery will occur.
Deployments where compute nodes come and go frequently may want this
enabled, where others may prefer to manually discover hosts when one
is added to avoid any overhead from constantly checking. If enabled,
every time this runs, we will select any unmapped hosts out of each
cell database on every run.
"""),
]
# [filter_scheduler] option group: tuning for the FilterScheduler driver and
# its subclasses (filters, weighers and isolation settings).  None of these
# options affect other scheduler drivers.
filter_scheduler_group = cfg.OptGroup(name="filter_scheduler",
title="Filter scheduler options")
# Options registered under the [filter_scheduler] group.
filter_scheduler_opts = [
cfg.IntOpt("host_subset_size",
default=1,
min=1,
deprecated_name="scheduler_host_subset_size",
deprecated_group="DEFAULT",
help="""
Size of subset of best hosts selected by scheduler.
New instances will be scheduled on a host chosen randomly from a subset of the
N best hosts, where N is the value set by this option.
Setting this to a value greater than 1 will reduce the chance that multiple
scheduler processes handling similar requests will select the same host,
creating a potential race condition. By selecting a host randomly from the N
hosts that best fit the request, the chance of a conflict is reduced. However,
the higher you set this value, the less optimal the chosen host may be for a
given request.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* An integer, where the integer corresponds to the size of a host subset. Any
integer is valid, although any value less than 1 will be treated as 1
"""),
cfg.IntOpt("max_io_ops_per_host",
default=8,
deprecated_group="DEFAULT",
help="""
The number of instances that can be actively performing IO on a host.
Instances performing IO includes those in the following states: build, resize,
snapshot, migrate, rescue, unshelve.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'io_ops_filter' filter is enabled.
Possible values:
* An integer, where the integer corresponds to the max number of instances
that can be actively performing IO on any given host.
"""),
cfg.IntOpt("max_instances_per_host",
default=50,
min=1,
deprecated_group="DEFAULT",
help="""
Maximum number of instances that be active on a host.
If you need to limit the number of instances on any given host, set this option
to the maximum number of instances you want to allow. The num_instances_filter
will reject any host that has at least as many instances as this option's
value.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'num_instances_filter' filter is enabled.
Possible values:
* An integer, where the integer corresponds to the max instances that can be
scheduled on a host.
"""),
cfg.BoolOpt("track_instance_changes",
default=True,
deprecated_name="scheduler_tracks_instance_changes",
deprecated_group="DEFAULT",
help="""
Enable querying of individual hosts for instance information.
The scheduler may need information about the instances on a host in order to
evaluate its filters and weighers. The most common need for this information is
for the (anti-)affinity filters, which need to choose a host based on the
instances already running on a host.
If the configured filters and weighers do not need this information, disabling
this option will improve performance. It may also be disabled when the tracking
overhead proves too heavy, although this will cause classes requiring host
usage data to query the database on each request instead.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
NOTE: In a multi-cell (v2) setup where the cell MQ is separated from the
top-level, computes cannot directly communicate with the scheduler. Thus,
this option cannot be enabled in that scenario. See also the
[workarounds]/disable_group_policy_check_upcall option.
"""),
cfg.MultiStrOpt("available_filters",
default=["nova.scheduler.filters.all_filters"],
deprecated_name="scheduler_available_filters",
deprecated_group="DEFAULT",
help="""
Filters that the scheduler can use.
An unordered list of the filter classes the nova scheduler may apply. Only the
filters specified in the 'scheduler_enabled_filters' option will be used, but
any filter appearing in that option must also be included in this list.
By default, this is set to all filters that are included with nova.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter that may be used for selecting a host
Related options:
* scheduler_enabled_filters
"""),
cfg.ListOpt("enabled_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ServerGroupAntiAffinityFilter",
"ServerGroupAffinityFilter",
],
deprecated_name="scheduler_default_filters",
deprecated_group="DEFAULT",
help="""
Filters that the scheduler will use.
An ordered list of filter class names that will be used for filtering
hosts. These filters will be applied in the order they are listed so
place your most restrictive filters first to make the filtering process more
efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter to be used for selecting a host
Related options:
* All of the filters in this option *must* be present in the
'scheduler_available_filters' option, or a SchedulerHostFilterNotFound
exception will be raised.
"""),
cfg.ListOpt("baremetal_enabled_filters",
default=[
"RetryFilter",
"AvailabilityZoneFilter",
"ComputeFilter",
"ComputeCapabilitiesFilter",
"ImagePropertiesFilter",
"ExactRamFilter",
"ExactDiskFilter",
"ExactCoreFilter",
],
deprecated_name="baremetal_scheduler_default_filters",
deprecated_group="DEFAULT",
deprecated_for_removal=True,
deprecated_reason="""
These filters were used to overcome some of the baremetal scheduling
limitations in Nova prior to the use of the Placement API. Now scheduling will
use the custom resource class defined for each baremetal node to make its
selection.
""",
help="""
Filters used for filtering baremetal hosts.
Filters are applied in order, so place your most restrictive filters first to
make the filtering process more efficient.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a filter to be used for selecting a baremetal host
Related options:
* If the 'scheduler_use_baremetal_filters' option is False, this option has
no effect.
"""),
cfg.BoolOpt("use_baremetal_filters",
deprecated_name="scheduler_use_baremetal_filters",
deprecated_group="DEFAULT",
deprecated_for_removal=True,
deprecated_reason="""
These filters were used to overcome some of the baremetal scheduling
limitations in Nova prior to the use of the Placement API. Now scheduling will
use the custom resource class defined for each baremetal node to make its
selection.
""",
default=False,
help="""
Enable baremetal filters.
Set this to True to tell the nova scheduler that it should use the filters
specified in the 'baremetal_scheduler_enabled_filters' option. If you are not
scheduling baremetal nodes, leave this at the default setting of False.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Related options:
* If this option is set to True, then the filters specified in the
'baremetal_scheduler_enabled_filters' are used instead of the filters
specified in 'scheduler_enabled_filters'.
"""),
cfg.ListOpt("weight_classes",
default=["nova.scheduler.weights.all_weighers"],
deprecated_name="scheduler_weight_classes",
deprecated_group="DEFAULT",
help="""
Weighers that the scheduler will use.
Only hosts which pass the filters are weighed. The weight for any host starts
at 0, and the weighers order these hosts by adding to or subtracting from the
weight assigned by the previous weigher. Weights may become negative. An
instance will be scheduled to one of the N most-weighted hosts, where N is
'scheduler_host_subset_size'.
By default, this is set to all weighers that are included with Nova.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more strings, where each string corresponds to the name of
a weigher that will be used for selecting a host
"""),
cfg.FloatOpt("ram_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Ram weight multipler ratio.
This option determines how hosts with more or less available RAM are weighed. A
positive value will result in the scheduler preferring hosts with more
available RAM, and a negative number will result in the scheduler preferring
hosts with less available RAM. Another way to look at it is that positive
values for this option will tend to spread instances across many hosts, while
negative values will tend to fill up (stack) hosts as much as possible before
scheduling to a less-used host. The absolute value, whether positive or
negative, controls how strong the RAM weigher is relative to other weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'ram' weigher is enabled.
Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
"""),
cfg.FloatOpt("disk_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Disk weight multipler ratio.
Multiplier used for weighing free disk space. Negative numbers mean to
stack vs spread.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'disk' weigher is enabled.
Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
"""),
cfg.FloatOpt("io_ops_weight_multiplier",
default=-1.0,
deprecated_group="DEFAULT",
help="""
IO operations weight multipler ratio.
This option determines how hosts with differing workloads are weighed. Negative
values, such as the default, will result in the scheduler preferring hosts with
lighter workloads whereas positive values will prefer hosts with heavier
workloads. Another way to look at it is that positive values for this option
will tend to schedule instances onto hosts that are already busy, while
negative values will tend to distribute the workload across more hosts. The
absolute value, whether positive or negative, controls how strong the io_ops
weigher is relative to other weighers.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'io_ops' weigher is enabled.
Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
"""),
cfg.FloatOpt("pci_weight_multiplier",
default=1.0,
min=0.0,
help="""
PCI device affinity weight multiplier.
The PCI device affinity weighter computes a weighting based on the number of
PCI devices on the host and the number of PCI devices requested by the
instance. The ``NUMATopologyFilter`` filter must be enabled for this to have
any significance. For more information, refer to the filter documentation:
https://docs.openstack.org/developer/nova/filter_scheduler.html
Possible values:
* A positive integer or float value, where the value corresponds to the
multiplier ratio for this weigher.
"""),
# TODO(sfinucan): Add 'min' parameter and remove warning in 'affinity.py'
cfg.FloatOpt("soft_affinity_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Multiplier used for weighing hosts for group soft-affinity.
Possible values:
* An integer or float value, where the value corresponds to weight multiplier
for hosts with group soft affinity. Only a positive value are meaningful, as
negative values would make this behave as a soft anti-affinity weigher.
"""),
cfg.FloatOpt(
"soft_anti_affinity_weight_multiplier",
default=1.0,
deprecated_group="DEFAULT",
help="""
Multiplier used for weighing hosts for group soft-anti-affinity.
Possible values:
* An integer or float value, where the value corresponds to weight multiplier
for hosts with group soft anti-affinity. Only a positive value are
meaningful, as negative values would make this behave as a soft affinity
weigher.
"""),
# TODO(mikal): replace this option with something involving host aggregates
cfg.ListOpt("isolated_images",
default=[],
deprecated_group="DEFAULT",
help="""
List of UUIDs for images that can only be run on certain hosts.
If there is a need to restrict some images to only run on certain designated
hosts, list those image UUIDs here.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
Possible values:
* A list of UUID strings, where each string corresponds to the UUID of an
image
Related options:
* scheduler/isolated_hosts
* scheduler/restrict_isolated_hosts_to_isolated_images
"""),
cfg.ListOpt("isolated_hosts",
default=[],
deprecated_group="DEFAULT",
help="""
List of hosts that can only run certain images.
If there is a need to restrict some images to only run on certain designated
hosts, list those host names here.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled.
Possible values:
* A list of strings, where each string corresponds to the name of a host
Related options:
* scheduler/isolated_images
* scheduler/restrict_isolated_hosts_to_isolated_images
"""),
cfg.BoolOpt(
"restrict_isolated_hosts_to_isolated_images",
default=True,
deprecated_group="DEFAULT",
help="""
Prevent non-isolated images from being built on isolated hosts.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'IsolatedHostsFilter' filter is enabled. Even
then, this option doesn't affect the behavior of requests for isolated images,
which will *always* be restricted to isolated hosts.
Related options:
* scheduler/isolated_images
* scheduler/isolated_hosts
"""),
cfg.StrOpt(
"aggregate_image_properties_isolation_namespace",
deprecated_group="DEFAULT",
help="""
Image property namespace for use in the host aggregate.
Images and hosts can be configured so that certain images can only be scheduled
to hosts in a particular aggregate. This is done with metadata values set on
the host aggregate that are identified by beginning with the value of this
option. If the host is part of an aggregate with such a metadata key, the image
in the request spec must have the value of that metadata in its properties in
order for the scheduler to consider the host as acceptable.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'aggregate_image_properties_isolation' filter is
enabled.
Possible values:
* A string, where the string corresponds to an image property namespace
Related options:
* aggregate_image_properties_isolation_separator
"""),
cfg.StrOpt(
"aggregate_image_properties_isolation_separator",
default=".",
deprecated_group="DEFAULT",
help="""
Separator character(s) for image property namespace and name.
When using the aggregate_image_properties_isolation filter, the relevant
metadata keys are prefixed with the namespace defined in the
aggregate_image_properties_isolation_namespace configuration option plus a
separator. This option defines the separator to be used.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'aggregate_image_properties_isolation' filter
is enabled.
Possible values:
* A string, where the string corresponds to an image property namespace
separator character
Related options:
* aggregate_image_properties_isolation_namespace
""")]
# [trusted_computing] option group: Trusted Platform Module attestation
# settings used by the TrustedFilter.  Every option here is flagged
# deprecated_for_removal (the filter is incomplete as of Pike).
trust_group = cfg.OptGroup(name="trusted_computing",
title="Trust parameters",
help="""
Configuration options for enabling Trusted Platform Module.
""")
# Options registered under the [trusted_computing] group.
trusted_opts = [
cfg.HostAddressOpt("attestation_server",
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
help="""
The host to use as the attestation server.
Cloud computing pools can involve thousands of compute nodes located at
different geographical locations, making it difficult for cloud providers to
identify a node's trustworthiness. When using the Trusted filter, users can
request that their VMs only be placed on nodes that have been verified by the
attestation server specified in this option.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A string representing the host name or IP address of the attestation server,
or an empty string.
Related options:
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_server_ca_file",
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
help="""
The absolute path to the certificate to use for authentication when connecting
to the attestation server. See the `attestation_server` help text for more
information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A string representing the path to the authentication certificate for the
attestation server, or an empty string.
Related options:
* attestation_server
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.PortOpt("attestation_port",
default=8443,
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
help="""
The port to use when connecting to the attestation server. See the
`attestation_server` help text for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_api_url",
default="/OpenAttestationWebServices/V1.0",
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
help="""
The URL on the attestation server to use. See the `attestation_server` help
text for more information about host verification.
This value must be just that path portion of the full URL, as it will be joined
to the host specified in the attestation_server option.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A valid URL string of the attestation server, or an empty string.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_auth_blob
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.StrOpt("attestation_auth_blob",
secret=True,
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
help="""
Attestation servers require a specific blob that is used to authenticate. The
content and format of the blob are determined by the particular attestation
server being used. There is no default value; you must supply the value as
specified by your attestation service. See the `attestation_server` help text
for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A string containing the specific blob required by the attestation server, or
an empty string.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_timeout
* attestation_insecure_ssl
"""),
cfg.IntOpt("attestation_auth_timeout",
default=60,
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
min=0,
help="""
This value controls how long a successful attestation is cached. Once this
period has elapsed, a new attestation request will be made. See the
`attestation_server` help text for more information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Possible values:
* A integer value, corresponding to the timeout interval for attestations in
seconds. Any integer is valid, although setting this to zero or negative
values can greatly impact performance when using an attestation service.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_insecure_ssl
"""),
cfg.BoolOpt("attestation_insecure_ssl",
default=False,
deprecated_for_removal=True,
deprecated_reason="Incomplete filter",
deprecated_since="Pike",
help="""
When set to True, the SSL certificate verification is skipped for the
attestation service. See the `attestation_server` help text for more
information about host verification.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect. Also note that this setting
only affects scheduling if the 'TrustedFilter' filter is enabled.
Related options:
* attestation_server
* attestation_server_ca_file
* attestation_port
* attestation_api_url
* attestation_auth_blob
* attestation_auth_timeout
"""),
]
# [metrics] option group: configuration for the metrics weigher used by the
# FilterScheduler (how per-host metric values are combined into a weight).
metrics_group = cfg.OptGroup(name="metrics",
title="Metrics parameters",
help="""
Configuration options for metrics
Options under this group allow to adjust how values assigned to metrics are
calculated.
""")
# Options registered under the [metrics] group.
metrics_weight_opts = [
cfg.FloatOpt("weight_multiplier",
default=1.0,
help="""
When using metrics to weight the suitability of a host, you can use this option
to change how the calculated weight influences the weight assigned to a host as
follows:
* >1.0: increases the effect of the metric on overall weight
* 1.0: no change to the calculated weight
* >0.0,<1.0: reduces the effect of the metric on overall weight
* 0.0: the metric value is ignored, and the value of the
'weight_of_unavailable' option is returned instead
* >-1.0,<0.0: the effect is reduced and reversed
* -1.0: the effect is reversed
* <-1.0: the effect is increased proportionally and reversed
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
Related options:
* weight_of_unavailable
"""),
cfg.ListOpt("weight_setting",
default=[],
help="""
This setting specifies the metrics to be weighed and the relative ratios for
each metric. This should be a single string value, consisting of a series of
one or more 'name=ratio' pairs, separated by commas, where 'name' is the name
of the metric to be weighed, and 'ratio' is the relative weight for that
metric.
Note that if the ratio is set to 0, the metric value is ignored, and instead
the weight will be set to the value of the 'weight_of_unavailable' option.
As an example, let's consider the case where this option is set to:
``name1=1.0, name2=-1.3``
The final weight will be:
``(name1.value * 1.0) + (name2.value * -1.3)``
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* A list of zero or more key/value pairs separated by commas, where the key is
a string representing the name of a metric and the value is a numeric weight
for that metric. If any value is set to 0, the value is ignored and the
weight will be set to the value of the 'weight_of_unavailable' option.
Related options:
* weight_of_unavailable
"""),
cfg.BoolOpt("required",
default=True,
help="""
This setting determines how any unavailable metrics are treated. If this option
is set to True, any hosts for which a metric is unavailable will raise an
exception, so it is recommended to also use the MetricFilter to filter out
those hosts before weighing.
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* True or False, where False ensures any metric being unavailable for a host
will set the host weight to 'weight_of_unavailable'.
Related options:
* weight_of_unavailable
"""),
cfg.FloatOpt("weight_of_unavailable",
default=float(-10000.0),
help="""
When any of the following conditions are met, this value will be used in place
of any actual metric value:
* One of the metrics named in 'weight_setting' is not available for a host,
and the value of 'required' is False
* The ratio specified for a metric in 'weight_setting' is 0
* The 'weight_multiplier' option is set to 0
This option is only used by the FilterScheduler and its subclasses; if you use
a different scheduler, this option has no effect.
Possible values:
* An integer or float value, where the value corresponds to the multipler
ratio for this weigher.
Related options:
* weight_setting
* required
* weight_multiplier
"""),
]
def register_opts(conf):
    """Register every scheduler-related option group and its options on *conf*.

    Groups are registered in the same order as before: scheduler,
    filter_scheduler, trust, metrics.
    """
    grouped_opts = (
        (scheduler_group, scheduler_opts),
        (filter_scheduler_group, filter_scheduler_opts),
        (trust_group, trusted_opts),
        (metrics_group, metrics_weight_opts),
    )
    for group, opts in grouped_opts:
        conf.register_group(group)
        conf.register_opts(opts, group=group)
def list_opts():
    """Return the mapping of option group -> option list.

    Used by oslo.config's sample-config / documentation generators.
    """
    return {
        scheduler_group: scheduler_opts,
        filter_scheduler_group: filter_scheduler_opts,
        trust_group: trusted_opts,
        metrics_group: metrics_weight_opts,
    }
| |
##
## Various Python utility methods that can be shared among the different build scripts
##
import os, shutil, glob, time, sys, platform, subprocess
from distutils.dir_util import copy_tree
def set_log_tag(t):
    """Set the module-global TAG string prepended to all debug/error output."""
    global TAG
    TAG = t
############################################################
### colors for terminal (does not work in Windows... of course)
### ANSI escape sequences; the debug/error helpers below fall back to
### plain text when is_windows() is True.
# reset / text attributes
CEND = '\033[0m'
CBOLD = '\33[1m'
CITALIC = '\33[3m'
CURL = '\33[4m'
CBLINK = '\33[5m'
CBLINK2 = '\33[6m'
CSELECTED = '\33[7m'
# standard foreground colors (30-37)
CBLACK = '\33[30m'
CRED = '\33[31m'
CGREEN = '\33[32m'
CYELLOW = '\33[33m'
CBLUE = '\33[34m'
CVIOLET = '\33[35m'
CBEIGE = '\33[36m'
CWHITE = '\33[37m'
# standard background colors (40-47)
CBLACKBG = '\33[40m'
CREDBG = '\33[41m'
CGREENBG = '\33[42m'
CYELLOWBG = '\33[43m'
CBLUEBG = '\33[44m'
CVIOLETBG = '\33[45m'
CBEIGEBG = '\33[46m'
CWHITEBG = '\33[47m'
# bright foreground colors (90-97)
CGREY = '\33[90m'
CRED2 = '\33[91m'
CGREEN2 = '\33[92m'
CYELLOW2 = '\33[93m'
CBLUE2 = '\33[94m'
CVIOLET2 = '\33[95m'
CBEIGE2 = '\33[96m'
CWHITE2 = '\33[97m'
# bright background colors (100-107)
CGREYBG = '\33[100m'
CREDBG2 = '\33[101m'
CGREENBG2 = '\33[102m'
CYELLOWBG2 = '\33[103m'
CBLUEBG2 = '\33[104m'
CVIOLETBG2 = '\33[105m'
CBEIGEBG2 = '\33[106m'
CWHITEBG2 = '\33[107m'
############################################################
### file system util methods
def copy_file(sourceFile, destFile):
    """Copy a single file (contents only, no metadata) and log the operation."""
    debug('copying: {0} -> {1}'.format(sourceFile, destFile))
    shutil.copyfile(sourceFile, destFile)
def copy_files(fileNamePattern, sourceDir, destDir):
    """Copy every file in *sourceDir* matching the glob *fileNamePattern* into *destDir*."""
    matches = glob.glob(sourceDir + '/' + fileNamePattern)
    for match in matches:
        debug('copying: {0} -> {1}'.format(match, destDir))
        shutil.copy(match, destDir)
def copy_dir_contents(source_dir, dest_dir, copy_symlinks=False):
    """Recursively copy the contents of *source_dir* into *dest_dir*.

    With copy_symlinks=True the destination must not exist (shutil.copytree);
    otherwise distutils' copy_tree merges into an existing destination.
    """
    debug('copying dir contents: {0} -> {1}'.format(source_dir, dest_dir))
    if copy_symlinks:
        shutil.copytree(source_dir, dest_dir, symlinks=True)
    else:
        copy_tree(source_dir, dest_dir)
def remove_files(fileNamePattern, sourceDir, log=True):
    """Delete every file in *sourceDir* matching the glob *fileNamePattern*.

    Set log=False to suppress the per-file debug output.
    """
    for victim in glob.glob(sourceDir + '/' + fileNamePattern):
        if log:
            debug('deleting: ' + victim)
        os.remove(victim)
def rename_file(fileNamePattern, newFileName, sourceDir):
    """Rename each file matching *fileNamePattern* in *sourceDir* to *newFileName*.

    NOTE(review): with multiple matches, every match is renamed onto the same
    target path, so only the last survives — presumably callers pass a
    pattern with a single match.
    """
    for old_path in glob.glob(sourceDir + '/' + fileNamePattern):
        debug('rename: {0} -> {1}'.format(old_path, newFileName))
        os.rename(old_path, sourceDir + '/' + newFileName)
def remove_dir_if_exists(path):
    """Delete the directory tree at *path*; log an error if it does not exist.

    Fix: corrected the 'canot' typo in the error message to 'cannot'.
    """
    if os.path.exists(path):
        debug('deleting dir: ' + path)
        shutil.rmtree(path)
    else:
        # Best-effort: report and continue rather than raising.
        error('cannot delete {0}. dir does not exist'.format(path))
def remove_file_if_exists(path):
    """Delete the file at *path*; log an error if it does not exist.

    Fix: corrected the 'canot' typo in the error message to 'cannot'.
    """
    if os.path.exists(path):
        debug('deleting: ' + path)
        os.remove(path)
    else:
        # Best-effort: report and continue rather than raising.
        error('cannot delete {0}. file does not exist'.format(path))
def clear_dir(dir):
    """Empty *dir* by deleting the whole tree and recreating it (must exist)."""
    shutil.rmtree(dir)
    os.mkdir(dir)
def recreate_dir(dir):
    """Ensure *dir* exists fresh and empty, deleting any previous contents."""
    if os.path.exists(dir):
        shutil.rmtree(dir)
    os.mkdir(dir)
def create_dir_if_not_exist(dir):
    """Create *dir* (including missing parents) unless it already exists."""
    if os.path.exists(dir):
        return
    os.makedirs(dir)
############################################################
### debug messages util methods
def debug(msg):
    """Print an INFO-level message tagged with the global TAG (colored on non-Windows)."""
    if is_windows():
        print(('* [{0}][INFO]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][INFO]:{2} {3}').format(CBOLD, TAG, CEND, msg))
def debug_green(msg):
    """Print an INFO-level message in green (plain text on Windows)."""
    if is_windows():
        print(('* [{0}][INFO]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][INFO]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CGREEN, msg, CEND))
def debug_blue(msg):
    """Print an INFO-level message in blue (plain text on Windows)."""
    if is_windows():
        print(('* [{0}][INFO]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][INFO]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CBLUE, msg, CEND))
def error(msg, do_exit=False):
    """Print an ERROR-level message in red; optionally terminate the script."""
    if is_windows():
        print(('* [{0}][ERROR]: {1}').format(TAG, msg))
    else:
        print(('{0}* [{1}][ERROR]:{2} {3}{4}{5}').format(CBOLD, TAG, CEND, CRED, msg, CEND))
    if do_exit:
        exit()
############################################################
### util
def check_submodule_dir(platform, submodule_dir):
    """Exit the script if *submodule_dir* is missing or empty (uninitialized submodule)."""
    missing_or_empty = not os.path.isdir(submodule_dir) or not os.listdir(submodule_dir)
    if missing_or_empty:
        error('Submodule [{0}] folder empty. Did you forget to run >> git submodule update --init --recursive << ?'.format(platform))
        exit()
def is_windows():
    """Return True when the script runs on a Windows host."""
    return 'windows' == platform.system().lower()
# https://stackoverflow.com/questions/17140886/how-to-search-and-replace-text-in-a-file-using-python
def replace_text_in_file(file_path, substring, replace_with):
    """Replace every occurrence of *substring* in the file at *file_path* in place."""
    # Slurp the whole file, substitute, then rewrite it.
    with open(file_path, 'r') as handle:
        contents = handle.read()
    contents = contents.replace(substring, replace_with)
    with open(file_path, 'w') as handle:
        handle.write(contents)
def execute_command(cmd_params, log=True):
    """Run an external command given as an argv list, optionally logging it first."""
    if log:
        printable = ' '.join(str(part) for part in cmd_params)
        debug_blue('Executing: [{0}]'.format(printable))
    subprocess.call(cmd_params)
def change_dir(dir):
    """Change the process working directory to *dir* (thin os.chdir wrapper)."""
    os.chdir(dir)
def get_env_variable(var_name):
    """Return the value of environment variable *var_name* (KeyError if unset)."""
    return os.environ[var_name]
def xcode_build(target, configuration='Release'):
    """Clean-build an Xcode target with the legacy build system."""
    execute_command(['xcodebuild', '-target', target, '-configuration', configuration, 'clean', 'build', '-UseModernBuildSystem=NO'])
def adb_uninstall(package):
    """Uninstall the given package from the connected Android device via adb."""
    execute_command(['adb', 'uninstall', package])
def adb_install_apk(path):
    """Install (or reinstall, -r) the APK at *path* onto the connected device."""
    execute_command(['adb', 'install', '-r', path])
def adb_shell(app_package):
    """Launch *app_package* on the device by sending a single monkey event."""
    execute_command(['adb', 'shell', 'monkey', '-p', app_package, '1'])
def gradle_make_release_jar(do_clean=False):
    """Build the release core JAR via Gradle, optionally running 'clean' first."""
    cmd = ['./gradlew', 'adjustCoreJarRelease']
    if do_clean:
        cmd.insert(1, 'clean')
    execute_command(cmd)
def gradle_make_debug_jar(do_clean=False):
    """Build the debug core JAR via Gradle, optionally running 'clean' first."""
    cmd = ['./gradlew', 'adjustCoreJarDebug']
    if do_clean:
        cmd.insert(1, 'clean')
    execute_command(cmd)
def gradle_run(options):
    """Invoke ./gradlew with the given iterable of extra arguments."""
    execute_command(['./gradlew'] + list(options))
def inject_cpp_bridge(android_proxy_dir, with_test_lib):
    """Run the proxy's inject_cpp_bridge.sh, optionally including the test library."""
    cmd = ['{0}/inject_cpp_bridge.sh'.format(android_proxy_dir)]
    if with_test_lib:
        cmd.append('--with-test-lib')
    execute_command(cmd)
def update_dist(root_dir):
    """Rebuild <root_dir>/dist from <root_dir>/src, stripping the test folder."""
    debug_green('Populate dist folder with files needed for clients ...')
    recreate_dir('{0}/dist'.format(root_dir))
    copy_dir_contents('{0}/src'.format(root_dir), '{0}/dist'.format(root_dir))
    # Tests are not shipped to clients.
    remove_dir_if_exists('{0}/dist/test'.format(root_dir))
### cocos specific
def cocos_new_project(package_name, app_path, name):
    """Scaffold a new C++ cocos2d-x project via the cocos CLI."""
    execute_command(['cocos', 'new', '-l', 'cpp', '-p', package_name, '-d', app_path, name])
| |
import datetime
import os
from enum import Enum
from flask import Flask
from flask_marshmallow import Marshmallow
from marshmallow_enum import EnumField
import postgresql_db.database as database
app = Flask(__name__)  # Flask application instance shared by the whole module
# Connect to PostgreSQL; the password comes from the POSTGRES_PASSWORD
# environment variable (KeyError at import time if it is unset).
db = database.connect(app, 'postgres', os.environ['POSTGRES_PASSWORD'], 'postgres')
ma = Marshmallow(app)  # Marshmallow integration used by the schema classes below
'''
-----
Enums
-----
'''
class SenderType(Enum):
    """Who authored a chat message: the human user or the bot."""
    USER = "USER"
    BOT = "BOT"
class PersonType(Enum):
    """Role of the person in the dispute: landlord or tenant."""
    LANDLORD = "LANDLORD"
    TENANT = "TENANT"
class BotState(Enum):
    """Stage of the bot's conversation state machine."""
    DETERMINE_CLAIM_CATEGORY = "DETERMINE_CLAIM_CATEGORY"
    RESOLVING_FACTS = "RESOLVING_FACTS"
    RESOLVING_ADDITIONAL_FACTS = "RESOLVING_ADDITIONAL_FACTS"
    AWAITING_ACKNOWLEDGEMENT = "AWAITING_ACKNOWLEDGEMENT"
    GIVING_PREDICTION = "GIVING_PREDICTION"
class ClaimCategory(Enum):
    """Category of the legal claim the conversation is about."""
    LEASE_TERMINATION = "LEASE_TERMINATION"
    NONPAYMENT = "NONPAYMENT"
    RETAKE_RENTAL = "RETAKE_RENTAL"
class FactType(Enum):
    """Data type of a fact's answer (drives how the bot asks and validates)."""
    TEXT = "TEXT"
    BOOLEAN = "BOOLEAN"
    MONEY = "MONEY"
    DATE = "DATE"
    DURATION_MONTHS = "DURATION_MONTHS"
class DocumentType(Enum):
    """Kind of document the bot may request from the user."""
    LEASE = "LEASE"
'''
-----------------
SQLAlchemy Models
-----------------
'''
class Fact(db.Model):
    """A fact the bot can ask about, with a short summary and its answer type."""
    id = db.Column(db.Integer, primary_key=True)
    # Attributes
    name = db.Column(db.String(50), nullable=False)
    summary = db.Column(db.String(60), nullable=False)
    type = db.Column(db.Enum(FactType), nullable=False)
class Conversation(db.Model):
    """A user's dialogue session: bot state, collected facts, messages, files."""
    id = db.Column(db.Integer, primary_key=True)
    # Attributes
    name = db.Column(db.String(40), nullable=False)
    person_type = db.Column(db.Enum(PersonType))
    claim_category = db.Column(db.Enum(ClaimCategory))
    bot_state = db.Column(db.Enum(BotState))
    # Documents
    report = db.Column(db.Text)
    # One to one: the fact the bot is currently resolving (may be NULL)
    current_fact_id = db.Column(db.Integer, db.ForeignKey('fact.id'))
    current_fact = db.relationship('Fact', uselist=False, backref='conversation')
    # One to many; fact entities are deleted with the conversation
    messages = db.relationship('Message')
    fact_entities = db.relationship('FactEntity', cascade="all, delete-orphan")
    files = db.relationship('File')
class Message(db.Model):
    """A single chat message belonging to a Conversation."""
    id = db.Column(db.Integer, primary_key=True)
    # Foreign Keys
    conversation_id = db.Column(db.Integer, db.ForeignKey('conversation.id'))
    # Attributes
    sender_type = db.Column(db.Enum(SenderType), nullable=False)
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    text = db.Column(db.Text, nullable=False)
    possible_answers = db.Column(db.Text)
    enforce_possible_answer = db.Column(db.Boolean)
    # One to one
    relevant_fact_id = db.Column(db.Integer, db.ForeignKey('fact.id'))
    relevant_fact = db.relationship('Fact', uselist=False, backref='message')
    file_request = db.relationship('FileRequest', uselist=False, backref='message')
    def request_file(self, document_type):
        """Attach a FileRequest asking the user for a document of *document_type*."""
        self.file_request = FileRequest(document_type=document_type)
class UserConfirmation(db.Model):
    """A user's confirmation text tied to a fact and the message that asked it."""
    id = db.Column(db.Integer, primary_key=True)
    # Foreign Keys
    fact_id = db.Column(db.Integer, db.ForeignKey('fact.id'))
    message_id = db.Column(db.Integer, db.ForeignKey('message.id'))
    # Attributes
    text = db.Column(db.Text, nullable=False)
class FileRequest(db.Model):
    """A request, attached to a Message, for the user to upload a document."""
    id = db.Column(db.Integer, primary_key=True)
    # Foreign Keys
    message_id = db.Column(db.Integer, db.ForeignKey('message.id'))
    # Attributes
    document_type = db.Column(db.Enum(DocumentType), nullable=False)
    def __init__(self, document_type):
        # Explicit constructor so Message.request_file can build one directly.
        self.document_type = document_type
class FactEntity(db.Model):
    """The resolved value of one Fact within one Conversation."""
    id = db.Column(db.Integer, primary_key=True)
    # Foreign Keys
    conversation_id = db.Column(db.Integer, db.ForeignKey('conversation.id'))
    # One to one
    fact_id = db.Column(db.Integer, db.ForeignKey('fact.id'))
    fact = db.relationship('Fact', uselist=False, backref='factentity')
    # Attributes: value stored as a string regardless of FactType
    value = db.Column(db.String(255), nullable=False)
class File(db.Model):
    """A file uploaded during a Conversation (name, MIME-ish type, storage path)."""
    id = db.Column(db.Integer, primary_key=True)
    # Foreign Keys
    conversation_id = db.Column(db.Integer, db.ForeignKey('conversation.id'))
    # Attributes
    name = db.Column(db.String(50), nullable=False)
    type = db.Column(db.String(50), nullable=False)
    path = db.Column(db.String(100))
    timestamp = db.Column(db.DateTime, default=datetime.datetime.utcnow)
class Feedback(db.Model):
    """Free-form user feedback, not tied to any conversation."""
    id = db.Column(db.Integer, primary_key=True)
    # Attributes
    feedback = db.Column(db.Text, nullable=False)
'''
----------------------
Bootstrapping Database
----------------------
'''
# Import-time side effect: create all tables declared above (no-op if present).
print("Creating database tables from models.py")
db.create_all()
print("Loading database with pre-defined fact values.")
# Function that persists a model to the db if it doesn't already exist
def get_or_create(session, model, **kwargs):
    """Return the first *model* row matching *kwargs*, creating and committing it if absent."""
    existing = session.query(model).filter_by(**kwargs).first()
    if existing is not None:
        return existing
    created = model(**kwargs)
    session.add(created)
    session.commit()
    return created
# Seed data for the Fact table: every fact the bot knows how to ask about.
# 'name' is the internal identifier, 'summary' the human-readable label,
# 'type' the expected answer type.
defined_facts = [
    {'name': 'apartment_dirty', 'summary': 'Dwelling unfit for habitation', 'type': FactType.BOOLEAN},
    {'name': 'bothers_others', 'summary': 'Tenant is disruptive', 'type': FactType.BOOLEAN},
    {'name': 'disrespect_previous_judgement', 'summary': 'Previous board judgements disrespected',
     'type': FactType.BOOLEAN},
    {'name': 'landlord_inspector_fees', 'summary': 'Inspector hired', 'type': FactType.BOOLEAN},
    {'name': 'landlord_notifies_tenant_retake_apartment', 'summary': 'Notified tenant of dwelling retake',
     'type': FactType.BOOLEAN},
    {'name': 'landlord_pays_indemnity', 'summary': 'Landlord paid compensation', 'type': FactType.BOOLEAN},
    {'name': 'landlord_relocation_indemnity_fees', 'summary': 'Relocation fees compensated',
     'type': FactType.BOOLEAN},
    {'name': 'landlord_rent_change', 'summary': 'Rent change attempted', 'type': FactType.BOOLEAN},
    {'name': 'landlord_rent_change_doc_renseignements', 'summary': 'Rent change documents filed',
     'type': FactType.BOOLEAN},
    {'name': 'landlord_retakes_apartment', 'summary': 'Landlord intends to retake dwelling', 'type': FactType.BOOLEAN},
    {'name': 'landlord_sends_demand_regie_logement', 'summary': 'Landlord filed legal inquiry',
     'type': FactType.BOOLEAN},
    {'name': 'rent_increased', 'summary': 'Rent increased during lease term', 'type': FactType.BOOLEAN},
    {'name': 'tenant_continuous_late_payment', 'summary': 'Tenant continually pays rent late',
     'type': FactType.BOOLEAN},
    {'name': 'tenant_sends_demand_regie_logement', 'summary': 'Tenant filed legal inquiry', 'type': FactType.BOOLEAN},
    {'name': 'signed_proof_of_rent_debt', 'summary': 'Debt acknowledgement provided', 'type': FactType.BOOLEAN},
    {'name': 'tenant_damaged_rental', 'summary': 'Rental property damaged', 'type': FactType.BOOLEAN},
    {'name': 'tenant_dead', 'summary': 'Tenant deceased', 'type': FactType.BOOLEAN},
    {'name': 'tenant_financial_problem', 'summary': 'Financial issues impeding rent payment', 'type': FactType.BOOLEAN},
    {'name': 'tenant_individual_responsability', 'summary': 'Single tenant on lease', 'type': FactType.BOOLEAN},
    {'name': 'tenant_is_bothered', 'summary': 'Perte de jouissance', 'type': FactType.BOOLEAN},
    {'name': 'tenant_lease_fixed', 'summary': 'Lease end date stated explicitly', 'type': FactType.BOOLEAN},
    {'name': 'tenant_left_without_paying', 'summary': 'Tenant left without paying', 'type': FactType.BOOLEAN},
    {'name': 'tenant_monthly_payment', 'summary': 'Monthly rent payment', 'type': FactType.MONEY},
    {'name': 'tenant_owes_rent', 'summary': 'Tenant owes overdue rent', 'type': FactType.MONEY},
    {'name': 'tenant_refuses_retake_apartment', 'summary': 'Tenant refuses dwelling retake', 'type': FactType.BOOLEAN},
    {'name': 'tenant_rent_not_paid_more_3_weeks', 'summary': 'Rent overdue over 3 weeks', 'type': FactType.BOOLEAN},
    {'name': 'tenant_withold_rent_without_permission', 'summary': 'Rent withheld lacking legal permission',
     'type': FactType.BOOLEAN},
    {'name': 'tenant_not_paid_lease_timespan', 'summary': 'Months rent payment overdue',
     'type': FactType.DURATION_MONTHS},
    {'name': 'violent', 'summary': 'Dispute precipitated violence', 'type': FactType.BOOLEAN}
]
# Idempotently seed the Fact table: restarting the app does not duplicate rows.
for fact_dict in defined_facts:
    get_or_create(db.session, Fact, name=fact_dict['name'], summary=fact_dict['summary'], type=fact_dict['type'])
print("Finished loading pre-defined fact values.")
'''
-------------------
Marshmallow Schemas
-------------------
'''
class FileRequestSchema(ma.ModelSchema):
    """Serializes FileRequest rows; document_type is emitted as its enum value."""
    # Enum
    document_type = EnumField(DocumentType, by_value=True)
    class Meta:
        # Tuple form for consistency with the other schemas in this module.
        fields = ('document_type',)
class FactSchema(ma.ModelSchema):
    """Serializes Fact rows; type is emitted as its enum value."""
    # Enum
    type = EnumField(FactType, by_value=True)
    class Meta:
        fields = ('name', 'summary', 'type')
class FactEntitySchema(ma.ModelSchema):
    """Serializes FactEntity rows with the related Fact embedded."""
    # One to one
    fact = ma.Nested(FactSchema)
    class Meta:
        fields = ('id', 'value', 'fact')
class FileSchema(ma.ModelSchema):
    """Serializes File rows; storage path is intentionally not exposed."""
    class Meta:
        fields = ('name', 'type', 'timestamp')
class FeedbackSchema(ma.ModelSchema):
    """Serializes Feedback rows (feedback text only)."""
    class Meta:
        # Tuple form for consistency with the other schemas in this module.
        fields = ('feedback',)
class MessageSchema(ma.ModelSchema):
    """Serializes Message rows; unlike the other schemas it derives its
    field list from the model rather than an explicit `fields` tuple."""
    # Enum
    sender_type = EnumField(SenderType, by_value=True)
    # One to one
    relevant_fact = ma.Nested(FactSchema)
    file_request = ma.Nested(FileRequestSchema)
    class Meta:
        model = Message
class ConversationSchema(ma.ModelSchema):
    """Serializes Conversation rows with nested messages and fact entities."""
    # Enum
    person_type = EnumField(PersonType, by_value=True)
    claim_category = EnumField(ClaimCategory, by_value=True)
    bot_state = EnumField(BotState, by_value=True)
    # One to one
    current_fact = ma.Nested(FactSchema)
    # One to many
    messages = ma.Nested(MessageSchema, many=True)
    fact_entities = ma.Nested(FactEntitySchema, many=True)
    class Meta:
        fields = (
            'id', 'name', 'person_type', 'claim_category', 'bot_state', 'current_fact', 'messages', 'fact_entities')
| |
from sympy import (Symbol, Set, Union, Interval, oo, S, sympify, nan,
    GreaterThan, LessThan, Max, Min, And, Or, Eq, Ge, Le, Gt, Lt, Float,
    FiniteSet, Intersection, imageset, I, true, false, ProductSet, E,
    sqrt, Complement, EmptySet, sin, cos, Lambda, ImageSet, pi)
from sympy.mpmath import mpi
# Fix: the bare `import raises` line duplicated this import; the combined
# form below still provides both names.
from sympy.utilities.pytest import raises, XFAIL
from sympy.abc import x, y, z
def test_interval_arguments():
    """Interval construction: open/closed defaults, degenerate cases, bad arguments."""
    assert Interval(0, oo) == Interval(0, oo, False, True)
    assert Interval(0, oo).right_open is true
    assert Interval(-oo, 0) == Interval(-oo, 0, True, False)
    assert Interval(-oo, 0).left_open is true
    assert isinstance(Interval(1, 1), FiniteSet)
    assert Interval(1, 0) == S.EmptySet
    assert Interval(1, 1).measure == 0
    assert Interval(1, 1, False, True) == S.EmptySet
    assert Interval(1, 1, True, False) == S.EmptySet
    assert Interval(1, 1, True, True) == S.EmptySet
    raises(ValueError, lambda: Interval(0, S.ImaginaryUnit))
    raises(ValueError, lambda: Interval(0, Symbol('z')))
    raises(NotImplementedError, lambda: Interval(0, 1, And(x, y)))
    raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y)))
    raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y)))
    assert isinstance(Interval(1, Symbol('a', real=True)), Interval)
def test_interval_symbolic_end_points():
    """sup/inf of unions with a symbolic endpoint collapse to Max/Min."""
    sym = Symbol('a', real=True)
    assert Union(Interval(0, sym), Interval(0, 3)).sup == Max(sym, 3)
    assert Union(Interval(sym, 0), Interval(-3, 0)).inf == Min(-3, sym)
    assert Interval(0, sym).contains(1) == LessThan(1, sym)
def test_union():
    """Union simplification over Intervals, FiniteSets, and mixes of both."""
    assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
    assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3)
    assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4)
    assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3)
    assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3)
    assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \
        Interval(1, 3, False, True)
    assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3)
    assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3)
    assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \
        Interval(1, 3, True)
    assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \
        Interval(1, 3, True, True)
    assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \
        Interval(1, 3, True)
    assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3)
    assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \
        Interval(1, 3)
    # Touching open endpoints do not close the gap.
    assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \
        Interval(1, 3)
    assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2)
    assert Union(S.EmptySet) == S.EmptySet
    assert Union(Interval(0, 1), [FiniteSet(1.0/n) for n in range(1, 10)]) == \
        Interval(0, 1)
    assert Interval(1, 2).union(Interval(2, 3)) == \
        Interval(1, 2) + Interval(2, 3)
    assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3)
    assert Union(Set()) == Set()
    assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3)
    assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs')
    assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3)
    assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3)
    assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4)
    x = Symbol("x")
    y = Symbol("y")
    z = Symbol("z")
    assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \
        FiniteSet(x, FiniteSet(y, z))
    # Test that Intervals and FiniteSets play nicely
    assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3)
    assert Interval(1, 3, True, True) + FiniteSet(3) == \
        Interval(1, 3, True, False)
    X = Interval(1, 3) + FiniteSet(5)
    Y = Interval(1, 2) + FiniteSet(3)
    XandY = X.intersect(Y)
    assert 2 in X and 3 in X and 3 in XandY
    assert XandY.is_subset(X) and XandY.is_subset(Y)
    raises(TypeError, lambda: Union(1, 2, 3))
    assert X.is_iterable is False
    # issue 7843
    assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == FiniteSet(-sqrt(-I), sqrt(-I))
def test_difference():
    """Set subtraction across Interval/FiniteSet/Union combinations."""
    assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True)
    assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True)
    assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True)
    assert Interval(1, 3, True) - Interval(2, 3, True) == \
        Interval(1, 2, True, False)
    assert Interval(0, 2) - FiniteSet(1) == \
        Union(Interval(0, 1, False, True), Interval(1, 2, True, False))
    assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3)
    assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham')
    assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \
        FiniteSet(1, 2)
    assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4)
    assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \
        Union(Interval(0, 1, False, True), FiniteSet(4))
    assert -1 in S.Reals - S.Naturals
def test_Complement():
    """The Complement class, both evaluated and unevaluated.

    Fix: replaced the non-idiomatic `not X in S` forms with `X not in S`
    (identical semantics, preferred Python idiom).
    """
    assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True)
    assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1)
    assert Complement(Union(Interval(0, 2),
                            FiniteSet(2, 3, 4)), Interval(1, 3)) == \
        Union(Interval(0, 1, False, True), FiniteSet(4))
    assert 3 not in Complement(Interval(0, 5), Interval(1, 4), evaluate=False)
    assert -1 in Complement(S.Reals, S.Naturals, evaluate=False)
    assert 1 not in Complement(S.Reals, S.Naturals, evaluate=False)
    assert Complement(S.Integers, S.UniversalSet) == EmptySet()
    assert S.UniversalSet.complement(S.Integers) == EmptySet()
    assert 0 not in S.Reals.intersect(S.Integers - FiniteSet(0))
    assert S.EmptySet - S.Integers == S.EmptySet
    assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == S.Integers - FiniteSet(0, 1)
    assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \
        Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi))
def test_complement():
    """The .complement(universe) method on Intervals, FiniteSets and products."""
    assert Interval(0, 1).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True))
    assert Interval(0, 1, True, False).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True))
    assert Interval(0, 1, False, True).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True))
    assert Interval(0, 1, True, True).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True))
    assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet
    assert S.UniversalSet.complement(S.Reals) == S.EmptySet
    assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet
    assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True),
              Interval(3, oo, True, True))
    assert FiniteSet(0).complement(S.Reals) == \
        Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True))
    assert (FiniteSet(5) + Interval(S.NegativeInfinity,
                                    0)).complement(S.Reals) == \
        Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True)
    assert FiniteSet(1, 2, 3).complement(S.Reals) == \
        Interval(S.NegativeInfinity, 1, True, True) + \
        Interval(1, 2, True, True) + Interval(2, 3, True, True) +\
        Interval(3, S.Infinity, True, True)
    assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x))
    # With a symbol present the complement stays partially unevaluated.
    assert FiniteSet(0, x).complement(S.Reals) == Complement(Interval(-oo, 0, True, True) +
                                                            Interval(0, oo, True, True)
                                                            ,FiniteSet(x), evaluate=False)
    square = Interval(0, 1) * Interval(0, 1)
    notsquare = square.complement(S.Reals*S.Reals)
    assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
    assert not any(
        pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)])
    assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)])
    assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)])
def test_intersect():
    """The .intersect method (and its .intersection alias)."""
    x = Symbol('x')
    assert Interval(0, 2).intersect(Interval(1, 2)) == Interval(1, 2)
    assert Interval(0, 2).intersect(Interval(1, 2, True)) == \
        Interval(1, 2, True)
    assert Interval(0, 2, True).intersect(Interval(1, 2)) == \
        Interval(1, 2, False, False)
    assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \
        Interval(1, 2, False, True)
    assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \
        Union(Interval(0, 1), Interval(2, 2))
    assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x)
    assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \
        FiniteSet('ham')
    assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet
    assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3)
    assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \
        Union(Interval(1, 1), Interval(2, 2))
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \
        Union(Interval(0, 1), Interval(2, 2))
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \
        S.EmptySet
    assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \
        S.EmptySet
    assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \
        FiniteSet(2, 3, 4, 5)
    # tests for the intersection alias
    assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3)
    assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet
    assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \
        Union(Interval(1, 1), Interval(2, 2))
def test_intersection():
    """The Intersection class: iterability, symbolic endpoints, products, nesting."""
    # iterable
    i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False)
    assert i.is_iterable
    assert set(i) == set([S(2), S(3)])
    # challenging intervals
    x = Symbol('x', real=True)
    i = Intersection(Interval(0, 3), Interval(x, 6))
    assert (5 in i) is False
    raises(TypeError, lambda: 2 in i)
    # Singleton special cases
    assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet
    # Products
    line = Interval(0, 5)
    i = Intersection(line**2, line**3, evaluate=False)
    assert (2, 2) not in i
    assert (2, 2, 2) not in i
    raises(ValueError, lambda: list(i))
    # Nested unevaluated Intersections flatten.
    assert Intersection(Intersection(S.Integers, S.Naturals, evaluate=False),
                        S.Reals, evaluate=False) == \
        Intersection(S.Integers, S.Naturals, S.Reals, evaluate=False)
def test_is_disjoint():
    """Overlapping intervals are not disjoint; separated ones are."""
    assert not Interval(0, 2).is_disjoint(Interval(1, 2))
    assert Interval(0, 2).is_disjoint(Interval(3, 4))
def test_ProductSet_of_single_arg_is_arg():
    """A one-argument ProductSet collapses to its argument."""
    assert ProductSet(Interval(0, 1)) == Interval(0, 1)
def test_interval_subs():
    """Substituting a number for a symbolic endpoint re-evaluates the interval."""
    sym = Symbol('a', real=True)
    assert Interval(0, sym).subs(sym, 2) == Interval(0, 2)
    assert Interval(sym, 0).subs(sym, 2) == S.EmptySet
def test_interval_to_mpi():
    """Intervals convert to mpmath mpi objects; openness is dropped."""
    closed = Interval(0, 1)
    assert closed.to_mpi() == mpi(0, 1)
    assert type(closed.to_mpi()) == type(mpi(0, 1))
    assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1)
def test_measure():
    """Lebesgue-style measure of intervals, unions, finite sets, and products."""
    a = Symbol('a', real=True)
    assert Interval(1, 3).measure == 2
    assert Interval(0, a).measure == a
    assert Interval(1, a).measure == a - 1
    assert Union(Interval(1, 2), Interval(3, 4)).measure == 2
    assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \
        == 2
    assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0
    assert S.EmptySet.measure == 0
    square = Interval(0, 10) * Interval(0, 10)
    offsetsquare = Interval(5, 15) * Interval(5, 15)
    band = Interval(-oo, oo) * Interval(2, 4)
    assert square.measure == offsetsquare.measure == 100
    assert (square + offsetsquare).measure == 175  # there is some overlap
    assert (square - offsetsquare).measure == 75
    assert (square * FiniteSet(1, 2, 3)).measure == 0
    assert (square.intersect(band)).measure == 20
    assert (square + band).measure == oo
    assert (band * FiniteSet(1, 2, 3)).measure == nan
def test_is_subset():
    """is_subset (and its issubset alias) across set types."""
    assert Interval(0, 1).is_subset(Interval(0, 2)) is True
    assert Interval(0, 3).is_subset(Interval(0, 2)) is False
    assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4))
    assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False
    assert FiniteSet(1).is_subset(Interval(0, 2))
    assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False
    assert (Interval(1, 2) + FiniteSet(3)).is_subset(
        (Interval(0, 2, False, True) + FiniteSet(2, 3)))
    assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True
    assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False
    assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True
    assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True
    assert Interval(0, 1).is_subset(S.EmptySet) is False
    assert S.EmptySet.is_subset(S.EmptySet) is True
    raises(ValueError, lambda: S.EmptySet.is_subset(1))
    # tests for the issubset alias
    assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True
    assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True
def test_is_proper_subset():
    """A set is a proper subset only of a strictly larger set; non-sets raise."""
    assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False
    assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True
    assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True
    raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0))
def test_is_superset():
    """is_superset (and its issuperset alias) across set types."""
    assert Interval(0, 1).is_superset(Interval(0, 2)) == False
    assert Interval(0, 3).is_superset(Interval(0, 2))
    assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False
    assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False
    assert FiniteSet(1).is_superset(Interval(0, 2)) == False
    assert FiniteSet(1, 2).is_superset(Interval(0, 2, True, True)) == False
    assert (Interval(1, 2) + FiniteSet(3)).is_superset(
        (Interval(0, 2, False, True) + FiniteSet(2, 3))) == False
    assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False
    assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False
    assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False
    assert Interval(0, 1).is_superset(S.EmptySet) == True
    assert S.EmptySet.is_superset(S.EmptySet) == True
    raises(ValueError, lambda: S.EmptySet.is_superset(1))
    # tests for the issuperset alias
    assert Interval(0, 1).issuperset(S.EmptySet) == True
    assert S.EmptySet.issuperset(S.EmptySet) == True
def test_is_proper_superset():
    """A set is a proper superset only of a strictly smaller set; non-sets raise."""
    assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True
    assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False
    assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True
    raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0))
def test_contains():
    """.contains returns S.true/S.false for concrete points, honoring openness."""
    assert Interval(0, 2).contains(1) is S.true
    assert Interval(0, 2).contains(3) is S.false
    assert Interval(0, 2, True, False).contains(0) is S.false
    assert Interval(0, 2, True, False).contains(2) is S.true
    assert Interval(0, 2, False, True).contains(0) is S.true
    assert Interval(0, 2, False, True).contains(2) is S.false
    assert Interval(0, 2, True, True).contains(0) is S.false
    assert Interval(0, 2, True, True).contains(2) is S.false
    assert FiniteSet(1, 2, 3).contains(2) is S.true
    assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true
    items = [1, 2, S.Infinity, S('ham'), -1.1]
    fset = FiniteSet(*items)
    assert all(item in fset for item in items)
    assert all(fset.contains(item) is S.true for item in items)
    assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true
    assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false
    assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false
    assert S.EmptySet.contains(1) is S.false
def test_interval_symbolic():
    """contains() with a symbol yields the defining inequalities; `in` raises."""
    sym = Symbol('x')
    closed = Interval(0, 1)
    assert closed.contains(sym) == And(0 <= sym, sym <= 1)
    raises(TypeError, lambda: sym in closed)
    fully_open = Interval(0, 1, True, True)
    assert fully_open.contains(sym) == And(0 < sym, sym < 1)
def test_union_contains():
    """Membership in a Union: symbolic contains() and concrete substitutions."""
    x = Symbol('x')
    i1 = Interval(0, 1)
    i2 = Interval(2, 3)
    i3 = Union(i1, i2)
    raises(TypeError, lambda: x in i3)
    e = i3.contains(x)
    assert e == Or(And(0 <= x, x <= 1), And(2 <= x, x <= 3))
    assert e.subs(x, -0.5) is false
    assert e.subs(x, 0.5) is true
    assert e.subs(x, 1.5) is false
    assert e.subs(x, 2.5) is true
    assert e.subs(x, 3.5) is false
    U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6)
    assert all(el not in U for el in [0, 4, -oo])
    assert all(el in U for el in [2, 5, 10])
def test_is_number():
    """Sets are never numbers."""
    assert Set().is_number is False
    assert Interval(0, 1).is_number is False
def test_Interval_is_left_unbounded():
    """is_left_unbounded is True exactly when the left end is -oo."""
    assert Interval(3, 4).is_left_unbounded is False
    # Both the symbolic -oo and a float -inf count as unbounded.
    for left in (-oo, Float("-inf")):
        assert Interval(left, 3).is_left_unbounded is True
def test_Interval_is_right_unbounded():
    """is_right_unbounded is True exactly when the right end is +oo."""
    assert Interval(3, 4).is_right_unbounded is False
    # Both the symbolic oo and a float +inf count as unbounded.
    for right in (oo, Float("+inf")):
        assert Interval(3, right).is_right_unbounded is True
def test_Interval_as_relational():
    """as_relational maps open/closed endpoints to strict/non-strict bounds."""
    x = Symbol('x')
    # Each (left_open, right_open) pair picks Lt/Le on the matching side.
    for left_open, lo_rel in ((False, Le), (True, Lt)):
        for right_open, hi_rel in ((False, Le), (True, Lt)):
            assert Interval(-1, 2, left_open, right_open).as_relational(x) == \
                And(lo_rel(-1, x), hi_rel(x, 2))
    # Infinite endpoints always become strict comparisons against +-oo.
    assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Le(x, 2), Lt(-oo, x))
    assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(x, 2), Lt(-oo, x))
    assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo))
    assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo))
    assert Interval(-oo, oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo))
def test_Finite_as_relational():
    """A FiniteSet turns into a disjunction of equalities."""
    x, y = Symbol('x'), Symbol('y')
    assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2))
    # Symbolic members simply become symbolic equalities.
    assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5))
def test_Union_as_relational():
    """A Union's relational form is the Or of its parts (after simplification)."""
    x = Symbol('x')
    interval_plus_point = Interval(0, 1) + FiniteSet(2)
    assert interval_plus_point.as_relational(x) == \
        Or(And(Le(0, x), Le(x, 1)), Eq(x, 2))
    # The isolated right endpoint merges back into the open interval.
    patched = Interval(0, 1, True, True) + FiniteSet(1)
    assert patched.as_relational(x) == And(Lt(0, x), Le(x, 1))
def test_Intersection_as_relational():
    """An unevaluated Intersection renders as the And of its parts."""
    x = Symbol('x')
    unevaluated = Intersection(Interval(0, 1), FiniteSet(2), evaluate=False)
    assert unevaluated.as_relational(x) == \
        And(And(Le(0, x), Le(x, 1)), Eq(x, 2))
def test_EmptySet():
    """Relational form, intersection and boundary of the empty set."""
    empty = S.EmptySet
    assert empty.as_relational(Symbol('x')) is False
    assert empty.intersect(S.UniversalSet) == empty
    # The empty set is its own (empty) boundary.
    assert empty.boundary == empty
def test_finite_basic():
    """Core FiniteSet behaviour: union/intersection, inf/sup, set ordering."""
    x = Symbol('x')
    small = FiniteSet(1, 2, 3)
    large = FiniteSet(3, 4, 5)
    union = Union(small, large)
    common = small.intersect(large)
    assert small.is_subset(union) and large.is_subset(union)
    assert common.is_subset(small)
    assert common == FiniteSet(3)
    assert small.inf == 1 and small.sup == 3
    assert union.inf == 1 and union.sup == 5
    # Symbolic members leave inf/sup as unevaluated Min/Max expressions.
    assert FiniteSet(x, 1, 5).sup == Max(x, 5)
    assert FiniteSet(x, 1, 5).inf == Min(x, 1)
    # issue 7335: a set containing a set is not that set.
    assert FiniteSet(S.EmptySet) != S.EmptySet
    assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3)
    assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3)
    # Ensure a variety of types can coexist inside one FiniteSet.
    mixed = FiniteSet((1, 2), Float, small, -5, x, 'eggs', x**2, Interval)
    # Comparison operators implement (proper) subset/superset relations.
    assert (small > large) is False
    assert (small >= large) is False
    assert (small < large) is False
    assert (small <= large) is False
    assert union > small and union > large
    assert union >= small and union >= large
    assert small >= small and small <= small
    assert small >= common and large >= common
    assert small > common and large > common
def test_powerset():
    """powerset() of finite sets; unsupported for infinite sets."""
    # The empty set has exactly one subset: itself.
    empty = FiniteSet()
    assert len(empty.powerset()) == 1
    assert empty.powerset() == FiniteSet(S.EmptySet)
    # A two-element set has 2**2 subsets.
    pair = FiniteSet(1, 2)
    pset = pair.powerset()
    assert len(pset) == 2**len(pair)
    assert pset == FiniteSet(FiniteSet(), FiniteSet(1), FiniteSet(2), pair)
    # Intervals are not finite; their powerset is not implemented.
    raises(NotImplementedError, Interval(0, 1).powerset)
def test_product_basic():
    """Cartesian products (ProductSet): membership, subsets, complement, len."""
    H, T = 'H', 'T'
    unit_line = Interval(0, 1)
    d6 = FiniteSet(1, 2, 3, 4, 5, 6)
    d4 = FiniteSet(1, 2, 3, 4)
    coin = FiniteSet(H, T)
    square = unit_line * unit_line
    # Members of a product are tuples; a bare scalar is not a member.
    assert (0, 0) in square
    assert 0 not in square
    assert (H, T) in coin ** 2
    assert (.5, .5, .5) in square * unit_line
    assert (H, 3, 3) in coin * d6* d6
    # Iterating a finite product yields every combination (sympified).
    HH, TT = sympify(H), sympify(T)
    assert set(coin**2) == set(((HH, HH), (HH, TT), (TT, HH), (TT, TT)))
    assert (d4*d4).is_subset(d6*d6)
    # Complement of the unit square within the plane: the union of the two
    # "slab" products left/right of and above/below the square.
    assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union(
        (Interval(-oo, 0, True, True) +
         Interval(1, oo, True, True))*Interval(-oo, oo),
        Interval(-oo, oo)*(Interval(-oo, 0, True, True) +
         Interval(1, oo, True, True)))
    assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3)
    assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 5)**3)
    # Products of different dimension are never subsets of each other.
    assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3)
    assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square) # segment in square
    # len() works for finite products but raises for infinite factors.
    assert len(coin*coin*coin) == 8
    assert len(S.EmptySet*S.EmptySet) == 0
    assert len(S.EmptySet*coin) == 0
    raises(TypeError, lambda: len(coin*Interval(0, 2)))
def test_real():
    """is_subset(S.Reals) for intervals and finite sets, incl. non-real members."""
    x = Symbol('x', real=True, finite=True)
    I = Interval(0, 5)
    J = Interval(10, 20)
    A = FiniteSet(1, 2, 30, x, S.Pi)
    B = FiniteSet(-4, 0)
    C = FiniteSet(100)
    D = FiniteSet('Ham', 'Eggs')  # strings are not real numbers
    real_sets = [I, J, A, B, C]
    assert all(s.is_subset(S.Reals) for s in real_sets)
    assert not D.is_subset(S.Reals)
    # Unions of real sets stay real; adding D poisons any union.
    assert all((a + b).is_subset(S.Reals) for a in real_sets for b in real_sets)
    assert not any((a + D).is_subset(S.Reals) for a in real_sets + [D])
    assert not (I + A + D).is_subset(S.Reals)
def test_supinf():
    """sup/inf of unions and finite sets, with symbols and infinities."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    union = Interval(0, 1) + FiniteSet(2)
    assert union.sup == 2
    assert union.inf == 0
    # With a symbol present the extrema stay as Max/Min expressions.
    symbolic_union = Interval(0, 1) + FiniteSet(x)
    assert symbolic_union.sup == Max(1, x)
    assert symbolic_union.inf == Min(0, x)
    assert FiniteSet(5, 1, x).sup == Max(5, x)
    assert FiniteSet(5, 1, x).inf == Min(1, x)
    assert FiniteSet(5, 1, x, y).sup == Max(5, x, y)
    assert FiniteSet(5, 1, x, y).inf == Min(1, x, y)
    # Infinities dominate any finite or symbolic member.
    mixed = FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity)
    assert mixed.sup == S.Infinity
    assert mixed.inf == S.NegativeInfinity
    # Non-numeric members are handled via Max on the raw objects.
    assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs')
def test_universalset():
    """S.UniversalSet absorbs unions and is neutral for intersections."""
    universe = S.UniversalSet
    x = Symbol('x')
    assert universe.as_relational(x) is True
    assert universe.union(Interval(2, 4)) == universe
    assert universe.intersect(Interval(2, 4)) == Interval(2, 4)
    assert universe.measure == S.Infinity
    assert universe.boundary == S.EmptySet
    assert universe.contains(0) is S.true
def test_Union_of_ProductSets_shares():
    """A product strip already inside the square is absorbed by the union."""
    edge = Interval(0, 2)
    grid = FiniteSet(0, 1, 2)
    assert Union(edge * edge, edge * grid) == edge * edge
def test_Interval_free_symbols():
    """free_symbols collects symbols from interval endpoints (issue 6211)."""
    assert Interval(0, 1).free_symbols == set()
    endpoint = Symbol('x', real=True)
    assert Interval(0, endpoint).free_symbols == set([endpoint])
def test_image_interval():
    """imageset over Intervals: monotone maps, maps with interior extrema,
    and maps with infinite discontinuities."""
    from sympy.core.numbers import Rational
    x = Symbol('x', real=True)
    a = Symbol('a', real=True)
    # A linear map preserves the openness of each endpoint.
    assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2)
    assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \
        Interval(-4, 2, True, False)
    # x**2 attains its minimum 0 *inside* the domain, so the image is closed
    # at 0 even when the domain endpoints are open.
    assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
        Interval(0, 4, False, True)
    assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4)
    assert imageset(x, x**2, Interval(-2, 1, True, False)) == \
        Interval(0, 4, False, True)
    assert imageset(x, x**2, Interval(-2, 1, True, True)) == \
        Interval(0, 4, False, True)
    assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1)
    assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \
        Interval(-35, 0) # Multiple Maxima
    assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \
        + Interval(2, oo) # Single Infinite discontinuity
    assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \
        Interval(Rational(3, 2), oo, False) # Multiple Infinite discontinuities
    # Test for Python lambda
    assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2)
    # With a free symbolic coefficient the image stays an unevaluated ImageSet.
    assert imageset(Lambda(x, a*x), Interval(0, 1)) == \
        ImageSet(Lambda(x, a*x), Interval(0, 1))
    assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \
        ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1))
@XFAIL # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826
def test_image_Intersection():
    """imageset should commute with intersection (known failure, see link)."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \
        Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2)))
def test_image_FiniteSet():
    """imageset maps each member of a FiniteSet individually."""
    t = Symbol('x', real=True)
    assert imageset(t, 2*t, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6)
def test_image_Union():
    """imageset distributes over a Union of an interval and a FiniteSet."""
    t = Symbol('x', real=True)
    mapped = imageset(t, t**2, Interval(-2, 0) + FiniteSet(1, 2, 3))
    # {1, 4} fall inside [0, 4]; only 9 survives as an isolated point.
    assert mapped == (Interval(0, 4) + FiniteSet(9))
def test_image_EmptySet():
    """The image of the empty set is empty."""
    t = Symbol('x', real=True)
    assert imageset(t, 2*t, S.EmptySet) == S.EmptySet
def test_issue_5724_7680():
    """The imaginary unit is not a real number (issues 5724, 7680)."""
    # Both the fast `in` check and the explicit contains() must agree.
    assert I not in S.Reals  # issue 7680
    assert Interval(-oo, oo).contains(I) is S.false
def test_boundary():
    """Boundary of points and intervals is the endpoint set."""
    x = Symbol('x', real=True)
    y = Symbol('y', real=True)
    assert FiniteSet(1).boundary == FiniteSet(1)
    # Openness of the endpoints does not change the boundary.
    open_flags = (true, false)
    assert all(Interval(0, 1, lo, ro).boundary == FiniteSet(0, 1)
               for lo in open_flags for ro in open_flags)
def test_boundary_Union():
    """Boundaries of unions: endpoints interior to the union vanish."""
    assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3)
    # Adjacent half-open intervals still expose the junction point 1.
    joined = Interval(0, 1, False, True) + Interval(1, 2, True, False)
    assert joined.boundary == FiniteSet(0, 1, 2)
    assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2)
    # Overlapping closed intervals: the interior endpoints drop out.
    assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \
        == FiniteSet(0, 15)
    assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \
        == FiniteSet(0, 10)
    # Touching open intervals keep the shared endpoint 10 in the boundary.
    assert Union(Interval(0, 10, True, True),
                 Interval(10, 15, True, True), evaluate=False).boundary \
        == FiniteSet(0, 10, 15)
@XFAIL
def test_union_boundary_of_joining_sets():
    """ Testing the boundary of unions is a hard problem """
    # [0, 10] U [10, 15] equals [0, 15], whose boundary is {0, 15}; the
    # unevaluated Union currently fails to drop the shared point 10.
    assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \
        == FiniteSet(0, 15)
def test_boundary_ProductSet():
    """Topological boundary of open products in the product space."""
    open_square = Interval(0, 1, True, True) ** 2
    # The boundary of the open unit square is its four edges.
    assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1)
                                    + Interval(0, 1) * FiniteSet(0, 1))
    second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True)
    # Two open squares meeting along x == 1: the shared edge remains part
    # of the boundary of the union (the interiors do not connect).
    assert (open_square + second_square).boundary == (
        FiniteSet(0, 1) * Interval(0, 1)
        + FiniteSet(1, 2) * Interval(0, 1)
        + Interval(0, 1) * FiniteSet(0, 1)
        + Interval(1, 2) * FiniteSet(0, 1))
def test_boundary_ProductSet_line():
    """A 1-D segment embedded in R^2 has empty interior, so it is its own boundary."""
    segment = Interval(0, 1) * FiniteSet(0)
    assert segment.boundary == segment
def test_is_open():
    """Only a fully open interval is open; FiniteSets never are."""
    assert Interval(0, 1, True, True).is_open
    for not_open in (Interval(0, 1, False, False),
                     Interval(0, 1, True, False),
                     FiniteSet(1, 2, 3)):
        assert not not_open.is_open
def test_is_closed():
    """Closed intervals and finite sets are closed; half-open ones are not."""
    for closed_set in (Interval(0, 1, False, False), FiniteSet(1, 2, 3)):
        assert closed_set.is_closed
    assert not Interval(0, 1, True, False).is_closed
def test_closure():
    """Closure of a half-open interval closes both endpoints."""
    half_open = Interval(0, 1, False, True)
    assert half_open.closure == Interval(0, 1, False, False)
def test_interior():
    """Interior of a half-open interval opens both endpoints."""
    half_open = Interval(0, 1, False, True)
    assert half_open.interior == Interval(0, 1, True, True)
def test_issue_7841():
    """`symbol in S.Reals` must raise TypeError rather than guess."""
    # NOTE(review): `x` is not defined in this function - it presumably
    # refers to a symbol bound at module level earlier in the file; verify.
    raises(TypeError, lambda: x in S.Reals)
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import json
class ResultType(object):
    """Enumeration of the possible outcomes of a single test run."""
    Pass = 'Pass'
    Failure = 'Failure'
    ImageOnlyFailure = 'ImageOnlyFailure'
    Timeout = 'Timeout'
    Crash = 'Crash'
    Skip = 'Skip'
    # All known outcome values, for validation and iteration.
    values = (Pass, Failure, ImageOnlyFailure, Timeout, Crash, Skip)
class Result(object):
    """Outcome of one test run, with timing and process details."""
    # too many instance attributes pylint: disable=R0902
    # too many arguments pylint: disable=R0913
    def __init__(self, name, actual, started, took, worker,
                 expected=None, unexpected=False,
                 flaky=False, code=0, out='', err='', pid=0):
        """Args:
            name: full (dotted) test name.
            actual: the observed outcome, a ResultType value.
            started: start time, as supplied by the caller.
            took: duration in seconds.
            worker: identifier of the worker that ran the test.
            expected: list of acceptable ResultType values; defaults to
                [ResultType.Pass].
            unexpected: whether `actual` was not among `expected`.
            flaky: whether this result is considered flaky.
            code: exit code of the test process.
            out: captured stdout text.
            err: captured stderr text.
            pid: process id of the test process.
        """
        self.name = name
        self.actual = actual
        self.started = started
        self.took = took
        self.worker = worker
        # Default expectation is a plain pass.
        self.expected = expected or [ResultType.Pass]
        self.unexpected = unexpected
        self.flaky = flaky
        self.code = code
        self.out = out
        self.err = err
        self.pid = pid
class ResultSet(object):
    """Append-only accumulator for Result objects."""

    def __init__(self):
        # Results in the order they were reported.
        self.results = []

    def add(self, result):
        """Record one more result, preserving arrival order."""
        self.results.append(result)
# Separator between components of hierarchical test names (module.class.test).
TEST_SEPARATOR = '.'
def make_full_results(metadata, seconds_since_epoch, all_test_names, results):
    """Convert the typ results to the Chromium JSON test result format.
    See http://www.chromium.org/developers/the-json-test-results-format
    """
    # We use OrderedDicts here so that the output is stable.
    full_results = OrderedDict()
    full_results['version'] = 3
    full_results['interrupted'] = False
    full_results['path_delimiter'] = TEST_SEPARATOR
    full_results['seconds_since_epoch'] = seconds_since_epoch
    # Metadata arrives as 'key=value' strings; merge into the top level.
    for md in metadata:
        key, val = md.split('=', 1)
        full_results[key] = val
    passing_tests = _passing_test_names(results)
    failed_tests = failed_test_names(results)
    # Anything that neither passed nor failed is reported as skipped.
    skipped_tests = set(all_test_names) - passing_tests - failed_tests
    full_results['num_failures_by_type'] = OrderedDict()
    full_results['num_failures_by_type']['FAIL'] = len(failed_tests)
    full_results['num_failures_by_type']['PASS'] = len(passing_tests)
    full_results['num_failures_by_type']['SKIP'] = len(skipped_tests)
    full_results['tests'] = OrderedDict()
    for test_name in all_test_names:
        value = _results_for_test(test_name, results)
        if test_name in skipped_tests:
            value['expected'] = 'SKIP'
        else:
            value['expected'] = 'PASS'
            # A run whose final recorded outcome is FAIL was unexpected.
            if value['actual'].endswith('FAIL'):
                value['is_unexpected'] = True
        _add_path_to_trie(full_results['tests'], test_name, value)
    return full_results
def make_upload_request(test_results_server, builder, master, testtype,
                        full_results):
    """Build the (url, content_type, body) triple for uploading results."""
    # Callers may pass a full URL (with scheme); otherwise default to https.
    if test_results_server.startswith('http'):
        base = test_results_server
    else:
        base = 'https://%s' % test_results_server
    url = '%s/testfile/upload' % base
    attrs = [('builder', builder),
             ('master', master),
             ('testtype', testtype)]
    content_type, data = _encode_multipart_form_data(attrs, full_results)
    return url, content_type, data
def exit_code_from_full_results(full_results):
    """Process exit status: 1 when any test failed, else 0."""
    if num_failures(full_results):
        return 1
    return 0
def num_failures(full_results):
    """Number of failing tests recorded in the Chromium JSON results dict."""
    failures_by_type = full_results['num_failures_by_type']
    return failures_by_type['FAIL']
def failed_test_names(results):
    """Names of tests whose final recorded outcome is a failure.

    A failure followed by a pass or a skip on retry removes the name again:
    it is somewhat counterintuitive that a test that failed and then was
    skipped is not considered failed, but that is at least consistent with
    a test that is skipped every time.
    """
    failed = set()
    for result in results.results:
        if result.actual == ResultType.Failure:
            failed.add(result.name)
        elif (result.name in failed and
              result.actual in (ResultType.Pass, ResultType.Skip)):
            failed.discard(result.name)
    return failed
def _passing_test_names(results):
    """Set of names of every result that passed on any attempt."""
    names = set()
    for result in results.results:
        if result.actual == ResultType.Pass:
            names.add(result.name)
    return names
def _results_for_test(test_name, results):
    """Collect the 'actual' outcome string and timings for one test."""
    # Map recognized outcomes to their JSON codes; other outcomes (e.g.
    # Timeout, Crash) contribute no code but still record their timing.
    outcome_codes = {ResultType.Failure: 'FAIL',
                     ResultType.Pass: 'PASS',
                     ResultType.Skip: 'SKIP'}
    value = OrderedDict()
    actuals = []
    times = []
    for result in results.results:
        if result.name != test_name:
            continue
        code = outcome_codes.get(result.actual)
        if code is not None:
            actuals.append(code)
        # The time a test takes is a floating point number of seconds; if
        # we encoded it unmodified the JSON could grow significantly, so
        # truncate to ten-thousandths of a second - more than good enough
        # for most tests.
        times.append(round(result.took, 4))
    if not actuals:  # pragma: untested
        actuals.append('SKIP')
    value['actual'] = ' '.join(actuals)
    value['times'] = times
    return value
def _add_path_to_trie(trie, path, value):
    """Insert *value* into the nested-dict trie keyed by path components."""
    if TEST_SEPARATOR in path:
        head, tail = path.split(TEST_SEPARATOR, 1)
        subtrie = trie.setdefault(head, {})
        _add_path_to_trie(subtrie, tail, value)
    else:
        # Leaf: the remaining path is the final name component.
        trie[path] = value
def _encode_multipart_form_data(attrs, test_results):
# Cloned from webkitpy/common/net/file_uploader.py
BOUNDARY = '-J-S-O-N-R-E-S-U-L-T-S---B-O-U-N-D-A-R-Y-'
CRLF = '\r\n'
lines = []
for key, value in attrs:
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="%s"' % key)
lines.append('')
lines.append(value)
lines.append('--' + BOUNDARY)
lines.append('Content-Disposition: form-data; name="file"; '
'filename="full_results.json"')
lines.append('Content-Type: application/json')
lines.append('')
lines.append(json.dumps(test_results))
lines.append('--' + BOUNDARY + '--')
lines.append('')
body = CRLF.join(lines)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
| |
#!/usr/bin/env python
# $Id: setup.py,v 1.36.2.17 2008/05/10 08:03:45 marcusva Exp $
# setup script for ocempgui
import distutils.sysconfig
from distutils.core import setup, Extension
from distutils.command.install_data import install_data
import os, sys, glob, time
VERSION = "0.2.9"
# Minimum requirements.
ATK_MINIMUM = "1.18.0"
PYGAME_MINIMUM = (1, 7, 1)
PYTHON_MINIMUM = (2, 3)
# Version string and debug flag handed to the papi C extension build.
PAPI_VERSION = "0.0.5"
PAPI_DEBUG = "1"
##
# General configuration stuff.
##
def check_pkgconfig ():
    """Checks for the pkg-config utility."""
    if sys.platform == "win32":
        return os.system ("pkg-config > NUL") == 0
    else:
        # NOTE(review): pkg-config invoked without arguments exits with
        # status 1 ("must specify package names"), which os.system reports
        # as 256 on POSIX - so 256 means the binary exists and ran.
        # Verify this holds for all shells in use.
        return os.system ("pkg-config 2> /dev/null") == 256
def pkg_get_flags (package, flags, repl=None):
    """Query pkg-config for *package* with the given *flags*.

    Returns the whitespace-split output; when *repl* is given, that
    option prefix (e.g. "-I" or "-L") is stripped from the output first.
    """
    pipe = os.popen ("pkg-config %s %s" % (flags, package), "r")
    data = pipe.readline ().strip ()
    pipe.close ()
    if repl:
        data = data.replace (repl, "")
    return data.split ()
def pkg_get_all_cflags (name):
    """Return (include dirs, library dirs, libraries) for *name* via pkg-config."""
    includes = pkg_get_flags (name, "--cflags-only-I", "-I")
    libdirs = pkg_get_flags (name, "--libs-only-L", "-L")
    libs = pkg_get_flags (name, "--libs-only-l", "-l")
    return includes, libdirs, libs
def get_directory_list (base):
    """Gets a list of subdirectories for the given base path."""
    # Get the needed ocempgui directory.
    realpath = os.path.split (os.path.abspath (sys.argv[0]))[0]
    # First get all the directories.
    paths = glob.glob (os.path.join (realpath, base, "*"))
    dirpaths = []
    for x in paths:
        if os.path.isdir (x):
            # NOTE(review): `x` is already absolute here, so this join
            # discards `base` (os.path.join drops earlier parts when a later
            # part is absolute) and recurses on the absolute path - works on
            # POSIX, but verify this is the intent.
            dirpaths += get_directory_list (os.path.join (base, x))
    # Although this should not happen, guarantee, that there is no CVS
    # target.
    dirpaths = [x for x in dirpaths if x.find ("CVS") == -1]
    # Do not forget the main directory.
    dirpaths = [os.path.join (realpath, base)] + dirpaths
    return dirpaths
def get_installation_files (base, installpath, filedict):
    """Create a nice list from it suitable for the data_files section of the
    distutils setup."""
    # Get the needed ocempgui directory.
    realpath = os.path.split (os.path.abspath (sys.argv[0]))[0]
    filelist = []
    for key in filedict:
        # We also need to get rid of the current directory prefix and
        # set it it to the correct installation prefix.
        try:
            path = key.split (os.path.join (realpath, base, ""))[1]
        except IndexError:
            # We got the main directory.
            path = ""
        path = os.path.join (installpath, path)
        # Add the files.
        files = []
        for installfile in filedict[key]:
            # Strip the absolute source prefix to get a repo-relative path.
            installfile = installfile.split (os.path.join (realpath, ""), 1)[1]
            files.append (installfile)
        filelist.append ((path, files))
    return filelist
##
# Installation routines.
##
def adjust_paths (datadir, files):
    """Replace the @DATAPATH@ placeholder in *files* with the real data path.

    Each file is rewritten in place, substituting the installation data
    directory (<datadir>/share/ocempgui) for every @DATAPATH@ marker.
    """
    path = os.path.join (datadir, "share", "ocempgui")
    for f in files:
        fd = open (f, "r+")
        try:
            lines = fd.readlines ()
            for i, l in enumerate (lines):
                lines[i] = l.replace ("@DATAPATH@", path)
            fd.seek (0)
            fd.writelines (lines)
            # Drop leftover bytes in case the substitution shrank the file;
            # without this, stale tail content would corrupt the module.
            fd.truncate ()
        finally:
            # try/finally (not `with`) keeps Python 2.3 compatibility per
            # PYTHON_MINIMUM, while still closing the handle on errors.
            fd.close ()
class InstallData (install_data):
    """Overrides the install_data behaviour to adjust the data paths."""
    def run (self):
        # Locate the installed Constants.py whose @DATAPATH@ placeholder
        # must be rewritten to point at the final data directory.
        install_lib = self.get_finalized_command ("install_lib")
        bdist = self.get_finalized_command ("bdist")
        files = [os.path.join (install_lib.install_dir, "ocempgui", "widgets",
                               "Constants.py")]
        binary = False
        isrpm = False
        iswininst = False
        # NOTE(review): debug print left in the build output - intended?
        print bdist.bdist_base, self.install_dir
        # Work out from the command line which distribution format is being
        # built; each format stages files under a different base directory.
        for entry in sys.argv[1:]:
            if entry.startswith ("bdist"):
                binary = True
                if entry == "bdist_wininst":
                    iswininst = True
                elif entry == "bdist_rpm":
                    isrpm = True
                elif entry == "--format=rpm":
                    isrpm = True
                elif entry == "--format=wininst":
                    iswininst = True
        # Binary distribution build.
        if binary:
            path = bdist.bdist_base
            if isrpm:
                path = os.path.join (path, "rpm")
            elif iswininst:
                path = os.path.join (path, "wininst")
            else:
                path = os.path.join (path, "dumb")
            # Strip the staging prefix so the embedded path names the
            # eventual install location, not the build tree.
            adjust_paths (self.install_dir.replace(path, ""), files)
        else:
            adjust_paths (self.install_dir, files)
        install_data.run (self)
        # Update the .pyc file (s).
        install_lib.byte_compile (files)
def get_papi_defines ():
    """Build the C preprocessor defines for the papi accessibility extension.

    Returns (name, value) tuples suitable for distutils' define_macros.
    """
    # Use the module-level PAPI_VERSION constant (quoted so the C source
    # sees a string literal) instead of duplicating the version string here;
    # the hard-coded '"0.0.5"' previously risked drifting out of sync.
    val = [("DEBUG", PAPI_DEBUG), ("VERSION", '"%s"' % PAPI_VERSION)]
    if sys.platform == "win32":
        val.append (("IS_WIN32", "1"))
    return val
def get_papi_files ():
    """List the C source files of the papi accessibility module."""
    pattern = os.path.join ("ocempgui", "access", "papi", "*.c")
    return glob.glob (pattern)
def get_data_files ():
    """Collect the files beneath data/ for installation into share/ocempgui."""
    installpath = os.path.join ("share", "ocempgui")
    filedict = {}
    # Only directories that actually contain files are recorded.
    for directory in get_directory_list ("data"):
        found = glob.glob (os.path.join (directory, "*.*"))
        if found:
            filedict[directory] = found
    return get_installation_files ("data", installpath, filedict)
def get_documentation_files ():
    """Collect the files beneath doc/ for installation as documentation."""
    installpath = os.path.join ("share", "doc", "ocempgui")
    filedict = {}
    # Traverse every doc directory; only suffixed files are installed.
    for directory in get_directory_list ("doc"):
        found = glob.glob (os.path.join (directory, "*.*"))
        if found:
            filedict[directory] = found
    return get_installation_files ("doc", installpath, filedict)
def run_checks ():
    """Verify Python/Pygame/ATK requirements; return True if papi can build."""
    # Python version check.
    if sys.version_info < PYTHON_MINIMUM: # major, minor check
        raise Exception ("You should have at least Python >= %d.%d.x "
                         "installed." % PYTHON_MINIMUM)
    # Pygame versioning checks.
    pygame_version = None
    try:
        import pygame
        if pygame.version.vernum < PYGAME_MINIMUM:
            raise Exception ("You should have at least Pygame >= %d.%d.%d "
                             "installed" % PYGAME_MINIMUM)
        pygame_version = pygame.version.ver
    except ImportError:
        # A missing pygame is reported (as None) but is not fatal here.
        pass
    # Environment checks for the PAPI interfaces.
    papi = False
    atk_version = "not found"
    if not check_pkgconfig ():
        papi = False
    else:
        val = pkg_get_flags ("atk", "--modversion")
        if val:
            atk_version = val[0]
        # NOTE(review): lexicographic comparison of dotted version strings -
        # e.g. "1.9.0" >= "1.18.0" holds even though 9 < 18; verify whether
        # that is acceptable here.
        if atk_version >= ATK_MINIMUM:
            papi = True
    print "\nThe following information will be used to build OcempGUI:"
    print "\t Python: %d.%d.%d" % sys.version_info[0:3]
    print "\t Pygame: %s" % pygame_version
    print "\t ATK: %s" % atk_version
    print "\t Build Papi: %s\n" % papi
    return papi
if __name__ == "__main__":
    # Decide whether the papi accessibility C extension can be built.
    want_papi = False
    try:
        want_papi = run_checks ()
    except Exception, detail:
        print "Error:", detail
        sys.exit (1)
    docfiles = get_documentation_files ()
    datafiles = get_data_files ()
    setupdata = {
        "name" : "OcempGUI",
        "version" : VERSION,
        "description": "Ocean Empire User Interface Library",
        "author": "Marcus von Appen",
        "author_email": "marcus@sysfault.org",
        "license": "BSD license",
        "url": "http://ocemp.sourceforge.net/gui.html",
        "packages": ["ocempgui",
                     "ocempgui.access",
                     "ocempgui.draw",
                     "ocempgui.events",
                     "ocempgui.object",
                     "ocempgui.widgets",
                     "ocempgui.widgets.components",
                     "ocempgui.widgets.images"],
        "data_files" : datafiles + docfiles,
        "cmdclass" : { "install_data" : InstallData },
        }
    if not want_papi:
        # Do not build the accessibility extension.
        setup (**setupdata)
    else:
        # Try to build the setup with the extension.
        # Combine compiler/linker flags for atk and gmodule-2.0.
        includes, libdirs, libs = pkg_get_all_cflags ("atk")
        gmodflags = pkg_get_all_cflags ("gmodule-2.0")
        includes += gmodflags[0]
        libdirs += gmodflags[1]
        libs += gmodflags[2]
        #includes += distutils.sysconfig.get_python_inc ()
        defines = get_papi_defines ()
        # Strict warning set used for the C extension build.
        warn_flags = ["-W", "-Wall", "-Wpointer-arith", "-Wcast-qual",
                      "-Winline", "-Wcast-align", "-Wconversion",
                      "-Wstrict-prototypes", "-Wmissing-prototypes",
                      "-Wmissing-declarations", "-Wnested-externs",
                      "-Wshadow", "-Wredundant-decls"
                      ]
        compile_args = warn_flags + ["-std=c99","-g"]
        papi = Extension ("ocempgui.access.papi", sources=get_papi_files (),
                          include_dirs=includes, library_dirs=libdirs,
                          libraries=libs, language="c",
                          define_macros=defines,
                          extra_compile_args=compile_args)
        setupdata["ext_modules"] = [papi]
        setup (**setupdata)
| |
import datetime
from django.contrib.auth.models import User
from django.test import Client
from mock import patch
from rapidsms.contrib.locations.models import Location, LocationType
from survey.forms.upload_csv_file import UploadWeightsForm
from survey.models import LocationWeight, Survey, UploadErrorLog, LocationTypeDetails
from survey.tests.base_test import BaseTest
from survey.views.location_widget import LocationWidget
from django.utils.timezone import utc
class UploadWeightsTest(BaseTest):
    def setUp(self):
        """Log in a permitted user and build a two-branch, 3-level location
        tree plus a weights CSV file and a survey for the tests below."""
        self.client = Client()
        # An unprivileged user, used by the permission-restriction test.
        User.objects.create_user(username='useless', email='rajni@kant.com', password='I_Suck')
        raj = self.assign_permission_to(User.objects.create_user('Rajni', 'rajni@kant.com', 'I_Rock'),
                                        'can_view_batches')
        self.client.login(username='Rajni', password='I_Rock')
        self.reqion_type = LocationType.objects.create(name="region1", slug="region1")
        self.district_type = LocationType.objects.create(name="district1", slug='district1')
        self.county_type = LocationType.objects.create(name="county1", slug='county1')
        # Branch 1: region1 > district1 > county1.
        region = Location.objects.create(name="region1", type=self.reqion_type)
        district = Location.objects.create(name="district1", tree_parent=region, type=self.district_type)
        Location.objects.create(name="county1", tree_parent=district, type=self.county_type)
        # Branch 2: region2 > district2 > county2.
        region = Location.objects.create(name="region2", type=self.reqion_type)
        district = Location.objects.create(name="district2", tree_parent=region, type=self.district_type)
        Location.objects.create(name="county2", tree_parent=district, type=self.county_type)
        # NOTE(review): `country=region` passes the *region2* Location as the
        # country for all three type details - looks odd; verify intent.
        LocationTypeDetails.objects.create(country=region, location_type=self.reqion_type)
        LocationTypeDetails.objects.create(country=region, location_type=self.district_type)
        LocationTypeDetails.objects.create(country=region, location_type=self.county_type)
        # CSV fixture: header row plus one weight per county.
        self.filename = 'test_uganda.csv'
        self.filedata = [['RegionName', 'DistrictName', 'CountyName', 'Selection Probability'],
                         ['region1', 'district1', 'county1', '0.01'],
                         ['region2', 'district2', 'county2', '0.1']]
        self.write_to_csv('wb', self.filedata, self.filename)
        self.file = open(self.filename, 'rb')
        self.survey = Survey.objects.create(name="Survey")
def test_should_return_success_and_render_template(self):
response = self.client.get('/locations/weights/upload/')
self.assertEqual(200, response.status_code)
templates = [template.name for template in response.templates]
self.assertIn('locations/weights/upload.html', templates)
def test_should_render_context_data(self):
response = self.client.get('/locations/weights/upload/')
self.assertEqual(response.context['button_label'], "Upload")
self.assertEqual(response.context['id'], "upload-location-weights-form")
self.assertEqual(len(response.context['location_types']), 3)
expected_types = [self.reqion_type, self.district_type, self.county_type]
[self.assertIn(_type, response.context['location_types']) for _type in expected_types]
self.assertIsInstance(response.context['upload_form'], UploadWeightsForm)
def test_should_redirect_after_post(self):
data = {'file': self.file,
'survey': self.survey.id}
response = self.client.post("/locations/weights/upload/", data=data)
self.assertRedirects(response, '/locations/weights/upload/', status_code=302, target_status_code=200, msg_prefix='')
@patch('survey.services.location_weights_upload.UploadLocationWeights.upload')
def test_should_give_uploading_message(self, mock_upload):
data = {'file': self.file,
'survey': self.survey.id}
response = self.client.post('/locations/weights/upload/', data=data)
assert mock_upload.called
self.assertIn('Upload in progress. This could take a while.', response.cookies['messages'].value)
def test_should_upload_csv_sucess(self):
data = {'file': self.file,
'survey': self.survey.id}
response = self.client.post('/locations/weights/upload/', data=data)
for row in self.filedata[1:]:
location = Location.objects.get(name=row[-2], tree_parent__name=row[-3])
self.failUnless(LocationWeight.objects.filter(location=location, selection_probability=row[-1]))
parents_names = location.get_ancestors().values_list('name', flat=True)
[self.assertIn(location_name, parents_names) for location_name in row[0:-2]]
def test_upload_csv_failure_if_selection_probability_is_NaN(self):
filedata = [['RegionName', 'DistrictName', 'CountyName', 'Selection Probability'],
['region1', 'district1', 'county1', 'bla bli blo not a number'],
['region2', 'district2', 'county2', '0.1']]
self.write_to_csv('wb', filedata, self.filename)
file = open(self.filename, 'rb')
data = {'file': file,
'survey': self.survey.id}
row = filedata[1]
location = Location.objects.get(name=row[-2], tree_parent__name=row[-3])
LocationWeight.objects.filter(location=location).delete()
response = self.client.post('/locations/weights/upload/', data=data)
self.failIf(LocationWeight.objects.filter(location=location))
location = Location.objects.get(name="county2", tree_parent__name="district2")
self.failUnless(LocationWeight.objects.filter(location=location, selection_probability=0.1))
def test_assert_restricted_permissions(self):
self.assert_login_required('/locations/weights/upload/')
self.assert_restricted_permission_for('/locations/weights/upload/')
def test_should_get_list_and_returns_success_with_template(self):
country = LocationType.objects.create(name="Country", slug="country")
region = Location.objects.create(name="region1", type=self.reqion_type)
district = Location.objects.create(name="district1", tree_parent=region, type=self.district_type)
county = Location.objects.create(name="county1", tree_parent=district, type=self.county_type)
region1 = Location.objects.create(name="region2", type=self.reqion_type)
district1 = Location.objects.create(name="district2", tree_parent=region1, type=self.district_type)
county1 = Location.objects.create(name="county2", tree_parent=district1, type=self.county_type)
location_weight_1 = LocationWeight.objects.create(location=county, selection_probability=0.1, survey=self.survey)
location_weight_2 = LocationWeight.objects.create(location=county1, selection_probability=0.2, survey=self.survey)
response = self.client.get('/locations/weights/')
self.assertEqual(200, response.status_code)
templates = [template.name for template in response.templates]
self.assertIn('locations/weights/index.html', templates)
self.assertIn(location_weight_1, response.context['location_weights'])
self.assertIn(location_weight_2, response.context['location_weights'])
expected_location_types = [self.reqion_type, self.district_type, self.county_type]
[self.assertIn(_type, response.context['location_types']) for _type in expected_location_types]
self.assertNotIn(country, response.context['location_types'])
self.assertIsInstance(response.context['location_data'], LocationWidget)
self.assertEqual(1, len(response.context['surveys']))
self.assertIn(self.survey, response.context['surveys'])
self.assertIsNone(response.context['selected_survey'])
self.assertIsNotNone(response.context['request'])
def test_filter_list_weights_by_location(self):
district = Location.objects.create(name="district1", type=self.district_type)
county = Location.objects.create(name="county1", tree_parent=district, type=self.county_type)
region1 = Location.objects.create(name="region2", type=self.reqion_type)
district1 = Location.objects.create(name="district2", tree_parent=region1, type=self.district_type)
county1 = Location.objects.create(name="county2", tree_parent=district1, type=self.county_type)
weight_1 = LocationWeight.objects.create(location=county, selection_probability=0.1, survey=self.survey)
weight_2 = LocationWeight.objects.create(location=county1, selection_probability=0.2, survey=self.survey)
response = self.client.get('/locations/weights/?location=%d' % county1.id)
self.assertEqual(1, len(response.context['location_weights']))
self.assertIn(weight_2, response.context['location_weights'])
self.assertIsNone(response.context['selected_survey'])
def test_filter_list_weights_by_survey(self):
    """Filtering by survey shows only that survey's weights and marks it selected."""
    other_survey = Survey.objects.create(name="what hohoho")
    district_a = Location.objects.create(name="district1", type=self.district_type)
    county_a = Location.objects.create(name="county1", tree_parent=district_a, type=self.county_type)
    region_b = Location.objects.create(name="region2", type=self.reqion_type)
    district_b = Location.objects.create(name="district2", tree_parent=region_b, type=self.district_type)
    county_b = Location.objects.create(name="county2", tree_parent=district_b, type=self.county_type)
    expected_weight = LocationWeight.objects.create(location=county_a, selection_probability=0.1, survey=self.survey)
    LocationWeight.objects.create(location=county_b, selection_probability=0.2, survey=other_survey)

    response = self.client.get('/locations/weights/?survey=%d' % self.survey.id)

    self.assertEqual(1, len(response.context['location_weights']))
    self.assertIn(expected_weight, response.context['location_weights'])
    self.assertEqual(self.survey, response.context['selected_survey'])
    # Both surveys remain selectable even though only one is filtered on.
    self.assertEqual(2, len(response.context['surveys']))
    self.assertIn(self.survey, response.context['surveys'])
    self.assertIn(other_survey, response.context['surveys'])
    self.assertIn('list_weights_page', response.context['action'])
def test_filter_list_weights_by_location_and_survey(self):
    """Combining survey and location filters narrows the list to a single weight."""
    other_survey = Survey.objects.create(name="what hohoho")
    district_a = Location.objects.create(name="district1", type=self.district_type)
    county_a = Location.objects.create(name="county1", tree_parent=district_a, type=self.county_type)
    region_b = Location.objects.create(name="region2", type=self.reqion_type)
    district_b = Location.objects.create(name="district2", tree_parent=region_b, type=self.district_type)
    county_b = Location.objects.create(name="county2", tree_parent=district_b, type=self.county_type)
    LocationWeight.objects.create(location=county_a, selection_probability=0.1, survey=self.survey)
    expected_weight = LocationWeight.objects.create(location=county_b, selection_probability=0.2, survey=self.survey)

    response = self.client.get('/locations/weights/?survey=%d&location=%d' % (self.survey.id, county_b.id))

    self.assertEqual(1, len(response.context['location_weights']))
    self.assertIn(expected_weight, response.context['location_weights'])
    self.assertEqual(self.survey, response.context['selected_survey'])
    self.assertEqual(2, len(response.context['surveys']))
    self.assertIn(self.survey, response.context['surveys'])
    self.assertIn(other_survey, response.context['surveys'])
class UploadWeightsErrorLogTest(BaseTest):
    """Tests for the weights upload error-log view (/locations/weights/error_logs/)."""

    def setUp(self):
        # Two users: 'useless' never logs in (exists to show permissions are
        # per-user); 'Rajni' gets 'can_view_batches' and performs the requests.
        self.client = Client()
        User.objects.create_user(username='useless', email='rajni@kant.com', password='I_Suck')
        raj = self.assign_permission_to(User.objects.create_user('Rajni', 'rajni@kant.com', 'I_Rock'),
                                        'can_view_batches')
        self.client.login(username='Rajni', password='I_Rock')
        # Two region -> district -> county location trees.
        region = Location.objects.create(name="region1")
        district = Location.objects.create(name="district1", tree_parent=region)
        Location.objects.create(name="county1", tree_parent=district)
        region = Location.objects.create(name="region2")
        district = Location.objects.create(name="district2", tree_parent=region)
        Location.objects.create(name="county2", tree_parent=district)
        # CSV fixture: header row plus one selection-probability row per county.
        self.filename = 'test_uganda.csv'
        self.filedata = [['RegionName', 'DistrictName', 'CountyName', 'Selection Probability'],
                         ['region1', 'district1', 'county1', '0.01'],
                         ['region2', 'district2', 'county2', '0.1']]
        self.write_to_csv('wb', self.filedata, self.filename)
        # NOTE(review): this handle is never closed and the file never removed
        # here — confirm cleanup happens in a tearDown elsewhere.
        self.file = open(self.filename, 'rb')
        self.survey = Survey.objects.create(name="Survey")

    def test_should_get_list_and_returns_success_with_template(self):
        # Only WEIGHTS-model error logs should be listed; LOCATION logs are excluded.
        error_log = UploadErrorLog.objects.create(model="WEIGHTS", error="Some error", filename="Some file", row_number=1)
        error_log_1 = UploadErrorLog.objects.create(model="LOCATION", error="Some error", filename="Some file", row_number=25)
        error_log_2 = UploadErrorLog.objects.create(model="WEIGHTS", error="Some error_2", filename="Some file", row_number=25)
        response = self.client.get('/locations/weights/error_logs/')
        self.assertEqual(200, response.status_code)
        templates = [template.name for template in response.templates]
        self.assertIn('locations/weights/error_logs.html', templates)
        expected_errors = [error_log, error_log_2]
        [self.assertIn(error, response.context['error_logs']) for error in expected_errors]
        self.assertNotIn(error_log_1, response.context['error_logs'])
        self.assertIsNotNone(response.context['request'])
        # With no date filter the view defaults both bounds to today.
        today = datetime.datetime.now().strftime('%Y-%m-%d')
        self.assertEqual(today, response.context['selected_from_date'])
        self.assertEqual(today, response.context['selected_to_date'])

    def test_should_filter_to_and_from_dates(self):
        error_log = UploadErrorLog.objects.create(model="WEIGHTS", error="Some error", filename="Some file", row_number=1)
        error_log_1 = UploadErrorLog.objects.create(model="LOCATION", error="Some error", filename="Some file", row_number=25)
        error_log_2 = UploadErrorLog.objects.create(model="WEIGHTS", error="Some error_2", filename="Some file", row_number=25)
        # Push one WEIGHTS log 4 days into the future so the date filter excludes it.
        error_log_2.created = error_log_2.created.replace(tzinfo=utc) + datetime.timedelta(days=4)
        error_log_2.save()
        error_log_created = error_log.created.strftime('%Y-%m-%d')
        response = self.client.get('/locations/weights/error_logs/?from_date=%s&to_date=%s' % (error_log_created, error_log_created))
        self.assertEqual(200, response.status_code)
        self.assertEqual(1, response.context['error_logs'].count())
        self.assertIn(error_log, response.context['error_logs'])
        # The view expands the bounds to the start/end of the selected days.
        self.assertEqual(error_log.created.replace(hour=0, minute=0, second=0, microsecond=0), response.context['selected_from_date'])
        self.assertEqual(error_log.created.replace(hour=23, minute=59, second=0, microsecond=0), response.context['selected_to_date'])

    def test_assert_restricted_permissions(self):
        # All weights pages require login and the proper permission.
        self.assert_login_required('/locations/weights/')
        self.assert_restricted_permission_for('/locations/weights/')
        self.assert_restricted_permission_for('/locations/weights/error_logs/')
| |
import os
import configparser
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
from pyspark.sql.functions import *
import Utils
class Readmission:
    """
    Detect hospital readmissions in a set of data in OMOP format.

    On construction, code lists are read from the configured files and three
    per-procedure results are computed immediately:
      - readmissionDfs: patients readmitted within `readmission_days`
      - providerProcedureInfoDfs: per-provider procedure/readmission/death
        counts and complication rate
      - deaths: patients who died within `readmission_days` of the stay
    """

    def __init__(self, data, config, sc, sqlContext):
        self.sc = sc
        self.data = data
        self.sqlContext = sqlContext
        self.utils = Utils.Utils(sqlContext)
        # Environment-specific properties live under '<env>.*' config sections.
        self.env = config.get('branch', 'env')
        self.date_input_format = config.get(self.env + '.data', 'date_input_format')
        self.readmission_days = config.get(self.env + '.readmission', 'readmission_days')
        self.readmission_code_file = config.get(self.env + '.readmission', 'readmission_code_file')
        self.diagnostic_code_file = config.get(self.env + '.readmission', 'diagnostic_code_file')
        self.comorbidies_code_file = config.get(self.env + '.readmission', 'comorbidies_code_file')
        self.readmission_codes = self.getReadmissionCodes(self.readmission_code_file)
        self.diagnostic_codes = self.getDiagnosticCodes(self.diagnostic_code_file)
        self.comorbitiy_codes = self.getComorbityCodes(self.comorbidies_code_file)
        self.icd_diagnosis = config.get(self.env + '.readmission', 'icd_diagnosis')
        self.icd_readmission = config.get(self.env + '.readmission', 'icd_readmission')
        self.inpatient_condition_primary_diagnosis = config.get(
            self.env + '.readmission', 'inpatient_condition_primary_diagnosis').split(",")
        self.inpatient_procedure_primary_diagnosis = config.get(
            self.env + '.readmission', 'inpatient_procedure_primary_diagnosis').split(",")
        # find readmission patients based on criteria in the properties file
        self.readmissionDfs, self.providerProcedureInfoDfs, self.deaths = \
            self.readmissionPatients(data, config, self.diagnostic_codes, self.readmission_codes)

    def getDiagnosticCodes(self, filename):
        """Read diagnostic codes from a file into a dict of procedure -> [codes]."""
        return self.readFileToDict(filename)

    def getReadmissionCodes(self, filename):
        """Read readmission codes from a file into a dict of procedure -> [codes]."""
        return self.readFileToDict(filename)

    def getComorbityCodes(self, filename):
        """Read comorbidity codes from a file into a dict of procedure -> [codes]."""
        return self.readFileToDict(filename)

    def readFileToDict(self, filename):
        """
        Parse 'name=v1,v2,...' lines into {name: [v1, v2, ...]}.

        Blank lines, lines without '=', and '#'-prefixed comment lines are
        skipped. Only the whole value string is stripped, so individual values
        may keep spaces around the commas (existing behavior, preserved).
        """
        props = {}
        with open(filename, 'r') as f:
            for line in f:
                line = line.rstrip()  # drop trailing whitespace and '\n'
                if "=" not in line: continue  # skips blanks and comments w/o =
                if line.startswith("#"): continue  # skips comments which contain =
                k, v = line.split("=", 1)
                props[k.strip()] = v.strip().split(",")
        return props

    def readmissionPatients(self, data, config, diagnostic_codes, readmission_codes):
        """
        Find patients that have been readmitted and return three parallel
        dicts keyed by procedure: readmission dataframes, per-provider
        summary dataframes, and death dataframes.
        """
        readmissionDfs = {}  # dataframe of readmission patients per procedure
        providerProcedureInfoDfs = {}  # provider event counts per procedure
        deaths = {}  # patients who died per procedure
        for key, value in diagnostic_codes.items():
            # make sure we have readmission codes for this procedure
            if key not in readmission_codes:
                print("No readmission codes for this procedure. This procedure " + key + " will be skipped. ")
                continue
            # find events with codes of interest
            condition_occurrence_f = self.utils.filterDataframeByCodes(data['condition_occurrence'],
                                                                       diagnostic_codes[key],
                                                                       'CONDITION_SOURCE_VALUE').cache()
            procedure_occurrence_f = self.utils.filterDataframeByCodes(data['procedure_occurrence'],
                                                                       diagnostic_codes[key],
                                                                       'PROCEDURE_SOURCE_VALUE').cache()
            # only consider inpatient stays where the icd code of interest is
            # an inpatient primary diagnosis
            condition_occurrence_f = self.utils.filterDataframeByCodes(condition_occurrence_f,
                                                                       self.inpatient_condition_primary_diagnosis,
                                                                       'CONDITION_TYPE_CONCEPT_ID').cache()
            procedure_occurrence_f = self.utils.filterDataframeByCodes(procedure_occurrence_f,
                                                                       self.inpatient_procedure_primary_diagnosis,
                                                                       'PROCEDURE_TYPE_CONCEPT_ID').cache()
            # find readmission events
            condition_occurrence_r = self.utils.filterDataframeByCodes(data['condition_occurrence'],
                                                                       readmission_codes[key],
                                                                       'CONDITION_SOURCE_VALUE').cache()
            procedure_occurrence_r = self.utils.filterDataframeByCodes(data['procedure_occurrence'],
                                                                       readmission_codes[key],
                                                                       'PROCEDURE_SOURCE_VALUE').cache()
            # only consider readmission events where the icd code of interest
            # is an inpatient primary diagnosis
            condition_occurrence_r = self.utils.filterDataframeByCodes(condition_occurrence_r,
                                                                       self.inpatient_condition_primary_diagnosis,
                                                                       'CONDITION_TYPE_CONCEPT_ID').cache()
            procedure_occurrence_r = self.utils.filterDataframeByCodes(procedure_occurrence_r,
                                                                       self.inpatient_procedure_primary_diagnosis,
                                                                       'PROCEDURE_TYPE_CONCEPT_ID').cache()
            # persons with an inpatient stay from the filtered dataframes
            inpatient_co = self.utils.findPersonsWithInpatientStay(condition_occurrence_f,
                                                                   'condition_occurrence',
                                                                   'VISIT_END_DATE',
                                                                   True,
                                                                   self.date_input_format).cache()
            inpatient_po = self.utils.findPersonsWithInpatientStay(procedure_occurrence_f,
                                                                   'procedure_occurrence',
                                                                   'VISIT_END_DATE',
                                                                   True,
                                                                   self.date_input_format).cache()
            inpatient_events = inpatient_co.unionAll(inpatient_po).cache()
            # complications only occur in the condition_occurrence table
            complications = self.utils.findPersonsWithInpatientStay(condition_occurrence_r,
                                                                    'condition_occurrence',
                                                                    'VISIT_START_DATE',
                                                                    True,
                                                                    self.date_input_format).cache()
            # now find readmissions and deaths after the stay
            readmissionDfs[key] = self.findReadmissionPersons(inpatient_events,
                                                              complications,
                                                              self.readmission_days).cache()
            deaths[key] = self.utils.findDeathAfterEvent(inpatient_events,
                                                         self.readmission_days,
                                                         self.date_input_format).cache()
            # summarize per-provider totals and complication rate
            providerEventCount = self.utils.countProviderOccurrence(
                inpatient_events, self.sqlContext).withColumnRenamed("COUNT", "PROCEDURE_COUNT").cache()
            providerComplicationCount = self.utils.countProviderOccurrence(
                readmissionDfs[key], self.sqlContext).withColumnRenamed("COUNT", "READMISSION_COUNT").cache()
            providerDeathCount = self.utils.countProviderOccurrence(
                deaths[key], self.sqlContext).withColumnRenamed("COUNT", "DEATH_COUNT").cache()
            # left joins keep providers with zero readmissions/deaths; fillna(0)
            # replaces the resulting nulls so the arithmetic below is valid
            providerProcedureInfo = providerEventCount.join(providerComplicationCount, 'PROVIDER_ID', how='left')
            providerProcedureInfo = providerProcedureInfo.fillna(0)
            providerProcedureInfo = providerProcedureInfo.join(providerDeathCount, 'PROVIDER_ID', how='left')
            providerProcedureInfo = providerProcedureInfo.fillna(0)
            providerProcedureInfo = providerProcedureInfo.withColumn(
                'COMPLICATION_COUNT',
                providerProcedureInfo.READMISSION_COUNT + providerProcedureInfo.DEATH_COUNT)
            providerProcedureInfo = providerProcedureInfo.withColumn(
                'PERCENTAGE',
                providerProcedureInfo.COMPLICATION_COUNT / providerProcedureInfo.PROCEDURE_COUNT)
            providerProcedureInfoDfs[key] = providerProcedureInfo
        return readmissionDfs, providerProcedureInfoDfs, deaths

    def findReadmissionPersons(self, inpatient_events, complications, days):
        """
        Join inpatient stays with later complication admissions: a readmission
        is a complication whose VISIT_START_DATE falls strictly between the
        stay's VISIT_END_DATE and VISIT_END_DATE + days. `days` is a string
        from the config and is interpolated into the SQL.
        """
        inpatient_events.registerTempTable('inpatient_events')
        complications.registerTempTable('complications')
        sqlString = ("select distinct inpatient_events.PERSON_ID, inpatient_events.VISIT_END_DATE, "
                     "inpatient_events.PROVIDER_ID, complications.SOURCE_VALUE "
                     "from inpatient_events join complications "
                     "where inpatient_events.PERSON_ID=complications.PERSON_ID "
                     "and inpatient_events.VISIT_END_DATE < complications.VISIT_START_DATE "
                     "and complications.VISIT_START_DATE < date_add(inpatient_events.VISIT_END_DATE," + days + ")")
        return self.sqlContext.sql(sqlString)

    def icdGrouping(self, sqlContext):
        """
        Count occurrences of every icd code by summing over the (globally
        registered) condition_occurrence and procedure_occurrence tables.
        """
        icd_co = sqlContext.sql("select CONDITION_SOURCE_VALUE SOURCE_VALUE, count(*) COUNT_CO \
                                 from condition_occurrence group by CONDITION_SOURCE_VALUE")
        icd_po = sqlContext.sql("select PROCEDURE_SOURCE_VALUE SOURCE_VALUE, count(*) COUNT_PO \
                                 from procedure_occurrence group by PROCEDURE_SOURCE_VALUE")
        icd_all = icd_co.join(icd_po, 'SOURCE_VALUE', how='outer').fillna(0)
        icd_all = icd_all.withColumn('COUNT', icd_all.COUNT_CO + icd_all.COUNT_PO)
        return icd_all

    def icdGroupingPrimary(self, data):
        """
        Count icd codes restricted to primary inpatient admission diagnoses.
        """
        icd_co_temp = self.utils.filterDataframeByCodes(data['condition_occurrence'],
                                                        self.inpatient_condition_primary_diagnosis,
                                                        'CONDITION_TYPE_CONCEPT_ID')
        # NOTE(review): the procedure table is filtered with the *condition*
        # primary-diagnosis list here (not inpatient_procedure_primary_diagnosis);
        # preserved as-is — confirm whether this is intentional.
        icd_po_temp = self.utils.filterDataframeByCodes(data['procedure_occurrence'],
                                                        self.inpatient_condition_primary_diagnosis,
                                                        'PROCEDURE_TYPE_CONCEPT_ID')
        icd_co_temp.registerTempTable('condition_occurrence_primary')
        icd_po_temp.registerTempTable('procedure_occurrence_primary')
        icd_co = self.sqlContext.sql("select CONDITION_SOURCE_VALUE SOURCE_VALUE, count(*) COUNT_CO \
                                      from condition_occurrence_primary group by CONDITION_SOURCE_VALUE")
        icd_po = self.sqlContext.sql("select PROCEDURE_SOURCE_VALUE SOURCE_VALUE, count(*) COUNT_PO \
                                      from procedure_occurrence_primary group by PROCEDURE_SOURCE_VALUE")
        icd_all = icd_co.join(icd_po, 'SOURCE_VALUE', how='outer').fillna(0)
        icd_all = icd_all.withColumn('COUNT', icd_all.COUNT_CO + icd_all.COUNT_PO)
        return icd_all

    def _writeProcedureSection(self, f, procedure, code_list, icd_all, icd_def):
        """
        Write one 'Procedure:' section (per-code counts plus section total) to
        the open file handle `f` and return the section total.
        `icd_all` is a pandas frame with SOURCE_VALUE and COUNT columns;
        `icd_def` maps icd9 code -> description.
        """
        f.write("Procedure: " + procedure + "\n")
        f.write("code, count, description\n")
        total = 0
        for code in code_list:
            rows = icd_all[icd_all.SOURCE_VALUE == code]
            # Read the aggregated 'COUNT' column; note '.count' would be the
            # pandas DataFrame.count method, not the column.
            icd_count = 0 if rows.empty else rows.COUNT.item()
            total += icd_count
            icd_description = icd_def[code] if code in icd_def else ""
            f.write(code + "," + str(icd_count) + "," + icd_description + "\n")
        f.write("Total Count For This procedure: " + str(total) + "\n\n")
        return total

    def writeCodesAndCount(self, sqlContext, codes, directory, filename, primary_only):
        """
        Write counts of the icd codes in `codes` to <directory>/<filename>.
        If primary_only is set, only count icd codes designated as primary
        inpatient codes.
        """
        if not os.path.exists(directory):
            os.makedirs(directory)
        if primary_only:
            # look only for icd codes that are primary inpatient.
            # Fix: icdGroupingPrimary takes only the data dict; the previous
            # call passed four arguments and raised TypeError.
            icd_all = self.icdGroupingPrimary(self.data).toPandas()
        else:
            # look at all icd codes
            icd_all = self.icdGrouping(sqlContext).toPandas()
        icd_def = self.utils.readFileIcd9('icd/icd9/CMS32_DESC_LONG_DX.txt')  # icd9 definitions
        total_for_all = 0
        with open(os.path.join(directory, filename), "w") as f:
            for key, value in codes.items():
                total_for_all += self._writeProcedureSection(f, key, value, icd_all, icd_def)
            f.write("Total Count For All Procedures: " + str(total_for_all) + "\n")

    def readmissionGrouping(self, sqlContext, readmission):
        """Count occurrences of each SOURCE_VALUE in a readmission dataframe."""
        # Fix: register the temp table under the same (lowercase) name that the
        # query uses; previously it was registered as 'readmissioN'.
        readmission.registerTempTable('readmission')
        return sqlContext.sql("select SOURCE_VALUE, count(*) COUNT from readmission group by SOURCE_VALUE")

    def writeReadmissionCodesAndCount(self, sqlContext, codes, readmissionDfs, directory, filename):
        """
        Write per-procedure counts of the readmission codes that actually
        triggered readmission events to <directory>/<filename>.
        Fix: the per-code count now reads the 'COUNT' column ('.COUNT.item()');
        the previous '.count.item()' accessed the DataFrame.count method.
        """
        if not os.path.exists(directory):
            os.makedirs(directory)
        icd_def = self.utils.readFileIcd9('icd/icd9/CMS32_DESC_LONG_DX.txt')  # icd9 definitions
        total_for_all = 0
        with open(os.path.join(directory, filename), "w") as f:
            for key, value in codes.items():
                icd_all = self.readmissionGrouping(sqlContext, readmissionDfs[key]).toPandas()
                total_for_all += self._writeProcedureSection(f, key, value, icd_all, icd_def)
            f.write("Total Count For All Procedures: " + str(total_for_all) + "\n")
| |
#
# Copyright 2017 CNIT - Consorzio Nazionale Interuniversitario per le Telecomunicazioni
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.middleware.csrf import get_token
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from lib.util import Util
from sf_user.models import CustomUser
import git
# DO NOT REMOVE THIS COMMENT #
# Project Models #
from projecthandler.srv6_net_prog_model import Srv6_net_progProject
from projecthandler.cran_model import CranProject
from projecthandler.nemo_model import NemoProject
from projecthandler.toscanfv_model import ToscanfvProject
from projecthandler.superfluidity_model import SuperfluidityProject
from projecthandler.oshi_model import OshiProject
from projecthandler.click_model import ClickProject
from projecthandler.etsi_model import EtsiProject
from projecthandler.models import Project
from projecthandler.tosca_model import ToscaProject
# DO NOT REMOVE THIS COMMENT #
# Project Model Type declarations #
# Register every concrete project model under its type token.
_PROJECT_TYPE_REGISTRY = (
    ('srv6_net_prog', Srv6_net_progProject),
    ('cran', CranProject),
    ('nemo', NemoProject),
    ('toscanfv', ToscanfvProject),
    ('superfluidity', SuperfluidityProject),
    ('oshi', OshiProject),
    ('etsi', EtsiProject),
    ('click', ClickProject),
    ('tosca', ToscaProject),
)
for _type_name, _project_class in _PROJECT_TYPE_REGISTRY:
    Project.add_project_type(_type_name, _project_class)
from projecthandler.models import Repository
@login_required
def home(request):
    """Render the landing page for an authenticated user."""
    context = {}
    return render(request, 'home.html', context)
@login_required
def create_new_project(request):
if request.method == 'POST':
error_msgs = []
user = CustomUser.objects.get(id=request.user.id)
name = request.POST.get('name', 'WithoutName')
info = request.POST.get('info', ' ')
type = request.POST.get('type', '')
start_from = request.POST.get('startfrom', 'scratch')
project_types = Project.get_project_types()
if type in project_types:
project_class = project_types[type]
else:
# FIXME this error is not handled
error_msgs.append('Project type undefined.')
return render(request, 'error.html',
{'error_msg': 'Error creating new project, project type undefined. Please retry.'})
try:
if start_from == 'scratch':
print 'from scratch'
data_project = {}
elif start_from == 'files':
print 'from files'
data_project = project_class.data_project_from_files(request)
elif start_from == 'example':
print 'from example'
data_project = project_class.data_project_from_example(request)
#print(data_project)
project = project_class.create_project(name, user, False, info, data_project)
#print project.get_dataproject()
except Exception as e:
print 'Error creating ' + type + ' project! Please retry.'
print e
return render(request, 'error.html', {'error_msg': 'Error creating ' + type + ' project! Please retry.'})
return redirect('projects:open_project', project_id=project.id)
elif request.method == 'GET':
csrf_token_value = get_token(request)
result = {}
data_type_selector = [{
'id': '-1',
'text': 'Select an option'
}]
type_example_files = {}
type_container_template = ''
project_types = Project.get_project_types()
print "project_types", project_types.keys()
for type in project_types:
project_class = project_types[type]
type_example_files.update(project_class.get_example_list())
data_type_selector.append({
'id': type,
'text': type,
'value': type
})
type_container_template += render_to_string(type + '/' + type + '_new_project.html')
result.update({'type_example_files': json.dumps(type_example_files)})
result.update({'data_type_selector': json.dumps(data_type_selector)})
result.update({'type_container_template': type_container_template})
result.update({'csrf_token': csrf_token_value})
return render(request, 'new_project.html', result)
@login_required
def user_projects(request):
    """List every project owned by the logged-in user."""
    token_value = get_token(request)
    owner = CustomUser.objects.get(id=request.user.id)
    owned = Project.objects.filter(owner=owner).select_subclasses()
    context = {
        'projects': list(owned),
        'csrf_token': token_value
    }
    return render(request, 'projectlist.html', context)
@login_required
def open_project(request, project_id=None):
    """Open a project's type-specific detail page; render an error page on failure."""
    try:
        matches = Project.objects.filter(id=project_id).select_subclasses()
        overview = matches[0].get_overview_data()
        kind = overview['type']
        print(request.COOKIES.keys())
        template = kind + '/' + kind + '_project_details.html'
        return render(request, template,
                      {'project_overview': overview, 'project_id': project_id})
    except Exception as e:
        print(e)
        return render(request, 'error.html', {'error_msg': 'Error open project! Please retry.'})
@login_required
def delete_project(request, project_id=None):
    """
    POST: delete the project and return to the project list.
    GET: show the type-specific delete-confirmation page.
    """
    if request.method == 'POST':
        try:
            Project.objects.filter(id=project_id).delete()
            return redirect('projects:projects_list')
        except Exception as e:
            print e
            return render(request, 'error.html', {'error_msg': 'Error deleting Project.'})
    elif request.method == 'GET':
        try:
            projects = Project.objects.filter(id=project_id).select_subclasses()
            project_overview = projects[0].get_overview_data()
            prj_token = project_overview['type']
            # example: 'etsi/etsi_project_delete.html'
            print prj_token + '/' + prj_token + '_project_delete.html', project_overview['name']
            return render(request, prj_token + '/' + prj_token + '_project_delete.html',
                          {'project_id': project_id, 'project_name': project_overview['name']})
        except Exception as e:
            # An empty queryset makes projects[0] raise IndexError, caught here.
            print e
            return render(request, 'error.html', {'error_msg': 'Project not found.'})
@login_required
def translate_push(request, project_id=None):
    """
    POST: translate the named descriptor and push it to a git repository
    (either a newly created Repository or an existing one), then render the
    type-specific push report. On any failure the same context is rendered
    with 'error_msg' set (including git command output when available).
    """
    if request.method == 'POST':
        result = {'project_id': project_id}
        url = None  # resolved to the report template once the project type is known
        try:
            desc_name = request.POST.get('descId', '')
            start_from = request.POST.get('startfrom')
            commit_msg = request.POST.get('commit_msg', '')
            translator_id = request.POST.get('translator_id', '')
            if start_from == 'new':
                name_repo = request.POST.get('name_repo', '')
                base_url_repo = request.POST.get('base_url_repo', '')
                repo = Repository.objects.create(name=name_repo, base_url=base_url_repo)
            else:
                repo_id = request.POST.get('repo_id', '')
                repo = Repository.objects.get(id=repo_id)
            if repo is None:
                # NOTE(review): objects.get raises rather than returning None,
                # so this check is defensive only — confirm before removing.
                raise Exception("Repository Not Found")
            result['repo'] = repo
            # Value unused, but the lookup raises if the session user vanished.
            user = CustomUser.objects.get(id=request.user.id)
            projects = Project.objects.filter(id=project_id).select_subclasses()
            if len(projects) == 0:
                raise Exception("Project Not Found")
            project_overview = projects[0].get_overview_data()
            result['project_overview_data'] = project_overview
            prj_token = project_overview['type']
            url = prj_token + '/' + prj_token + '_push_report.html'
            repo.fetch_repository()
            # Fix: compare the string with '!=' instead of identity ('is not'),
            # which only worked through CPython small-string interning.
            report = projects[0].translate_push_ns_on_repository(
                translator_id, desc_name, repo,
                **{'repo_path': '/tmp/git_repo/' + repo.name,
                   'commit_msg': commit_msg if commit_msg != '' else None})
        except Exception as e:
            print(e)
            url = 'error.html' if url is None else url
            error_msg = 'Error push on git repo. \n'
            if isinstance(e, git.GitCommandError):
                error_msg += e.stdout + '\n'
                error_msg += e.stderr + '\n'
            result['error_msg'] = error_msg
            return __response_handler(request, result, url)
        print(url)
        return __response_handler(request, result, url, to_redirect=False)
@login_required
def show_descriptors(request, project_id=None, descriptor_type=None):
    """List all descriptors of the given type for one project."""
    token_value = get_token(request)
    matches = Project.objects.filter(id=project_id).select_subclasses()
    overview = matches[0].get_overview_data()
    kind = overview['type']
    template = kind + '/' + kind + '_project_descriptors.html'
    context = {
        'descriptors': matches[0].get_descriptors(descriptor_type),
        'project_id': project_id,
        'project_type': kind,
        'project_overview_data': overview,
        "csrf_token_value": token_value,
        'descriptor_type': descriptor_type
    }
    return render(request, template, context)
@login_required
def graph(request, project_id=None):
    """Render the topology-graph page for a project (GET only)."""
    if request.method == 'GET':
        # Called for its side effect (CSRF cookie); the value itself is unused.
        get_token(request)
        matches = Project.objects.filter(id=project_id).select_subclasses()
        kind = matches[0].get_overview_data()['type']
        # example : 'etsi/project_graph.html'
        context = {
            'project_id': project_id,
            'project_overview_data': matches[0].get_overview_data(),
            'collapsed_sidebar': False
        }
        return render(request, kind + '/project_graph.html', context)
@login_required
def graph_data(request, project_id=None, descriptor_id=None):
    """Return the JSON topology of one descriptor for the graph view (CORS-open)."""
    print 'graph_data', project_id, descriptor_id
    projects = Project.objects.filter(id=project_id).select_subclasses()
    project_overview = projects[0].get_overview_data()
    # data = projects[0].get_overview_data()
    prj_token = project_overview['type']  # NOTE(review): unused below — confirm before removing
    topology = projects[0].get_graph_data_json_topology(descriptor_id)
    #print "topology", topology, "enyfgydfytd"
    response = HttpResponse(topology, content_type="application/json")
    response["Access-Control-Allow-Origin"] = "*"
    return response
@login_required
def download(request, project_id=None):
    """POST: stream the project as a zip attachment. GET: show the download page."""
    # Called for its CSRF-cookie side effect; the value itself is unused.
    get_token(request)
    matches = Project.objects.filter(id=project_id).select_subclasses()
    if request.method == 'POST':
        archive = matches[0].get_zip_archive()
        response = HttpResponse(content_type="application/zip")
        response["Content-Disposition"] = "attachment; filename=export_" + project_id + ".zip"
        payload = archive.getvalue()
        archive.close()
        response.write(payload)
        return response
    elif request.method == 'GET':
        context = {
            'project_id': project_id,
            'project_overview_data': matches[0].get_overview_data(),
        }
        return render(request, 'download.html', context)
@login_required
def delete_descriptor(request, project_id=None, descriptor_type=None, descriptor_id=None):
    """Delete one descriptor, then re-render the descriptor list with the outcome."""
    token_value = get_token(request)
    matches = Project.objects.filter(id=project_id).select_subclasses()
    succeeded = matches[0].delete_descriptor(descriptor_type, descriptor_id)
    overview = matches[0].get_overview_data()
    kind = overview['type']
    template = kind + '/' + kind + '_project_descriptors.html'
    if succeeded:
        message = "Delete succeeded!"
    else:
        message = 'Error in delete'
    return render(request, template, {
        'descriptors': matches[0].get_descriptors(descriptor_type),
        'project_id': project_id,
        'project_overview_data': overview,
        "csrf_token_value": token_value,
        'descriptor_type': descriptor_type,
        'alert_message': {
            'success': succeeded,
            'message': message}
    })
@login_required
def clone_descriptor(request, project_id=None, descriptor_type=None, descriptor_id=None):
    """Clone one descriptor under the id given in ?newid=..., then re-render the list."""
    token_value = get_token(request)
    matches = Project.objects.filter(id=project_id).select_subclasses()
    target_id = request.GET.get('newid', '')
    succeeded = matches[0].clone_descriptor(descriptor_type, descriptor_id, target_id)
    overview = matches[0].get_overview_data()
    kind = overview['type']
    template = kind + '/' + kind + '_project_descriptors.html'
    if succeeded:
        message = "Cloned!"
    else:
        message = 'Error in cloning'
    return render(request, template, {
        'descriptors': matches[0].get_descriptors(descriptor_type),
        'project_id': project_id,
        'project_overview_data': overview,
        "csrf_token_value": token_value,
        'descriptor_type': descriptor_type,
        'alert_message': {
            'success': succeeded,
            'message': message}
    })
@login_required
def new_descriptor(request, project_id=None, descriptor_type=None):
    """
    GET: render a fresh descriptor template (both YAML and JSON renderings).
    POST: create a descriptor from an uploaded file or pasted text and return
    a JSON result (HTTP 500 when creation fails).
    """
    projects = Project.objects.filter(id=project_id).select_subclasses()
    project_overview = projects[0].get_overview_data()
    prj_token = project_overview['type']
    page = prj_token + '/descriptor/descriptor_new.html'
    if request.method == 'GET':
        request_id = request.GET.get('id', '')
        json_template = projects[0].get_new_descriptor(descriptor_type, request_id)
        print 'new descriptor GET', json_template
        # The editor is fed both representations of the same template.
        descriptor_string_yaml = Util.json2yaml(json_template)
        descriptor_string_json = json.dumps(json_template)
        return render(request, page, {
            'project_id': project_id,
            'descriptor_type': descriptor_type,
            'descriptor_id': request_id,
            'project_overview_data': project_overview,
            'descriptor_strings': {'descriptor_string_yaml': descriptor_string_yaml,
                                   'descriptor_string_json': descriptor_string_json}
        })
    elif request.method == 'POST':
        # NOTE(review): value unused; get_token also sets the CSRF cookie.
        csrf_token_value = get_token(request)
        data_type = request.POST.get('type')
        print "TYPE", data_type
        if data_type == "file":
            # File upload: derive the descriptor name and data format from the filename.
            file_uploaded = request.FILES['file']
            text = file_uploaded.read()
            data_type = file_uploaded.name.split(".")[-1]
            desc_name = file_uploaded.name.split(".")[0]
            result = projects[0].create_descriptor(desc_name, descriptor_type, text, data_type)
        else:
            text = request.POST.get('text')
            desc_name = request.POST.get('id')
            result = projects[0].create_descriptor(desc_name, descriptor_type, text, data_type)
        # 'result' is the created descriptor id on success, False on failure.
        response_data = {
            'project_id': project_id,
            'descriptor_type': descriptor_type,
            'project_overview_data': projects[0].get_overview_data(),
            'descriptor_id': result,
            'alert_message': {
                'success': True if result != False else False,
                'message': "Descriptor created" if result else 'Error in creation'}
        }
        status_code = 200 if result != False else 500
        response = HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
        response["Access-Control-Allow-Origin"] = "*"
        return response
@login_required
def edit_descriptor(request, project_id=None, descriptor_id=None, descriptor_type=None):
    """Update (POST) or display (GET) a single descriptor of a project.

    POST saves the submitted descriptor text and answers with a JSON status
    payload; GET renders the descriptor page with the descriptor serialized
    both as YAML and as JSON.
    """
    if request.method == 'POST':
        print "edit_descriptor"
        # select_subclasses() resolves the concrete Project subclass.
        projects = Project.objects.filter(id=project_id).select_subclasses()
        result = projects[0].edit_descriptor(descriptor_type, descriptor_id, request.POST.get('text'),
                                             request.POST.get('type'))
        response_data = {
            'project_id': project_id,
            'descriptor_type': descriptor_type,
            'project_overview_data': projects[0].get_overview_data(),
            'alert_message': {
                'success': result,
                'message': "Descriptor modified." if result else 'Error during descriptor editing.'}
        }
        status_code = 200 if result else 500
        response = HttpResponse(json.dumps(response_data), content_type="application/json", status=status_code)
        response["Access-Control-Allow-Origin"] = "*"
        return response
    elif request.method == 'GET':
        # Side effect: ensures a CSRF token exists for the editor page.
        csrf_token_value = get_token(request)
        projects = Project.objects.filter(id=project_id).select_subclasses()
        project_overview = projects[0].get_overview_data()
        print project_overview
        prj_token = project_overview['type']
        # Template path is namespaced by project type, e.g. "<type>/descriptor/...".
        page = prj_token + '/descriptor/descriptor_view.html'
        descriptor = projects[0].get_descriptor(descriptor_id, descriptor_type)
        descriptor_string_json = json.dumps(descriptor)
        descriptor_string_yaml = Util.json2yaml(descriptor)
        #print "METODO GET DESCRIPTOR", descriptor_id
        return render(request, page, {
            'project_id': project_id,
            'descriptor_id': descriptor_id,
            'project_overview_data': projects[0].get_overview_data(),
            'descriptor_type': descriptor_type,
            'descriptor_strings': {'descriptor_string_yaml': descriptor_string_yaml,
                                   'descriptor_string_json': descriptor_string_json}})
@login_required
def graph_positions(request, project_id=None):
    """Persist the node positions of the project graph (POST only)."""
    if request.method != 'POST':
        return
    project = Project.objects.filter(id=project_id).select_subclasses()[0]
    positions = json.loads(request.POST.get('positions'))
    saved = project.edit_graph_positions(positions)
    response = HttpResponse(json.dumps({}),
                            content_type="application/json",
                            status=200 if saved else 500)
    response["Access-Control-Allow-Origin"] = "*"
    return response
@login_required
def add_element(request, project_id=None):
    """Add a graph element to the project (POST only)."""
    if request.method != 'POST':
        return
    project = Project.objects.filter(id=project_id).select_subclasses()[0]
    added = project.get_add_element(request)
    response = HttpResponse(json.dumps({}),
                            content_type="application/json",
                            status=200 if added else 500)
    response["Access-Control-Allow-Origin"] = "*"
    return response
@login_required
def remove_element(request, project_id=None):
    """Remove a graph element from the project (POST only)."""
    if request.method != 'POST':
        return
    project = Project.objects.filter(id=project_id).select_subclasses()[0]
    removed = project.get_remove_element(request)
    response = HttpResponse(json.dumps({}),
                            content_type="application/json",
                            status=200 if removed else 500)
    response["Access-Control-Allow-Origin"] = "*"
    return response
@login_required
def add_link(request, project_id=None):
    """Add a link between graph elements of the project (POST only)."""
    if request.method != 'POST':
        return
    project = Project.objects.filter(id=project_id).select_subclasses()[0]
    linked = project.get_add_link(request)
    response = HttpResponse(json.dumps({}),
                            content_type="application/json",
                            status=200 if linked else 500)
    response["Access-Control-Allow-Origin"] = "*"
    return response
@login_required
def remove_link(request, project_id=None):
    """Remove a link between graph elements of the project (POST only)."""
    if request.method != 'POST':
        return
    project = Project.objects.filter(id=project_id).select_subclasses()[0]
    unlinked = project.get_remove_link(request)
    response = HttpResponse(json.dumps({}),
                            content_type="application/json",
                            status=200 if unlinked else 500)
    response["Access-Control-Allow-Origin"] = "*"
    return response
@login_required
def get_available_nodes(request, project_id=None):
if request.method == 'GET':
csrf_token_value = get_token(request)
projects = Project.objects.filter(id=project_id).select_subclasses()
print "get_available_nodes", request.GET.dict()
result = projects[0].get_available_nodes(request.GET.dict())
status_code = 500 if result == None else 200
print json.dumps(result)
response = HttpResponse(json.dumps(result), content_type="application/json", status=status_code)
response["Access-Control-Allow-Origin"] = "*"
return response
@login_required
def overviewelement(request, project_id=None):
if request.method == 'GET':
result = {}
error_msg = None
try:
projects = Project.objects.filter(id=project_id).select_subclasses()
project = projects[0]
parameters = request.GET.dict()
print "parameters", parameters
result = project.get_node_overview(**parameters)
except Exception as e:
error_msg = str(e)
if error_msg is not None:
return JsonResponse({'error': {'error_msg': str(error_msg)}})
return JsonResponse({'node_overview': result})
# ETSI specific method #
@login_required
def add_node_to_vnffg(request, project_id=None):
print "add_node_to_vnffg" # TODO log
if request.method == 'POST':
projects = Project.objects.filter(id=project_id).select_subclasses()
result = projects[0].add_node_to_vnffg(request)
status_code = 200 if result else 500
response = HttpResponse(json.dumps({}), content_type="application/json", status=status_code)
response["Access-Control-Allow-Origin"] = "*"
return response
@login_required
def unused_vnf(request, project_id=None, nsd_id=None):
if request.method == 'GET':
print 'in method unused_vnf : ', project_id, nsd_id # TODO log
projects = Project.objects.filter(id=project_id).select_subclasses()
result = projects[0].get_unused_vnf(nsd_id)
status_code = 500 if result == None else 200
response = HttpResponse(json.dumps(result), content_type="application/json", status=status_code)
response["Access-Control-Allow-Origin"] = "*"
return response
# end ETSI specific method #
# TOSCA specific method #
@login_required
def generatehottemplate(request, project_id=None, project=None, descriptor_id=None, descriptor_type=None):
    """TOSCA custom action: build a Heat Orchestration Template (HOT).

    One of the custom_action targets; ``project`` is already resolved from
    ``project_id`` by the generic custom_action dispatcher.
    """
    try:
        result = project.get_generatehotemplate(request, descriptor_id, descriptor_type)
    except Exception as e:
        return JsonResponse({'error': {'error_msg': str(e)}})
    return JsonResponse({'hot': str(result)})
# end TOSCA specific method #
@login_required
def custom_action(request, project_id=None, descriptor_id=None, descriptor_type=None, action_name=None):
    """Generic dispatcher for project-type-specific actions (GET only).

    Looks up ``action_name`` among this module's globals and calls it with the
    resolved project (e.g. ``generatehottemplate`` for TOSCA projects).

    NOTE(review): ``action_name`` is looked up in ``globals()`` without a
    whitelist, so any module-level callable reachable through the URLconf can
    be invoked -- confirm the URL pattern restricts the allowed names.
    """
    if request.method == 'GET':
        projects = Project.objects.filter(id=project_id).select_subclasses()
        print "Custom action: " + action_name
        return globals()[action_name](request, project_id, projects[0], descriptor_id, descriptor_type)
## Repo section
@login_required
def repos_list(request):
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
url = None
result = {}
try:
options = {}
for key in ('name'):
value = request.GET.get(key)
if value:
options[key] = value
repos = Repository.objects.filter(**options).values()
if 'application/json' in raw_content_types:
result = {'repos': list(repos)}
else:
url = 'repository/repo_list.html'
result = {'repos': list(repos)}
except Exception as e:
print e
url = 'error.html'
result = {'error_msg': 'Agents not found.'}
return __response_handler(request, result, url)
@login_required
def create_new_repo(request):
if request.method == 'POST':
try:
name = request.POST.get('name', '')
base_url = request.POST.get('base_url', ' ')
Repository.objects.create(name=name, base_url=base_url)
except Exception as e:
print e
url = 'error.html'
result = {'error_msg': 'Error creating ' + type + ' Repository! Please retry.'}
return __response_handler(request, result, url)
return redirect('repos:repos_list')
@login_required
def delete_repo(request, repo_id=None):
try:
Repository.objects.filter(id=repo_id).delete()
url = 'repos:repos_list'
result = {}
except Exception as e:
print e
url = 'error.html'
result = {'error_msg': 'Error deleting ' + repo_id + ' Repository! Please retry.'}
return __response_handler(request, result, url, to_redirect=True)
def translators_type_list(request):
raw_content_types = request.META.get('HTTP_ACCEPT', '*/*').split(',')
url = None
result = {}
try:
if 'application/json' in raw_content_types:
result = {'translator_type': [
{'id': 0, 'name': 'Ansible playbook with Heat', 't_id': 'sf2heat'},
{'id': 1, 'name': 'Ansible playbook with K8s', 't_id': 'k8sansible'},
]}
except Exception as e:
print e
url = 'error.html'
result = {'error_msg': 'Unknown error.'}
return __response_handler(request, result, url)
def __response_handler(request, data_res, url=None, to_redirect=None, *args, **kwargs):
    """Answer a request as JSON, a redirect, or a rendered template.

    JSON wins whenever the client accepts it; otherwise ``to_redirect``
    selects between redirect(url) and render(url, data_res).
    """
    accepted = request.META.get('HTTP_ACCEPT', '*/*').split(',')
    if 'application/json' in accepted:
        return JsonResponse(data_res)
    if to_redirect:
        return redirect(url, *args, **kwargs)
    return render(request, url, data_res)
| |
from __future__ import unicode_literals
from cloudant import Cloudant
from flask import Flask, render_template, request, jsonify, url_for, request
from flask_bootstrap import Bootstrap
from forms import FrontPage
from sttClient import function_for_scripting
from src_to_target_translator import translate_to_language
import atexit
import cf_deployment_tracker
import os
import json
import youtube_dl
import requests
import swiftclient
from keystoneclient import client
import re
#########Object Storage################
# SECURITY NOTE(review): live SoftLayer Object Storage / Keystone credentials
# are hard-coded below; they should be loaded from the environment or a
# credentials service, and these values revoked.
auth_url= "https://identity.open.softlayer.com/v3"
project= "object_storage_eb563998_b07e_495b_8bfa_12e018b92e58"
projectId= "d69ea7a6562148bca85060be87f2c033"
region= "dallas"
userId= "d6e41d8205d642b8b7eca5be34953bb9"
username= "admin_0ddb396d554a6b65c4db3f2d4afc2f28026155f0"
password= "NM[#mkT7]DigVV6f"
domainId= "e6c378bb33244a6e91410fc2338ba829"
domainName= "1396879"
role= "admin"
#######################################
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
Bootstrap(app)
# SECURITY NOTE(review): Flask secret key is hard-coded; load from the environment.
app.config['SECRET_KEY'] = '435897348y348f3784hf7'
db_name = 'mydb'
# NOTE: this rebinds the name ``client`` imported from keystoneclient above;
# from here on ``client`` is the Cloudant handle (or None when unconfigured).
client = None
db = None
class MyLogger(object):
    """Minimal logger for youtube_dl: drop debug/warning chatter, print errors."""

    def debug(self, msg):
        # Progress chatter is intentionally discarded.
        return None

    def warning(self, msg):
        # Warnings are intentionally discarded as well.
        return None

    def error(self, msg):
        print(msg)
def my_hook(d):
    """youtube_dl progress hook: announce when the download has finished."""
    status = d['status']
    if status == 'finished':
        print('Done downloading, now converting ...')
# Bind a Cloudant database when credentials are available: prefer the
# Bluemix-provided VCAP_SERVICES environment variable, else a local
# vcap-local.json file.  When neither is present, ``client``/``db`` stay
# None and the visitor endpoints degrade gracefully.
# NOTE(review): the assignments below rebind the module-level ``password``
# (the Object Storage password defined above) with the Cloudant password;
# the swiftclient.Connection calls in home() read ``password`` afterwards,
# so they would then use the wrong credential -- confirm intended.
if 'VCAP_SERVICES' in os.environ:
    vcap = json.loads(os.getenv('VCAP_SERVICES'))
    print('Found VCAP_SERVICES')
    if 'cloudantNoSQLDB' in vcap:
        creds = vcap['cloudantNoSQLDB'][0]['credentials']
        user = creds['username']
        password = creds['password']
        url = 'https://' + creds['host']
        client = Cloudant(user, password, url=url, connect=True)
        db = client.create_database(db_name, throw_on_exists=False)
elif os.path.isfile('vcap-local.json'):
    with open('vcap-local.json') as f:
        vcap = json.load(f)
        print('Found local VCAP_SERVICES')
        creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
        user = creds['username']
        password = creds['password']
        url = 'https://' + creds['host']
        client = Cloudant(user, password, url=url, connect=True)
        db = client.create_database(db_name, throw_on_exists=False)
# On Bluemix, get the port number from the environment variable PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('PORT', 8080))
def function_scripting_wrapper(inputFile, model_language):
    """Run Watson speech-to-text on ``inputFile`` writing results to ./output.

    Thin wrapper fixing content type (audio/webm) and thread count (5).
    SECURITY NOTE(review): the Watson STT credential is hard-coded here; move
    it to configuration and revoke this value.
    """
    function_for_scripting("f3685733-7ea7-45a6-abf7-84e6a4c6d255:o8xSeAutevzS", inputFile, "./output", "audio/webm", model_language, 5)
@app.route('/', methods=['GET', 'POST'])
def home():
    """Front page: on POST, download a YouTube video, transcribe it, and
    (optionally) translate the captions.

    Pipeline: youtube_dl fetches the audio as 0001.webm -> the file is
    uploaded to Object Storage -> speech-to-text produces output.srt ->
    output.srt is translated when source and target languages differ and
    uploaded to Object Storage as well.
    """
    video_url = None
    message = None
    if request.method == "POST":
        video_url = request.form['youtube_url']
        source_language = request.form['source_language']
        target_language = request.form['target_language']
        # model_us = "en-US_BroadbandModel"
        # model_esp = "es-ES_BroadbandModel"
        # model_fr = "fr-FR_BroadbandModel"
        # model_ch = "zh-CN_BroadbandModel"
        # model_jp = "ja-JP_BroadbandModel"
        # Maps UI language name -> [Watson STT model, ISO 639-1 code].
        dict_for_languages = {}
        dict_for_languages['english'] = ["en-US_BroadbandModel", "en"]
        dict_for_languages['spanish'] = ["es-ES_BroadbandModel", "es"]
        dict_for_languages['french'] = ["fr-FR_BroadbandModel", "fr"]
        dict_for_languages['chinese'] = ["zh-CN_BroadbandModel", "zh"]
        dict_for_languages['japanese'] = ["ja-JP_BroadbandModel", "ja"]
        if 1:
            # Download audio-only webm to a fixed local filename.
            ydl_opts = {
                'format': 'webm',
                'logger': MyLogger(),
                'progress_hooks': [my_hook],
                'outtmpl': '0001.webm',
            }
            with youtube_dl.YoutubeDL(ydl_opts) as ydl:
                ydl.download([video_url])
            ###########Object Storage##################
            container_name = 'audio'
            # File name for testing
            file_name = '0001.webm'
            IBM_Objectstorage_Connection = swiftclient.Connection(key=password,authurl=auth_url,auth_version='3',
                os_options={"project_id": projectId,"user_id": userId,"region_name": region})
            #IBM_Objectstorage_Connection.put_container(container_name)
            #print "nContainer %s created successfully." % container_name
            print 'start to upload'
            example_file = open('0001.webm', 'r')
            IBM_Objectstorage_Connection.put_object(container_name,file_name,contents=example_file.read(),content_type='webm')
            example_file.close()
            ###########Object Storage##################
            print 'start to download'
            Object_Store_file_details = IBM_Objectstorage_Connection.get_object(container_name, file_name)
            # Transcribe with the STT model matching the source language.
            function_scripting_wrapper(file_name, dict_for_languages[source_language][0])
            if source_language != target_language:
                translate_to_language(dict_for_languages[source_language][1], dict_for_languages[target_language][1])
            ###########Object Storage##################
            container_name = 'subtitle'
            # File name for testing
            file_name = 'output.srt'
            IBM_Objectstorage_Connection = swiftclient.Connection(key=password,authurl=auth_url,auth_version='3',
                os_options={"project_id": projectId,"user_id": userId,"region_name": region})
            #IBM_Objectstorage_Connection.put_container(container_name)
            #print "nContainer %s created successfully." % container_name
            print 'start to upload'
            example_file = open(file_name, 'r')
            IBM_Objectstorage_Connection.put_object(container_name,file_name,contents=example_file.read(),content_type='srt')
            example_file.close()
            ###########Object Storage##################
            #os.system("python ./sttClient.py -credentials f3685733-7ea7-45a6-abf7-84e6a4c6d255:o8xSeAutevzS -model en-US_BroadbandModel")
            message = "Video Done"
            #os.system("rm 0001.webm")
    return render_template('index.html', video_url=video_url, message=message)
@app.route('/video', methods=['GET', 'POST'])
def video():
    """Render the playback page for the ``video_url`` query parameter."""
    url_param = request.args.get('video_url')
    print(url_param)
    return render_template('video.html', video_url=url_param)
# /* Endpoint to greet and add a new visitor to database.
# * Send a POST request to localhost:8080/api/visitors with body
# * {
# * "name": "Bob"
# * }
# */
@app.route('/api/visitors', methods=['GET'])
def get_visitor():
    """Return the names of all visitors, or [] when no database is bound."""
    if not client:
        print('No database')
        return jsonify([])
    return jsonify([doc['name'] for doc in db])
# /**
# * Endpoint to get a JSON array of all the visitors in the database
# * REST API example:
# * <code>
# * GET http://localhost:8080/api/visitors
# * </code>
# *
# * Response:
# * [ "Bob", "Jane" ]
# * @return An array of all the visitor names
# */
@app.route('/api/visitors', methods=['POST'])
def put_visitor():
    """Store the POSTed visitor name (when a database is bound) and greet."""
    user = request.json['name']
    if not client:
        print('No database')
        return 'Hello %s!' % user
    db.create_document({'name': user})
    return 'Hello %s! I added you to the database.' % user
"""
@app.route('/download')
def download():
r = requests.get('file:///Users/home/Desktop/att_hackathon_jjys-master/output.srt')
with open("output.srt", "wb") as code:
code.write(r.content)
return render_template('index.html')"""
@atexit.register
def shutdown():
    """Close the Cloudant connection at interpreter exit, if one was opened."""
    if client:
        client.disconnect()
if __name__ == '__main__':
    # BUG FIX: honor the ``port`` resolved from the PORT environment variable
    # above (defaults to 8080); the original hard-coded 8080 and silently
    # ignored the platform-assigned port despite computing it.
    app.run(host='0.0.0.0', port=port, debug=False)
| |
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from guardian.compat import get_model_name, url, patterns
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from guardian.compat import get_user_model
from guardian.forms import UserObjectPermissionsForm
from guardian.forms import GroupObjectPermissionsForm
from guardian.shortcuts import get_perms
from guardian.shortcuts import get_users_with_perms
from guardian.shortcuts import get_groups_with_perms
from guardian.shortcuts import get_perms_for_model
from guardian.models import Group
class AdminUserObjectPermissionsForm(UserObjectPermissionsForm):
    """
    Extends :form:`UserObjectPermissionsForm`, overriding only
    ``get_obj_perms_field_widget`` so that it returns the admin's
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
    """
    def get_obj_perms_field_widget(self):
        widget_label = _("Permissions")
        return FilteredSelectMultiple(widget_label, False)
class AdminGroupObjectPermissionsForm(GroupObjectPermissionsForm):
    """
    Extends :form:`GroupObjectPermissionsForm`, overriding only
    ``get_obj_perms_field_widget`` so that it returns the admin's
    ``django.contrib.admin.widgets.FilteredSelectMultiple`` widget.
    """
    def get_obj_perms_field_widget(self):
        widget_label = _("Permissions")
        return FilteredSelectMultiple(widget_label, False)
class GuardedModelAdminMixin(object):
    """
    Serves as a helper for custom subclassing ``admin.ModelAdmin``.
    """
    # Template overrides used by the object-permission views below.
    change_form_template = \
        'admin/guardian/model/change_form.html'
    obj_perms_manage_template = \
        'admin/guardian/model/obj_perms_manage.html'
    obj_perms_manage_user_template = \
        'admin/guardian/model/obj_perms_manage_user.html'
    obj_perms_manage_group_template = \
        'admin/guardian/model/obj_perms_manage_group.html'
    # Opt-in row-level filtering for non-superusers (see get_queryset).
    user_can_access_owned_objects_only = False
    user_owned_objects_field = 'user'
    user_can_access_owned_by_group_objects_only = False
    group_owned_objects_field = 'group'
    include_object_permissions_urls = True
    def queryset(self, request):
        """
        Deprecated alias: this method has been renamed to get_queryset in
        Django 1.6.  Kept so pre-1.6 callers still work.
        """
        return self.get_queryset(request)
    def get_queryset(self, request):
        """
        Returns the admin queryset, optionally restricted to objects owned by
        the requesting user or one of the user's groups.  Superusers always
        see everything.
        """
        super_class = super(GuardedModelAdminMixin, self)
        if hasattr(super_class, 'get_queryset'):
            # Django 1.6 or later.
            qs = super_class.get_queryset(request)
        else:
            # Django 1.5 or earlier.
            qs = super_class.queryset(request)
        if request.user.is_superuser:
            return qs
        if self.user_can_access_owned_objects_only:
            filters = {self.user_owned_objects_field: request.user}
            qs = qs.filter(**filters)
        if self.user_can_access_owned_by_group_objects_only:
            User = get_user_model()
            # Reverse name of the User.groups M2M, e.g. 'user'.
            user_rel_name = User.groups.field.related_query_name()
            qs_key = '%s__%s' % (self.group_owned_objects_field, user_rel_name)
            filters = {qs_key: request.user}
            qs = qs.filter(**filters)
        return qs
    def get_urls(self):
        """
        Extends standard admin model urls with the following:
        - ``.../permissions/`` under ``app_model_permissions`` url name (params: object_pk)
        - ``.../permissions/user-manage/<user_id>/`` under ``app_model_permissions_manage_user`` url name (params: object_pk, user_pk)
        - ``.../permissions/group-manage/<group_id>/`` under ``app_model_permissions_manage_group`` url name (params: object_pk, group_pk)
        .. note::
           ``...`` above are standard, instance detail url (i.e.
           ``/admin/flatpages/1/``)
        """
        urls = super(GuardedModelAdminMixin, self).get_urls()
        if self.include_object_permissions_urls:
            info = self.model._meta.app_label, get_model_name(self.model)
            # Prepended (not appended) so they win over the admin catch-all.
            myurls = patterns('',
                url(r'^(?P<object_pk>.+)/permissions/$',
                    view=self.admin_site.admin_view(self.obj_perms_manage_view),
                    name='%s_%s_permissions' % info),
                url(r'^(?P<object_pk>.+)/permissions/user-manage/(?P<user_id>\-?\d+)/$',
                    view=self.admin_site.admin_view(
                        self.obj_perms_manage_user_view),
                    name='%s_%s_permissions_manage_user' % info),
                url(r'^(?P<object_pk>.+)/permissions/group-manage/(?P<group_id>\-?\d+)/$',
                    view=self.admin_site.admin_view(
                        self.obj_perms_manage_group_view),
                    name='%s_%s_permissions_manage_group' % info),
            )
            urls = myurls + urls
        return urls
    def get_obj_perms_base_context(self, request, obj):
        """
        Returns context dictionary with common admin and object permissions
        related content.
        """
        context = {
            'adminform': {'model_admin': self},
            'media': self.media,
            'object': obj,
            'app_label': self.model._meta.app_label,
            'opts': self.model._meta,
            'original': hasattr(obj, '__unicode__') and obj.__unicode__() or\
                str(obj),
            'has_change_permission': self.has_change_permission(request, obj),
            'model_perms': get_perms_for_model(obj),
            'title': _("Object permissions"),
        }
        return context
    def obj_perms_manage_view(self, request, object_pk):
        """
        Main object permissions view. Presents all users and groups with any
        object permissions for the current model *instance*. Users or groups
        without object permissions for related *instance* would **not** be
        shown. In order to add or manage user or group one should use links or
        forms presented within the page.
        """
        obj = get_object_or_404(self.get_queryset(request), pk=object_pk)
        # Sorted for stable display: users by USERNAME_FIELD, groups by name.
        users_perms = SortedDict(
            get_users_with_perms(obj, attach_perms=True,
                with_group_users=False))
        users_perms.keyOrder.sort(key=lambda user:
                                  getattr(user, get_user_model().USERNAME_FIELD))
        groups_perms = SortedDict(
            get_groups_with_perms(obj, attach_perms=True))
        groups_perms.keyOrder.sort(key=lambda group: group.name)
        if request.method == 'POST' and 'submit_manage_user' in request.POST:
            # "Manage user" submitted: redirect to the per-user view on success.
            user_form = UserManage(request.POST)
            group_form = GroupManage()
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                get_model_name(self.model)
            )
            if user_form.is_valid():
                user_id = user_form.cleaned_data['user'].id
                url = reverse(
                    '%s:%s_%s_permissions_manage_user' % info,
                    args=[obj.pk, user_id]
                )
                return redirect(url)
        elif request.method == 'POST' and 'submit_manage_group' in request.POST:
            # "Manage group" submitted: redirect to the per-group view on success.
            user_form = UserManage()
            group_form = GroupManage(request.POST)
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                get_model_name(self.model)
            )
            if group_form.is_valid():
                group_id = group_form.cleaned_data['group'].id
                url = reverse(
                    '%s:%s_%s_permissions_manage_group' % info,
                    args=[obj.pk, group_id]
                )
                return redirect(url)
        else:
            user_form = UserManage()
            group_form = GroupManage()
        context = self.get_obj_perms_base_context(request, obj)
        context['users_perms'] = users_perms
        context['groups_perms'] = groups_perms
        context['user_form'] = user_form
        context['group_form'] = group_form
        return render_to_response(self.get_obj_perms_manage_template(),
            context, RequestContext(request, current_app=self.admin_site.name))
    def get_obj_perms_manage_template(self):
        """
        Returns main object permissions admin template. May be overridden if
        need to change it dynamically.
        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function would
           return ``"admin/guardian/grappelli/obj_perms_manage.html"``.
        """
        if 'grappelli' in settings.INSTALLED_APPS:
            return 'admin/guardian/contrib/grappelli/obj_perms_manage.html'
        return self.obj_perms_manage_template
    def obj_perms_manage_user_view(self, request, object_pk, user_id):
        """
        Manages selected users' permissions for current object.
        """
        user = get_object_or_404(get_user_model(), id=user_id)
        obj = get_object_or_404(self.get_queryset(request), pk=object_pk)
        form_class = self.get_obj_perms_manage_user_form()
        form = form_class(user, obj, request.POST or None)
        if request.method == 'POST' and form.is_valid():
            form.save_obj_perms()
            msg = ugettext("Permissions saved.")
            messages.success(request, msg)
            # Redirect back to the same page (POST/Redirect/GET pattern).
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                get_model_name(self.model)
            )
            url = reverse(
                '%s:%s_%s_permissions_manage_user' % info,
                args=[obj.pk, user.id]
            )
            return redirect(url)
        context = self.get_obj_perms_base_context(request, obj)
        context['user_obj'] = user
        context['user_perms'] = get_perms(user, obj)
        context['form'] = form
        return render_to_response(self.get_obj_perms_manage_user_template(),
            context, RequestContext(request, current_app=self.admin_site.name))
    def get_obj_perms_manage_user_template(self):
        """
        Returns object permissions for user admin template. May be overridden
        if need to change it dynamically.
        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function would
           return ``"admin/guardian/grappelli/obj_perms_manage_user.html"``.
        """
        if 'grappelli' in settings.INSTALLED_APPS:
            return 'admin/guardian/contrib/grappelli/obj_perms_manage_user.html'
        return self.obj_perms_manage_user_template
    def get_obj_perms_manage_user_form(self):
        """
        Returns form class for user object permissions management. By default
        :form:`AdminUserObjectPermissionsForm` is returned.
        """
        return AdminUserObjectPermissionsForm
    def obj_perms_manage_group_view(self, request, object_pk, group_id):
        """
        Manages selected groups' permissions for current object.
        """
        group = get_object_or_404(Group, id=group_id)
        obj = get_object_or_404(self.get_queryset(request), pk=object_pk)
        form_class = self.get_obj_perms_manage_group_form()
        form = form_class(group, obj, request.POST or None)
        if request.method == 'POST' and form.is_valid():
            form.save_obj_perms()
            msg = ugettext("Permissions saved.")
            messages.success(request, msg)
            # Redirect back to the same page (POST/Redirect/GET pattern).
            info = (
                self.admin_site.name,
                self.model._meta.app_label,
                get_model_name(self.model)
            )
            url = reverse(
                '%s:%s_%s_permissions_manage_group' % info,
                args=[obj.pk, group.id]
            )
            return redirect(url)
        context = self.get_obj_perms_base_context(request, obj)
        context['group_obj'] = group
        context['group_perms'] = get_perms(group, obj)
        context['form'] = form
        return render_to_response(self.get_obj_perms_manage_group_template(),
            context, RequestContext(request, current_app=self.admin_site.name))
    def get_obj_perms_manage_group_template(self):
        """
        Returns object permissions for group admin template. May be overridden
        if need to change it dynamically.
        .. note::
           If ``INSTALLED_APPS`` contains ``grappelli`` this function would
           return ``"admin/guardian/grappelli/obj_perms_manage_group.html"``.
        """
        if 'grappelli' in settings.INSTALLED_APPS:
            return 'admin/guardian/contrib/grappelli/obj_perms_manage_group.html'
        return self.obj_perms_manage_group_template
    def get_obj_perms_manage_group_form(self):
        """
        Returns form class for group object permissions management. By default
        :form:`AdminGroupObjectPermissionsForm` is returned.
        """
        return AdminGroupObjectPermissionsForm
class GuardedModelAdmin(GuardedModelAdminMixin, admin.ModelAdmin):
    """
    Extends ``django.contrib.admin.ModelAdmin`` class. Provides some extra
    views for object permissions management at admin panel. It also changes
    default ``change_form_template`` option to
    ``'admin/guardian/model/change_form.html'`` which is required for the
    object-permissions url to be shown at the model pages.
    **Extra options**
    ``GuardedModelAdmin.obj_perms_manage_template``
        *Default*: ``admin/guardian/model/obj_perms_manage.html``
    ``GuardedModelAdmin.obj_perms_manage_user_template``
        *Default*: ``admin/guardian/model/obj_perms_manage_user.html``
    ``GuardedModelAdmin.obj_perms_manage_group_template``
        *Default*: ``admin/guardian/model/obj_perms_manage_group.html``
    ``GuardedModelAdmin.user_can_access_owned_objects_only``
        *Default*: ``False``
        If this would be set to ``True``, ``request.user`` would be used to
        filter out objects he or she doesn't own (checking ``user`` field
        of used model - field name may be overridden by
        ``user_owned_objects_field`` option).
        .. note::
           Please remember that this will **NOT** affect superusers!
           Admins would still see all items.
    ``GuardedModelAdmin.user_can_access_owned_by_group_objects_only``
        *Default*: ``False``
        If this would be set to ``True``, ``request.user`` would be used to
        filter out objects her or his group doesn't own (checking if any group
        user belongs to is set as ``group`` field of the object; name of the
        field can be changed by overriding ``group_owned_objects_field``).
        .. note::
           Please remember that this will **NOT** affect superusers!
           Admins would still see all items.
    ``GuardedModelAdmin.group_owned_objects_field``
        *Default*: ``group``
    ``GuardedModelAdmin.include_object_permissions_urls``
        *Default*: ``True``
        .. versionadded:: 1.2
        Might be set to ``False`` in order **NOT** to include guardian-specific
        urls.
    **Usage example**
    Just use :admin:`GuardedModelAdmin` instead of
    ``django.contrib.admin.ModelAdmin``.
    .. code-block:: python
        from django.contrib import admin
        from guardian.admin import GuardedModelAdmin
        from myapp.models import Author
        class AuthorAdmin(GuardedModelAdmin):
            pass
        admin.site.register(Author, AuthorAdmin)
    """
class UserManage(forms.Form):
    """Small admin form resolving a USERNAME_FIELD value to a ``User``."""
    user = forms.CharField(label=_("User identification"),
                           max_length=200,
                           error_messages={'does_not_exist': _("This user does not exist")},
                           help_text=_('Enter a value compatible with User.USERNAME_FIELD')
                          )

    def clean_user(self):
        """
        Returns the ``User`` instance matching the given identification.
        """
        identification = self.cleaned_data['user']
        user_model = get_user_model()
        # Custom user models define USERNAME_FIELD; fall back to 'username'.
        username_field = getattr(user_model, 'USERNAME_FIELD', 'username')
        try:
            return user_model.objects.get(**{username_field: identification})
        except user_model.DoesNotExist:
            raise forms.ValidationError(
                self.fields['user'].error_messages['does_not_exist'])
class GroupManage(forms.Form):
    """Small admin form resolving a group name to a ``Group``."""
    group = forms.CharField(max_length=80,
                            error_messages={'does_not_exist': _("This group does not exist")})

    def clean_group(self):
        """
        Returns the ``Group`` instance matching the given group name.
        """
        name = self.cleaned_data['group']
        try:
            return Group.objects.get(name=name)
        except Group.DoesNotExist:
            raise forms.ValidationError(
                self.fields['group'].error_messages['does_not_exist'])
| |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilties for data generation for Wikipedia Revision problem.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import re
import subprocess
import six
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import text_encoder
import tensorflow as tf
def to_unicode(s):
  """Decode a byte string to unicode text.

  Improved: on PY2 ``str.decode("utf-8")`` is exactly equivalent to
  ``unicode(s, "utf-8")``, so the ``six.PY2`` branch is unnecessary, and the
  function is generalized to pass already-decoded text through unchanged
  (byte-string behavior is identical to the original).

  Args:
    s: a UTF-8 byte string, or already-decoded text.

  Returns:
    a unicode string
  """
  if isinstance(s, bytes):
    return s.decode("utf-8")
  return s
def include_revision(revision_num, skip_factor=1.1):
  """Decide whether to include a revision.

  If the number of revisions is large, we exclude some revisions to avoid
  a quadratic blowup in runtime, since the article is likely also large.
  We make the ratio between consecutive included revision numbers
  approximately equal to "skip_factor".

  Args:
    revision_num: an integer
    skip_factor: a floating point number >= 1.0

  Returns:
    a boolean
  """
  if skip_factor <= 1.0:
    return True
  # Keep a revision exactly when it crosses a logarithmic bucket boundary.
  log_factor = math.log(skip_factor)
  bucket_here = int(math.log(revision_num + 1.0) / log_factor)
  bucket_next = int(math.log(revision_num + 2.0) / log_factor)
  return bucket_here != bucket_next
def file_page_generator(my_file, max_page_size=2**28):
  """Read wikipedia pages from a history dump.

  Since some pages can be terabytes in size (with all the revisions),
  we limit page size to max_page_size bytes.

  Fixes vs. the original: the duplicated page_start/page_end assignments are
  removed, and ``leftovers`` is cleared once consumed -- previously a stale
  partial page was re-prepended to the next chunk whenever a chunk ended
  without starting a new page, duplicating already-yielded data.

  Args:
    my_file: an open file object.
    max_page_size: an integer

  Yields:
    dictionaries produced by parse_page ("title"/"id"/"revisions").
  """
  page_start = " <page>\n"
  page_end = " </page>\n"
  chunk_size = max_page_size
  leftovers = ""
  while True:
    chunk = my_file.read(chunk_size)
    if not chunk:
      break
    chunk = leftovers + chunk
    # The carried-over partial page is now part of ``chunk``; clear it so it
    # cannot be prepended a second time on the next read.
    leftovers = ""
    current_pos = 0
    while True:
      start_pos = chunk.find(page_start, current_pos)
      if start_pos == -1:
        break
      end_pos = chunk.find(page_end, start_pos)
      if end_pos == -1:
        # Page continues into the next chunk: carry the partial page over,
        # unless it already exceeds the size limit (then drop it).
        if len(chunk) - start_pos > max_page_size:
          leftovers = ""
        else:
          leftovers = chunk[start_pos:]
        break
      raw_page = chunk[start_pos + len(page_start):end_pos]
      if len(raw_page) < max_page_size:
        ret = parse_page(raw_page)
        if ret:
          yield ret
      current_pos = end_pos + len(page_end)
def get_title(page):
    """Return the contents of the first <title> tag in a page string."""
    open_tag = "<title>"
    start_pos = page.find(open_tag)
    end_pos = page.find("</title>")
    assert start_pos != -1
    assert end_pos != -1
    return to_unicode(page[start_pos + len(open_tag):end_pos])
def get_id(page):
    """Return the integer contents of the first <id> tag in a page string."""
    open_tag = "<id>"
    start_pos = page.find(open_tag)
    end_pos = page.find("</id>")
    assert start_pos != -1
    assert end_pos != -1
    return int(page[start_pos + len(open_tag):end_pos])
def get_revisions(page):
    """Return the list of <revision> bodies found in a page string."""
    open_tag = " <revision>\n"
    close_tag = " </revision>\n"
    revisions = []
    pos = 0
    while True:
        begin = page.find(open_tag, pos)
        if begin == -1:
            return revisions
        end = page.find(close_tag, begin)
        # A started revision must be terminated within the page.
        assert end != -1
        revisions.append(page[begin + len(open_tag):end])
        pos = end + len(close_tag)
def parse_page(raw_page):
    """Create a dictionary with title, id, and list of revisions.

    The dictionary contains:
        "title": a string
        "id": an integer
        "revisions": a list of strings

    Args:
        raw_page: a string

    Returns:
        a dictionary, or None if the title contains ":" (non-article
        namespaces such as File:, Talk:, ...).
    """
    title = get_title(raw_page)
    page_id = get_id(raw_page)
    if ":" in title:
        return None
    return {
        "title": title,
        "id": page_id,
        "revisions": get_revisions(raw_page),
    }
def maybe_copy_file_to_directory(source_filepath, target_directory):
    """Copy a file into a directory if it is not already there.

    Args:
        source_filepath: a string
        target_directory: a string

    Returns:
        the target filepath (a string).
    """
    if not tf.gfile.Exists(target_directory):
        tf.logging.info("Creating directory %s" % target_directory)
        os.mkdir(target_directory)
    target_filepath = os.path.join(target_directory,
                                   os.path.basename(source_filepath))
    if tf.gfile.Exists(target_filepath):
        tf.logging.info("Not copying, file already found: %s" % target_filepath)
        return target_filepath
    tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath))
    tf.gfile.Copy(source_filepath, target_filepath)
    statinfo = os.stat(target_filepath)
    tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath,
                                                           statinfo.st_size))
    return target_filepath
def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp):
    """Generate pages from a list of .7z encoded history dumps.

    Args:
        corpus_files: a list of strings
        tmp_dir: a string
        max_page_size_exp: an integer (pages larger than 2**max_page_size_exp
            bytes are skipped by file_page_generator)

    Yields:
        page dicts as produced by parse_page.
    """
    for remote_filepath in corpus_files:
        # Stage the dump locally so the external 7z binary can read it.
        filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir)
        tf.logging.info("Reading from " + filepath)
        # "x -so" stream-decompresses to stdout instead of extracting to disk.
        command = ["7z", "x", "-so", filepath]
        tf.logging.info("Running command: %s", command)
        p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1)
        for page in file_page_generator(p.stdout, 2**max_page_size_exp):
            yield page
def get_text(revision, strip=True):
    """Extract the article text from a revision string.

    Args:
        revision: a string
        strip: a boolean - whether to strip wiki markup via strip_text.

    Returns:
        a unicode string ("" if no closing </text> tag is found).
    """
    # The text element opens as "<text ...attrs>"; find the end of that tag.
    open_pos = revision.find("<text")
    assert open_pos != -1
    body_start = revision.find(">", open_pos)
    assert body_start != -1
    body_start += len(">")
    body_end = revision.find("</text>")
    text = "" if body_end == -1 else revision[body_start:body_end]
    if strip:
        text = strip_text(text)
    return to_unicode(text)
def strip_text(text):
    """Strip wikipedia markup out of text, making it mostly prose.

    The reason for this is to learn a model that is good at editing prose.

    Args:
        text: a string

    Returns:
        a string
    """
    # Apply the same pipeline as the original nested call, innermost first.
    text = _remove_curly_braces(text)
    text = _remove_references(text)
    text = _remove_double_brackets(text)
    text = _remove_triple_quotes(text)
    return _remove_boring_lines(text)
def _find_and_replace(text, start_string, end_string, replace_fn):
"""Remove everything found between instances of start_string and end_string.
Replace each such instance with replace_fn(removed_text)
e.g. _find_and_replace("the [[fat]] cat [[sat]]", "[[", "]]", lambda x: x)
= "the fat cat sat"
Args:
text: a string
start_string: a string
end_string: a string
replace_fn: a unary function from string to string
Returns:
a string
"""
ret = ""
current_pos = 0
while True:
start_pos = text.find(start_string, current_pos)
if start_pos == -1:
ret += text[current_pos:]
break
ret += text[current_pos:start_pos]
end_pos = text.find(end_string, start_pos + len(start_string))
if end_pos == -1:
break
ret += replace_fn(text[start_pos + len(start_string):end_pos])
current_pos = end_pos + len(end_string)
return ret
def _remove_references(text):
    # Drop "<ref ...>...</ref>" citation spans entirely.
    return _find_and_replace(text, "<ref", "</ref>", lambda _unused: "")
def _remove_triple_quotes(text):
    # Keep the text between ''' bold markers, dropping the quotes themselves.
    return _find_and_replace(text, "'''", "'''", lambda inner: inner)
def _remove_curly_braces(text):
"""Remove everything in curly braces.
Curly braces may be nested, so we keep track of depth.
Args:
text: a string
Returns:
a string
"""
current_pos = 0
depth = 0
ret = ""
for match in re.finditer("[{}]", text):
if depth == 0:
ret += text[current_pos:match.start()]
depth += 1 if text[match.start()] == "{" else -1
current_pos = match.end()
if depth != 0:
# Many articles have mismatched braces, but it still seems better to remove
# them than not.
pass
else:
ret += text[current_pos:]
return ret
def _remove_double_brackets(text):
    """Remove [[...]] link markup, keeping only the human-visible text.

    Args:
        text: a string

    Returns:
        a string
    """
    def _visible_text(link):
        if ":" in link:
            # Probably a category/file/interwiki link; drop it entirely.
            return ""
        # For piped links "[[target|label]]", keep only the label.
        pipe = link.find("|")
        return link if pipe == -1 else link[pipe + 1:]

    return _find_and_replace(text, "[[", "]]", _visible_text)
def _remove_boring_lines(text):
"""Remove lines that do not start with a letter or a quote.
From inspecting the data, this seems to leave in most prose and remove
most weird stuff.
Args:
text: a string
Returns:
a string
"""
lines = text.split("\n")
filtered = [line for line in lines if re.match("[a-zA-z\"\']", line)]
return "\n".join(filtered)
def all_corpus_files(data_prefix):
    """Return the sorted list of corpus files matching data_prefix*."""
    matches = tf.gfile.Glob(data_prefix + "*")
    return sorted(matches)
def corpus_files_for_shard(shard_num, train_shards, dev_shards, data_prefix):
    """Return the subset of corpus files assigned to one shard.

    Files are distributed round-robin across train_shards + dev_shards shards.

    Args:
        shard_num: an integer in [0, train_shards + dev_shards)
        train_shards: an integer
        dev_shards: an integer
        data_prefix: a string

    Returns:
        a list of strings

    Raises:
        AssertionError: if shard_num is out of range.
    """
    total_shards = train_shards + dev_shards
    # Fix: validate shard_num before using it (the original asserted only
    # after the file list had already been computed with the bad value).
    assert shard_num < total_shards
    corpus_files = [
        filename for i, filename in enumerate(all_corpus_files(data_prefix))
        if i % total_shards == shard_num
    ]
    tf.logging.info("Corpus files for shard %s: %s", shard_num, corpus_files)
    return corpus_files
def vocab_filename(approx_vocab_size, strip):
    """Return the vocab filename, e.g. "vocab.wiki_revision.strip.32768"."""
    strip_suffix = ".strip" if strip else ""
    return "vocab.wiki_revision%s.%d" % (strip_suffix, approx_vocab_size)
def get_or_generate_vocabulary(data_dir,
                               tmp_dir,
                               data_prefix,
                               max_page_size_exp,
                               approx_vocab_size=32768,
                               strip=True):
    """Get or generate the vocabulary.

    Args:
        data_dir: a string
        tmp_dir: a string
        data_prefix: a string
        max_page_size_exp: an integer
        approx_vocab_size: an integer
        strip: a boolean

    Returns:
        a TextEncoder
    """
    # Sampling ~vocab_size/3 pages is a heuristic: enough text to fit the
    # subword vocabulary without reading the whole corpus.
    num_pages_for_vocab_generation = approx_vocab_size // 3
    vocab_file = vocab_filename(approx_vocab_size, strip)

    def my_generator(data_prefix):
        """Line generator for vocab."""
        count = 0
        # Files are iterated in reverse order ([::-1]) -- presumably so vocab
        # generation samples different files than data generation reads first;
        # TODO confirm the intent.
        for page in corpus_page_generator(
                all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp):
            revisions = page["revisions"]
            if revisions:
                # Only the latest revision of each page contributes text.
                text = get_text(revisions[-1], strip=strip)
                yield text
                count += 1
                if count % 100 == 0:
                    tf.logging.info("reading pages for vocab %d" % count)
                if count > num_pages_for_vocab_generation:
                    break

    # The generator is passed lazily; it is only consumed if the vocab file
    # does not already exist in data_dir.
    return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file,
                                                       approx_vocab_size,
                                                       my_generator(data_prefix))
def get_encoder_from_vocab(vocab_filepath):
    """Load a SubwordTextEncoder from an existing vocab file.

    Args:
        vocab_filepath: path to vocab, either local or cns

    Returns:
        a SubwordTextEncoder

    Raises:
        ValueError: if the vocab file does not exist.
    """
    if not tf.gfile.Exists(vocab_filepath):
        raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath))
    tf.logging.info("Found vocab file: %s", vocab_filepath)
    return text_encoder.SubwordTextEncoder(vocab_filepath)
def throw_empty_pairs(src_tgt_pairs):
    """Drop (src, tgt) pairs in which either element is empty.

    Args:
        src_tgt_pairs: list of (src, tgt) pairs

    Returns:
        subset of the input list for which both elements are non-empty
    """
    kept = []
    for pair in src_tgt_pairs:
        if pair[0] and pair[1]:
            kept.append(pair)
    return kept
def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0):
    """Filter out pairs whose differing-character count exceeds the cutoff.

    Args:
        source_target_input: a list of [source, target] pairs
        max_equal_to_diff_ratio: cutoff for ratio of diff chars to equal chars
            between source and target; falsy disables filtering.

    Returns:
        (filtered subset of the input pairs, number of pairs thrown out)
    """
    if not max_equal_to_diff_ratio:
        return source_target_input, 0
    kept = []
    thrown_out_count = 0
    for pair in source_target_input:
        src, tgt = pair[0], pair[1]
        diff_chars = 0
        equal_chars = 0
        for tag, i1, i2, j1, j2 in fast_match_sequences(src, tgt):
            if tag == "diff":
                # max() prevents double-counting substitutions.
                diff_chars += max(i2 - i1, j2 - j1)
            else:
                equal_chars += i2 - i1
        if diff_chars <= max_equal_to_diff_ratio * equal_chars:
            kept.append(pair)
        else:
            thrown_out_count += 1
    return kept, thrown_out_count
def introduce_errors(s,
                     corruption_rate=3e-3,
                     infill_marker="|?|",
                     max_infill_len=8):
    """Artificially add spelling errors and infill markers.

    This function should be applied to the inputs of a correction model.

    The artificial errors are particularly useful to train a network to
    correct spelling when the training data does not contain many
    natural errors.

    Also replaces some substrings with an "infill" marker.  e.g.
    "the fat cat sat on the mat" -> "the fat ca??? the mat"

    This causes the trained model to learn infilling (predicting what text
    to insert at the current cursor position).

    Args:
        s: a string (the uncorrupted text)
        corruption_rate: a floating point value.  Probability of introducing an
            error/infill at each character.
        infill_marker: a string
        max_infill_len: an optional integer - maximum number of characters to
            remove and replace by an infill marker.  None means no infilling.

    Returns:
        a pair (corrupted string, number of errors introduced).
    """
    num_errors = 0
    ret = []
    operations = [
        "delete",  # delete a character
        "insert",  # insert a random character from the input string
        "replace",  # replace a character with a random character from
        # the input string
        "transpose",  # transpose two adjacent characters
    ]
    if max_infill_len:
        operations.append("infill")
    pos = 0
    while pos < len(s):
        # With probability (1 - corruption_rate), copy the character through.
        if random.random() >= corruption_rate:
            ret.append(s[pos])
            pos += 1
            continue
        num_errors += 1
        # Pick one corruption operation uniformly at random.
        operation = operations[random.randint(0, len(operations) - 1)]
        if operation == "delete":
            pos += 1
        elif operation == "insert":
            # The current character is emitted on a later iteration, so this
            # inserts a random character before it.
            ret.append(s[random.randint(0, len(s) - 1)])
        elif operation == "replace":
            ret.append(s[random.randint(0, len(s) - 1)])
            pos += 1
        elif operation == "transpose":
            # Swap s[pos] and s[pos+1] ("" if pos is the last character).
            ret.append(s[pos + 1] if pos + 1 < len(s) else "")
            ret.append(s[pos])
            pos += 2
        else:
            assert operation == "infill"
            ret.append(infill_marker)
            pos += random.randint(0, max_infill_len)
    return "".join(ret), num_errors
def fast_match_sequences(a,
                         b,
                         a_start=0,
                         a_end=None,
                         b_start=0,
                         b_end=None,
                         min_match_length=3,
                         max_recursion_depth=128):
    """Compute diffs between two sequences.

    This function is similar in functionality and spirit to
    difflib.SequenceMatcher.get_opcodes, but it seems to run faster.

    if a_start, a_end, b_start, b_end are specified, then we compute diffs of
    the segments a[a_start:a_end] and b[b_start:b_end].  Returned indices
    are relative to the full sequence.

    We try to match the longest matching segments first, but due to heuristics
    in finding the matches, this is not guaranteed.

    Matching segments shorter than min_match_length are counted as part of the
    surrounding differing segments, unless they are at the beginning or end of
    both sequences.  This helps eliminate junk matches.

    Args:
        a: a sequence
        b: a sequence
        a_start: an optional integer
        a_end: an optional integer
        b_start: an optional integer
        b_end: an optional integer
        min_match_length: an integer
        max_recursion_depth: an integer - avoids crashes in weird corner cases
            involving pairs of long repetitive sequences.

    Returns:
        a list of 5-tuples (tag, i1, i2, j1, j2).
        Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2].
        tag is either "equal" or "diff".  Note that the tags differ from those
        returned by difflib.SequenceMatcher.get_opcodes.
    """
    if a_end is None:
        a_end = len(a)
    if b_end is None:
        b_end = len(b)
    # Base cases: both segments empty, or exactly one segment empty.
    if a_start == a_end and b_start == b_end:
        return []
    if a_start == a_end or b_start == b_end:
        return [("diff", a_start, a_end, b_start, b_end)]
    # Compute an index from value to first occurrence in the b segment.
    # Technically, we should index and explore all occurrences of a value,
    # but that might be much slower.
    b_index = {}
    # Iterating in reverse means the stored index is the FIRST occurrence.
    for j in range(b_end - 1, b_start - 1, -1):
        b_index[b[j]] = j
    # we will look for the longest match we can find.
    max_match_length = 0
    a_pos = a_start
    while a_pos < a_end:
        val = a[a_pos]
        b_pos = b_index.get(val)
        if b_pos is None:
            a_pos += 1
            continue
        else:
            # Grow the candidate match greedily in both directions.
            a_match_start = a_pos
            a_match_end = a_pos + 1
            b_match_start = b_pos
            b_match_end = b_pos + 1
            while (a_match_start > a_start and b_match_start > b_start and
                   a[a_match_start - 1] == b[b_match_start - 1]):
                a_match_start -= 1
                b_match_start -= 1
            while (a_match_end < a_end and b_match_end < b_end and
                   a[a_match_end] == b[b_match_end]):
                a_match_end += 1
                b_match_end += 1
            # Compute the length of the matching segment.  We prefer the longest.
            match_length = a_match_end - a_match_start
            # Extra credit for matching at the beginning or end of the sequence.
            if a_match_start == 0 and b_match_start == 0:
                match_length += min_match_length
            if a_match_end == len(a) and b_match_end == len(b):
                match_length += min_match_length
            if match_length > max_match_length:
                max_match_length = match_length
                best_match = (a_match_start, a_match_end, b_match_start,
                              b_match_end)
            # advance a_pos to the end of this match to avoid wasting time
            # rediscovering this match.
            a_pos = a_match_end
    if max_match_length < min_match_length or max_recursion_depth == 0:
        return [("diff", a_start, a_end, b_start, b_end)]
    # Recurse on the segments before and after the best match, and emit the
    # best match itself as an "equal" opcode between them.
    a_match_start, a_match_end, b_match_start, b_match_end = best_match
    return (fast_match_sequences(
        a, b, a_start, a_match_start, b_start, b_match_start, min_match_length,
        max_recursion_depth - 1) + [
            ("equal", a_match_start, a_match_end, b_match_start, b_match_end)
    ] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end,
                             min_match_length, max_recursion_depth - 1))
| |
import pytest
from unittest import mock
from rest_framework import serializers
from awx.api.versioning import reverse
from awx.main.utils.encryption import decrypt_field
from awx.conf import fields
from awx.conf.registry import settings_registry
from awx.conf.models import Setting
from awx.sso import fields as sso_fields
@pytest.fixture
def dummy_setting():
    """Fixture: a context-manager class that registers a setting for a test."""
    class _RegisteredSetting(object):
        def __init__(self, name, **kwargs):
            self.name = name
            self.kwargs = kwargs

        def __enter__(self):
            settings_registry.register(self.name, **self.kwargs)

        def __exit__(self, *exc_info):
            # Always unregister, even if the test body raised.
            settings_registry.unregister(self.name)

    return _RegisteredSetting
@pytest.fixture
def dummy_validate():
    """Fixture: a context-manager class that registers a category validator."""
    class _RegisteredValidator(object):
        def __init__(self, category_slug, func):
            self.category_slug = category_slug
            self.func = func

        def __enter__(self):
            settings_registry.register_validate(self.category_slug, self.func)

        def __exit__(self, *exc_info):
            # Always unregister, even if the test body raised.
            settings_registry.unregister_validate(self.category_slug)

    return _RegisteredValidator
@pytest.mark.django_db
def test_non_admin_user_does_not_see_categories(api_request, dummy_setting, normal_user):
    """Setting categories are visible to the default (admin) requester but hidden from normal users."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ):
        # Default requesting user sees the registered category.
        response = api_request(
            'get',
            reverse('api:setting_category_list',
                    kwargs={'version': 'v2'})
        )
        assert response.data['results']
        # A normal (non-admin) user gets an empty category list.
        response = api_request(
            'get',
            reverse('api:setting_category_list',
                    kwargs={'version': 'v2'}),
            user=normal_user
        )
        assert not response.data['results']
@pytest.mark.django_db
def test_setting_singleton_detail_retrieve(api_request, dummy_setting):
    """All registered settings of a category appear in the detail view, unset ones as None."""
    with dummy_setting(
        'FOO_BAR_1',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ), dummy_setting(
        'FOO_BAR_2',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ):
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.status_code == 200
        # No value has been stored yet, so both settings read as None.
        assert 'FOO_BAR_1' in response.data and response.data['FOO_BAR_1'] is None
        assert 'FOO_BAR_2' in response.data and response.data['FOO_BAR_2'] is None
@pytest.mark.django_db
def test_setting_singleton_detail_invalid_retrieve(api_request, dummy_setting, normal_user):
    """Unknown category slugs return 404; non-admin users are denied with 403."""
    with dummy_setting(
        'FOO_BAR_1',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ), dummy_setting(
        'FOO_BAR_2',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ):
        # 'barfoo' is not a registered category slug.
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'barfoo'})
        )
        assert response.status_code == 404
        # The slug exists, but a normal user may not read it.
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            user = normal_user
        )
        assert response.status_code == 403
@pytest.mark.django_db
def test_setting_signleton_retrieve_hierachy(api_request, dummy_setting):
    """A stored Setting row overrides the registered default value."""
    # NOTE: the misspelled name ("signleton"/"hierachy") is kept as-is;
    # renaming would change the test's identity in CI history.
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        default=0,
        category='FooBar',
        category_slug='foobar'
    ):
        # With no DB row, the registered default is returned.
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == 0
        # A saved Setting row takes precedence over the default.
        s = Setting(key='FOO_BAR', value=1)
        s.save()
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == 1
@pytest.mark.django_db
def test_setting_singleton_retrieve_readonly(api_request, dummy_setting):
    """Read-only settings expose their registered default value."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        read_only=True,
        default=2,
        category='FooBar',
        category_slug='foobar'
    ):
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == 2
@pytest.mark.django_db
def test_setting_singleton_update(api_request, dummy_setting):
    """PATCHing a setting persists the value, and a later PATCH overwrites it."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': 3}
        )
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == 3
        # A second PATCH replaces the previously stored value.
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': 4}
        )
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == 4
@pytest.mark.django_db
def test_setting_singleton_update_hybriddictfield_with_forbidden(api_request, dummy_setting):
    """HybridDictField settings accept their declared keys on PATCH."""
    # Some HybridDictField subclasses have a child of _Forbidden,
    # indicating that only the defined fields can be filled in. Make
    # sure that the _Forbidden validator doesn't get used for the
    # fields. See also https://github.com/ansible/awx/issues/4099.
    with dummy_setting(
        'FOO_BAR',
        field_class=sso_fields.SAMLOrgAttrField,
        category='FooBar',
        category_slug='foobar',
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': {'saml_admin_attr': 'Admins', 'saml_attr': 'Orgs'}}
        )
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == {'saml_admin_attr': 'Admins', 'saml_attr': 'Orgs'}
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_readonly_fields(api_request, dummy_setting):
    """PATCHing a read-only setting is silently ignored; the default remains."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        read_only=True,
        default=4,
        category='FooBar',
        category_slug='foobar'
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': 5}
        )
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        # The attempted update did not take effect.
        assert response.data['FOO_BAR'] == 4
@pytest.mark.django_db
def test_setting_singleton_update_dont_change_encrypted_mark(api_request, dummy_setting):
    """Submitting the '$encrypted$' placeholder must not overwrite the stored secret."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.CharField,
        encrypted=True,
        category='FooBar',
        category_slug='foobar'
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': 'password'}
        )
        # The value is stored encrypted at rest ...
        assert Setting.objects.get(key='FOO_BAR').value.startswith('$encrypted$')
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        # ... and the API only ever exposes the placeholder.
        assert response.data['FOO_BAR'] == '$encrypted$'
        # PATCHing the placeholder back leaves the original secret intact.
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': '$encrypted$'}
        )
        assert decrypt_field(Setting.objects.get(key='FOO_BAR'), 'value') == 'password'
        # A genuinely new value does replace the secret.
        api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': 'new_pw'}
        )
        assert decrypt_field(Setting.objects.get(key='FOO_BAR'), 'value') == 'new_pw'
@pytest.mark.django_db
def test_setting_singleton_update_runs_custom_validate(api_request, dummy_setting, dummy_validate):
    """A registered category validator can reject a PATCH with a 400."""
    def func_raising_exception(serializer, attrs):
        # Always fail validation, regardless of input.
        raise serializers.ValidationError('Error')

    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ), dummy_validate(
        'foobar', func_raising_exception
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        response = api_request(
            'patch',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'}),
            data={'FOO_BAR': 23}
        )
        assert response.status_code == 400
@pytest.mark.django_db
def test_setting_singleton_delete(api_request, dummy_setting):
    """DELETE on the category detail clears stored setting values."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        category='FooBar',
        category_slug='foobar'
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        api_request(
            'delete',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert not response.data['FOO_BAR']
@pytest.mark.django_db
def test_setting_singleton_delete_no_read_only_fields(api_request, dummy_setting):
    """DELETE does not reset read-only settings; they keep their default."""
    with dummy_setting(
        'FOO_BAR',
        field_class=fields.IntegerField,
        read_only=True,
        default=23,
        category='FooBar',
        category_slug='foobar'
    ), mock.patch('awx.conf.views.handle_setting_changes'):
        api_request(
            'delete',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        response = api_request(
            'get',
            reverse('api:setting_singleton_detail', kwargs={'category_slug': 'foobar'})
        )
        assert response.data['FOO_BAR'] == 23
| |
# -*- coding: utf-8 -*-
"""
requests.auth
~~~~~~~~~~~~~
This module contains the authentication handlers for Requests.
"""
import os
import re
import time
import hashlib
import logging
from base64 import b64encode
from .compat import urlparse, str
from .utils import parse_dict_header
try:
from ._oauth import (Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER, extract_params)
except (ImportError, SyntaxError):
SIGNATURE_HMAC = None
SIGNATURE_TYPE_AUTH_HEADER = None
try:
import kerberos as k
except ImportError as exc:
k = None
log = logging.getLogger(__name__)
CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
def _basic_auth_str(username, password):
"""Returns a Basic Auth string."""
return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1')
class AuthBase(object):
    """Base class that all auth implementations derive from."""

    def __call__(self, r):
        # Subclasses must attach their credentials to the request here.
        raise NotImplementedError('Auth hooks must be callable.')
class OAuth1(AuthBase):
    """Signs the request using OAuth 1 (RFC5849)"""

    def __init__(self, client_key,
                 client_secret=None,
                 resource_owner_key=None,
                 resource_owner_secret=None,
                 callback_uri=None,
                 signature_method=SIGNATURE_HMAC,
                 signature_type=SIGNATURE_TYPE_AUTH_HEADER,
                 rsa_key=None, verifier=None):
        # signature_type is upper-cased when it is a string; non-string
        # values (e.g. None) pass through unchanged.
        try:
            signature_type = signature_type.upper()
        except AttributeError:
            pass

        # Client comes from the optional _oauth import at the top of the
        # file; if that import failed, constructing OAuth1 raises NameError.
        self.client = Client(client_key, client_secret, resource_owner_key,
                             resource_owner_secret, callback_uri, signature_method,
                             signature_type, rsa_key, verifier)

    def __call__(self, r):
        """Add OAuth parameters to the request.

        Parameters may be included from the body if the content-type is
        urlencoded, if no content type is set an educated guess is made.
        """
        # split(";") because Content-Type may be "multipart/form-data; boundary=xxxxx"
        contenttype = r.headers.get('Content-Type', '').split(";")[0].lower()
        # extract_params will not give params unless the body is a properly
        # formatted string, a dictionary or a list of 2-tuples.
        decoded_body = extract_params(r.data)

        # extract_params can only check the present r.data and does not know
        # of r.files, thus an extra check is performed. We know that
        # if files are present the request will not have
        # Content-type: x-www-form-urlencoded. We guess it will have
        # a mimetype of multipart/form-data and if this is not the case
        # we assume the correct header will be set later.
        # NOTE(review): unicode() below is a Python 2 builtin; this code path
        # raises NameError on Python 3 -- confirm the intended runtime.
        _oauth_signed = True
        if r.files and contenttype == CONTENT_TYPE_MULTI_PART:
            # Omit body data in the signing and since it will always
            # be empty (cant add paras to body if multipart) and we wish
            # to preserve body.
            r.url, r.headers, _ = self.client.sign(
                unicode(r.full_url), unicode(r.method), None, r.headers)
        elif decoded_body is not None and contenttype in (CONTENT_TYPE_FORM_URLENCODED, ''):
            # Normal signing
            if not contenttype:
                r.headers['Content-Type'] = CONTENT_TYPE_FORM_URLENCODED
            r.url, r.headers, r.data = self.client.sign(
                unicode(r.full_url), unicode(r.method), r.data, r.headers)
        else:
            _oauth_signed = False
        if _oauth_signed:
            # Both flows add params to the URL by using r.full_url,
            # so this prevents adding it again later
            r.params = {}

            # Having the authorization header, key or value, in unicode will
            # result in UnicodeDecodeErrors when the request is concatenated
            # by httplib. This can easily be seen when attaching files.
            # Note that simply encoding the value is not enough since Python
            # saves the type of first key set. Thus we remove and re-add.
            # >>> d = {u'a':u'foo'}
            # >>> d['a'] = 'foo'
            # >>> d
            # { u'a' : 'foo' }
            u_header = unicode('Authorization')
            if u_header in r.headers:
                auth_header = r.headers[u_header].encode('utf-8')
                del r.headers[u_header]
                r.headers['Authorization'] = auth_header
        return r
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password

    def __call__(self, r):
        # Overwrite any existing Authorization header with our credentials.
        auth_header = _basic_auth_str(self.username, self.password)
        r.headers['Authorization'] = auth_header
        return r
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same credentials as Basic auth, but on the proxy-specific header.
        auth_header = _basic_auth_str(self.username, self.password)
        r.headers['Proxy-Authorization'] = auth_header
        return r
class HTTPDigestAuth(AuthBase):
    """Attaches HTTP Digest Authentication to the given Request object."""

    def __init__(self, username, password):
        self.username = username
        self.password = password
        # State carried between requests so a server nonce can be reused.
        self.last_nonce = ''
        self.nonce_count = 0
        self.chal = {}

    def build_digest_header(self, method, url):
        """Build the Authorization header value from the stored challenge.

        Returns None when the challenge uses an unsupported algorithm or an
        unsupported qop (e.g. auth-int).
        """
        realm = self.chal['realm']
        nonce = self.chal['nonce']
        qop = self.chal.get('qop')
        algorithm = self.chal.get('algorithm', 'MD5')
        opaque = self.chal.get('opaque', None)

        algorithm = algorithm.upper()
        # lambdas assume digest modules are imported at the top level
        # Fix: initialize hash_utf8 so the "is None" check below does not
        # raise NameError for algorithms other than MD5/SHA.
        hash_utf8 = None
        if algorithm == 'MD5':
            def md5_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.md5(x).hexdigest()
            hash_utf8 = md5_utf8
        elif algorithm == 'SHA':
            def sha_utf8(x):
                if isinstance(x, str):
                    x = x.encode('utf-8')
                return hashlib.sha1(x).hexdigest()
            hash_utf8 = sha_utf8
        # XXX MD5-sess
        KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

        if hash_utf8 is None:
            # Unsupported algorithm: we cannot answer the challenge.
            return None

        # XXX not implemented yet
        entdig = None
        p_parsed = urlparse(url)
        path = p_parsed.path
        if p_parsed.query:
            path += '?' + p_parsed.query

        A1 = '%s:%s:%s' % (self.username, realm, self.password)
        A2 = '%s:%s' % (method, path)

        if qop == 'auth':
            # Track how many times we have answered this particular nonce.
            if nonce == self.last_nonce:
                self.nonce_count += 1
            else:
                self.nonce_count = 1

            ncvalue = '%08x' % self.nonce_count
            # Client nonce: count + server nonce + time + random bytes.
            s = str(self.nonce_count).encode('utf-8')
            s += nonce.encode('utf-8')
            s += time.ctime().encode('utf-8')
            s += os.urandom(8)

            cnonce = (hashlib.sha1(s).hexdigest()[:16])
            noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, hash_utf8(A2))
            respdig = KD(hash_utf8(A1), noncebit)
        elif qop is None:
            respdig = KD(hash_utf8(A1), "%s:%s" % (nonce, hash_utf8(A2)))
        else:
            # XXX handle auth-int.
            return None

        self.last_nonce = nonce

        # XXX should the partial digests be encoded too?
        base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
               'response="%s"' % (self.username, realm, nonce, path, respdig)
        if opaque:
            base += ', opaque="%s"' % opaque
        if entdig:
            base += ', digest="%s"' % entdig
        base += ', algorithm="%s"' % algorithm
        if qop:
            base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)

        return 'Digest %s' % (base)

    def handle_401(self, r):
        """Takes the given response and tries digest-auth, if needed."""
        # Limit retries: only answer a digest challenge at most twice.
        num_401_calls = r.request.hooks['response'].count(self.handle_401)
        s_auth = r.headers.get('www-authenticate', '')

        if 'digest' in s_auth.lower() and num_401_calls < 2:
            self.chal = parse_dict_header(s_auth.replace('Digest ', ''))

            # Consume content and release the original connection
            # to allow our new request to reuse the same one.
            r.content
            r.raw.release_conn()

            r.request.headers['Authorization'] = self.build_digest_header(r.request.method, r.request.url)
            r.request.send(anyway=True)
            _r = r.request.response
            _r.history.append(r)

            return _r

        return r

    def __call__(self, r):
        # If we have a saved nonce, skip the 401
        if self.last_nonce:
            r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
        r.register_hook('response', self.handle_401)
        return r
def _negotiate_value(r):
"""Extracts the gssapi authentication token from the appropriate header"""
authreq = r.headers.get('www-authenticate', None)
if authreq:
rx = re.compile('(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
mo = rx.search(authreq)
if mo:
return mo.group(1)
return None
class HTTPKerberosAuth(AuthBase):
    """Attaches HTTP GSSAPI/Kerberos Authentication to the given Request object."""

    def __init__(self, require_mutual_auth=True):
        # k is None when the optional kerberos import at the top of the file
        # failed.
        if k is None:
            raise Exception("Kerberos libraries unavailable")
        self.context = None
        self.require_mutual_auth = require_mutual_auth

    def generate_request_header(self, r):
        """Generates the gssapi authentication token with kerberos"""
        host = urlparse(r.url).netloc
        # Strip an explicit port, if any: "host:port" -> "host".
        tail, _, head = host.rpartition(':')
        domain = tail if tail else head

        result, self.context = k.authGSSClientInit("HTTP@%s" % domain)
        if result < 1:
            raise Exception("authGSSClientInit failed")

        result = k.authGSSClientStep(self.context, _negotiate_value(r))
        if result < 0:
            raise Exception("authGSSClientStep failed")

        response = k.authGSSClientResponse(self.context)
        return "Negotiate %s" % response

    def authenticate_user(self, r):
        """Handles user authentication with gssapi/kerberos"""
        auth_header = self.generate_request_header(r)
        log.debug("authenticate_user(): Authorization header: %s" % auth_header)
        r.request.headers['Authorization'] = auth_header
        # Re-send the original request with the Negotiate header attached.
        r.request.send(anyway=True)
        _r = r.request.response
        _r.history.append(r)
        log.debug("authenticate_user(): returning %s" % _r)
        return _r

    def handle_401(self, r):
        """Handles 401's, attempts to use gssapi/kerberos authentication"""
        log.debug("handle_401(): Handling: 401")
        if _negotiate_value(r) is not None:
            _r = self.authenticate_user(r)
            log.debug("handle_401(): returning %s" % _r)
            return _r
        else:
            log.debug("handle_401(): Kerberos is not supported")
            log.debug("handle_401(): returning %s" % r)
            return r

    def handle_other(self, r):
        """Handles all responses with the exception of 401s.

        This is necessary so that we can authenticate responses if requested"""
        log.debug("handle_other(): Handling: %d" % r.status_code)
        self.deregister(r)
        if self.require_mutual_auth:
            if _negotiate_value(r) is not None:
                log.debug("handle_other(): Authenticating the server")
                _r = self.authenticate_server(r)
                log.debug("handle_other(): returning %s" % _r)
                return _r
            else:
                log.error("handle_other(): Mutual authentication failed")
                raise Exception("Mutual authentication failed")
        else:
            log.debug("handle_other(): returning %s" % r)
            return r

    def authenticate_server(self, r):
        """Uses GSSAPI to authenticate the server"""
        log.debug("authenticate_server(): Authenticate header: %s" % _negotiate_value(r))

        result = k.authGSSClientStep(self.context, _negotiate_value(r))
        if result < 1:
            raise Exception("authGSSClientStep failed")
        _r = r.request.response
        log.debug("authenticate_server(): returning %s" % _r)
        return _r

    def handle_response(self, r):
        """Takes the given response and tries kerberos-auth, as needed."""
        # Fix: removed an unreachable trailing debug/return pair -- both
        # branches of this if/else return.
        if r.status_code == 401:
            _r = self.handle_401(r)
            log.debug("handle_response returning %s" % _r)
            return _r
        else:
            _r = self.handle_other(r)
            log.debug("handle_response returning %s" % _r)
            return _r

    def deregister(self, r):
        """Deregisters the response handler"""
        r.request.deregister_hook('response', self.handle_response)

    def __call__(self, r):
        r.register_hook('response', self.handle_response)
        return r
| |
import pytest
import numpy as np
import quimb as qu
import quimb.tensor as qtn
def rand_reg_graph(reg, n, seed=None):
    """Generate a random ``reg``-regular graph on ``n`` nodes."""
    import networkx as nx
    return nx.random_regular_graph(reg, n, seed=seed)
def graph_to_circ(G, gamma0=-0.743043, beta0=0.754082):
    """Render one QAOA round on graph ``G`` as a qasm-like gate listing.

    The string starts with the qubit count, then a layer of Hadamards, a
    CNOT-Rz-CNOT block per edge, and a final layer of X rotations.
    """
    n = G.number_of_nodes()
    lines = [f"{n}"]
    # initial layer of Hadamards
    lines.extend(f"H {i}" for i in range(n))
    # ZZ interaction per edge, compiled as CNOT - Rz - CNOT
    for i, j in G.edges:
        lines.append(f"CNOT {i} {j}")
        lines.append(f"Rz {gamma0} {j}")
        lines.append(f"CNOT {i} {j}")
    # mixing layer of X rotations
    lines.extend(f"Rx {beta0} {i}" for i in range(n))
    return "\n".join(lines) + "\n"
class TestCircuit:
    """Behavioral tests for quimb's quantum circuit simulators."""

    def test_prepare_GHZ(self):
        """Apply a fixed gate sequence and check the result matches GHZ."""
        qc = qtn.Circuit(3)
        gates = [
            ('H', 0),
            ('H', 1),
            ('CNOT', 1, 2),
            ('CNOT', 0, 2),
            ('H', 0),
            ('H', 1),
            ('H', 2),
        ]
        qc.apply_gates(gates)
        # unit overlap with the reference GHZ state
        assert qu.expec(qc.psi.to_dense(), qu.ghz_state(3)) == pytest.approx(1)
        counts = qc.simulate_counts(1024)
        # sampling should only ever yield |000> or |111>, and every one of
        # the 1024 shots must land in one of the two
        assert len(counts) == 2
        assert '000' in counts
        assert '111' in counts
        assert counts['000'] + counts['111'] == 1024

    def test_rand_reg_qaoa(self):
        """A QAOA circuit on a random 3-regular graph stays normalized."""
        G = rand_reg_graph(reg=3, n=18, seed=42)
        qasm = graph_to_circ(G)
        qc = qtn.Circuit.from_qasm(qasm)
        # <psi|psi> == 1 checks normalization of the full tensor network
        assert (qc.psi.H & qc.psi) ^ all == pytest.approx(1.0)

    def test_rand_reg_qaoa_mps_swapsplit(self):
        """Same circuit, simulated with the MPS backend."""
        G = rand_reg_graph(reg=3, n=18, seed=42)
        qasm = graph_to_circ(G)
        qc = qtn.CircuitMPS.from_qasm(qasm)
        # MPS simulation should keep exactly one tensor per qubit
        assert len(qc.psi.tensors) == 18
        assert (qc.psi.H & qc.psi) ^ all == pytest.approx(1.0)

    @pytest.mark.parametrize(
        'Circ', [qtn.Circuit, qtn.CircuitMPS, qtn.CircuitDense]
    )
    def test_all_gate_methods(self, Circ):
        """Apply every supported gate method once, in random order."""
        import random
        # (method name, number of qubits, number of parameters)
        g_nq_np = [
            # single qubit
            ('x', 1, 0),
            ('y', 1, 0),
            ('z', 1, 0),
            ('s', 1, 0),
            ('t', 1, 0),
            ('h', 1, 0),
            ('iden', 1, 0),
            ('x_1_2', 1, 0),
            ('y_1_2', 1, 0),
            ('z_1_2', 1, 0),
            ('w_1_2', 1, 0),
            ('hz_1_2', 1, 0),
            # single qubit parametrizable
            ('rx', 1, 1),
            ('ry', 1, 1),
            ('rz', 1, 1),
            # two qubit
            ('cx', 2, 0),
            ('cy', 2, 0),
            ('cz', 2, 0),
            ('cnot', 2, 0),
            ('swap', 2, 0),
            ('iswap', 2, 0),
            # two qubit parametrizable
            ('fsim', 2, 2),
        ]
        random.shuffle(g_nq_np)
        psi0 = qtn.MPS_rand_state(2, 2)
        circ = Circ(2, psi0, tags='PSI0')
        for g, n_q, n_p in g_nq_np:
            # random angles followed by distinct target qubit(s)
            args = [
                *np.random.uniform(0, 2 * np.pi, size=n_p),
                *np.random.choice([0, 1], replace=False, size=n_q)
            ]
            getattr(circ, g)(*args)
        # state must remain normalized after every gate
        assert circ.psi.H @ circ.psi == pytest.approx(1.0)
        # and the random gates should have rotated it away from psi0
        assert abs((circ.psi.H & psi0) ^ all) < 0.99999999

    def test_auto_split_gate(self):
        """'auto-split-gate' should find low-rank gate decompositions."""
        n = 3
        ops = [
            ('u3', 1., 2., 3., 0),
            ('u3', 2., 3., 1., 1),
            ('u3', 3., 1., 2., 2),
            ('cz', 0, 1),
            ('iswap', 1, 2),
            ('cx', 2, 0),
            ('iswap', 2, 1),
            ('h', 0),
            ('h', 1),
            ('h', 2),
        ]
        cnorm = qtn.Circuit(n, gate_opts=dict(contract='split-gate'))
        cnorm.apply_gates(ops)
        assert cnorm.psi.max_bond() == 4

        cswap = qtn.Circuit(n, gate_opts=dict(contract='swap-split-gate'))
        cswap.apply_gates(ops)
        assert cswap.psi.max_bond() == 4

        # the auto strategy should achieve a smaller maximum bond dimension
        cauto = qtn.Circuit(n, gate_opts=dict(contract='auto-split-gate'))
        cauto.apply_gates(ops)
        assert cauto.psi.max_bond() == 2

        # all three strategies must produce the same physical state
        assert qu.fidelity(cnorm.psi.to_dense(),
                           cswap.psi.to_dense()) == pytest.approx(1.0)
        assert qu.fidelity(cswap.psi.to_dense(),
                           cauto.psi.to_dense()) == pytest.approx(1.0)

    @pytest.mark.parametrize("gate2", ['cx', 'iswap'])
    def test_circuit_simplify_tensor_network(self, gate2):
        """full_simplify should shrink the network but keep the amplitude."""
        import random
        import itertools

        depth = n = 8
        circ = qtn.Circuit(n)

        def random_single_qubit_layer():
            # one random sqrt-Pauli-like gate on every qubit
            return [
                (random.choice(['X_1_2', 'Y_1_2', 'W_1_2']), i)
                for i in range(n)
            ]

        def even_two_qubit_layer():
            return [
                (gate2, i, i + 1)
                for i in range(0, n, 2)
            ]

        def odd_two_qubit_layer():
            return [
                (gate2, i, i + 1)
                for i in range(1, n - 1, 2)
            ]

        # alternate single- and two-qubit layers, brickwork style
        layering = itertools.cycle([
            random_single_qubit_layer,
            even_two_qubit_layer,
            random_single_qubit_layer,
            odd_two_qubit_layer,
        ])
        for i, layer_fn in zip(range(depth), layering):
            for g in layer_fn():
                circ.apply_gate(*g, gate_round=i)

        # close the network with <0...0| to get a single amplitude
        psif = qtn.MPS_computational_state('0' * n).squeeze_()
        tn = circ.psi & psif

        c = tn.contract(all)
        cw = tn.contraction_width()

        tn_s = tn.full_simplify()
        # simplification must strictly reduce the network size
        assert tn_s.num_tensors < tn.num_tensors
        assert tn_s.num_indices < tn.num_indices
        # need to specify output inds since we now have hyper edges
        c_s = tn_s.contract(all, output_inds=[])
        # ... and must preserve the amplitude and not worsen the width
        assert c_s == pytest.approx(c)
        cw_s = tn_s.contraction_width(output_inds=[])
        assert cw_s <= cw
class TestCircuitGen:
    """Tests for the 1D circuit-ansatz constructors."""

    @pytest.mark.parametrize(
        "ansatz,cyclic", [
            ('zigzag', False),
            ('brickwork', False),
            ('brickwork', True),
            ('rand', False),
            ('rand', True),
        ])
    @pytest.mark.parametrize('n', [4, 5])
    def test_1D_ansatzes(self, ansatz, cyclic, n):
        """Each ansatz should place ``depth`` CZ gates on every bonded pair."""
        depth = 3
        # cyclic ansatzes add the wrap-around (n-1, 0) pair
        num_pairs = n if cyclic else n - 1

        fn = {
            'zigzag': qtn.circ_ansatz_1D_zigzag,
            'brickwork': qtn.circ_ansatz_1D_brickwork,
            'rand': qtn.circ_ansatz_1D_rand,
        }[ansatz]

        opts = dict(
            n=n,
            depth=3,
            gate_opts=dict(contract=False),
        )
        if cyclic:
            opts['cyclic'] = True
        if ansatz == 'rand':
            # the random ansatz needs a seed for reproducibility
            opts['seed'] = 42

        circ = fn(**opts)
        tn = circ.uni

        # total number of entangling gates
        assert len(tn['CZ']) == num_pairs * depth
        # number of entangling gates per pair
        for i in range(num_pairs):
            assert len(tn['CZ', f'I{i}', f'I{(i + 1) % n}']) == depth
        # all single-qubit rotations should be parametrized tensors
        assert all(isinstance(t, qtn.PTensor) for t in tn['U3'])
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import matrix
from ducktape.utils.util import wait_until
from kafkatest.tests.verifiable_consumer_test import VerifiableConsumerTest
from kafkatest.services.kafka import TopicPartition
import signal
class OffsetValidationTest(VerifiableConsumerTest):
    """Validates consumer position/offset semantics across consumer and
    broker restarts, both clean and hard."""

    TOPIC = "test_topic"
    NUM_PARTITIONS = 1

    def __init__(self, test_context):
        super(OffsetValidationTest, self).__init__(test_context, num_consumers=3, num_producers=1,
                                                   num_zk=1, num_brokers=2, topics={
                                                       self.TOPIC: {'partitions': self.NUM_PARTITIONS, 'replication-factor': 2}
                                                   })

    def rolling_bounce_consumers(self, consumer, num_bounces=5, clean_shutdown=True):
        """Restart the consumers one at a time, ``num_bounces`` times over."""
        for _ in range(num_bounces):
            for node in consumer.nodes:
                consumer.stop_node(node, clean_shutdown)

                wait_until(lambda: len(consumer.dead_nodes()) == 1,
                           timeout_sec=self.session_timeout_sec+5,
                           err_msg="Timed out waiting for the consumer to shutdown")

                consumer.start_node(node)

                # let the group stabilize and make progress before the next bounce
                self.await_all_members(consumer)
                self.await_consumed_messages(consumer)

    def bounce_all_consumers(self, consumer, num_bounces=5, clean_shutdown=True):
        """Stop the whole group at once, then restart it, ``num_bounces`` times over."""
        for _ in range(num_bounces):
            for node in consumer.nodes:
                consumer.stop_node(node, clean_shutdown)

            wait_until(lambda: len(consumer.dead_nodes()) == self.num_consumers, timeout_sec=10,
                       err_msg="Timed out waiting for the consumers to shutdown")

            for node in consumer.nodes:
                consumer.start_node(node)

            self.await_all_members(consumer)
            self.await_consumed_messages(consumer)

    def rolling_bounce_brokers(self, consumer, num_bounces=5, clean_shutdown=True):
        """Restart the brokers one at a time, ``num_bounces`` times over."""
        for _ in range(num_bounces):
            for node in self.kafka.nodes:
                # Fix: honor the clean_shutdown argument; it was previously
                # hard-coded to True, silently ignoring the parameter.
                self.kafka.restart_node(node, clean_shutdown=clean_shutdown)
                self.await_all_members(consumer)
                self.await_consumed_messages(consumer)

    def setup_consumer(self, topic, **kwargs):
        # collect verifiable consumer events since this makes debugging much easier
        consumer = super(OffsetValidationTest, self).setup_consumer(topic, **kwargs)
        self.mark_for_collect(consumer, 'verifiable_consumer_stdout')
        return consumer

    def test_broker_rolling_bounce(self):
        """
        Verify correct consumer behavior when the brokers are consecutively restarted.

        Setup: single Kafka cluster with one producer writing messages to a single topic with one
        partition, an a set of consumers in the same group reading from the same topic.

        - Start a producer which continues producing new messages throughout the test.
        - Start up the consumers and wait until they've joined the group.
        - In a loop, restart each broker consecutively, waiting for the group to stabilize between
          each broker restart.
        - Verify delivery semantics according to the failure type and that the broker bounces
          did not cause unexpected group rebalances.
        """
        partition = TopicPartition(self.TOPIC, 0)

        producer = self.setup_producer(self.TOPIC)
        consumer = self.setup_consumer(self.TOPIC)

        producer.start()
        self.await_produced_messages(producer)

        consumer.start()
        self.await_all_members(consumer)

        num_rebalances = consumer.num_rebalances()
        # TODO: make this test work with hard shutdowns, which probably requires
        # pausing before the node is restarted to ensure that any ephemeral
        # nodes have time to expire
        self.rolling_bounce_brokers(consumer, clean_shutdown=True)

        unexpected_rebalances = consumer.num_rebalances() - num_rebalances
        assert unexpected_rebalances == 0, \
            "Broker rolling bounce caused %d unexpected group rebalances" % unexpected_rebalances

        consumer.stop_all()

        assert consumer.current_position(partition) == consumer.total_consumed(), \
            "Total consumed records did not match consumed position"

    @matrix(clean_shutdown=[True, False], bounce_mode=["all", "rolling"])
    def test_consumer_bounce(self, clean_shutdown, bounce_mode):
        """
        Verify correct consumer behavior when the consumers in the group are consecutively restarted.

        Setup: single Kafka cluster with one producer and a set of consumers in one group.

        - Start a producer which continues producing new messages throughout the test.
        - Start up the consumers and wait until they've joined the group.
        - In a loop, restart each consumer, waiting for each one to rejoin the group before
          restarting the rest.
        - Verify delivery semantics according to the failure type.
        """
        partition = TopicPartition(self.TOPIC, 0)

        producer = self.setup_producer(self.TOPIC)
        consumer = self.setup_consumer(self.TOPIC)

        producer.start()
        self.await_produced_messages(producer)

        consumer.start()
        self.await_all_members(consumer)

        if bounce_mode == "all":
            self.bounce_all_consumers(consumer, clean_shutdown=clean_shutdown)
        else:
            self.rolling_bounce_consumers(consumer, clean_shutdown=clean_shutdown)

        consumer.stop_all()
        if clean_shutdown:
            # if the total records consumed matches the current position, we haven't seen any duplicates
            # this can only be guaranteed with a clean shutdown
            assert consumer.current_position(partition) == consumer.total_consumed(), \
                "Total consumed records did not match consumed position"
        else:
            # we may have duplicates in a hard failure
            assert consumer.current_position(partition) <= consumer.total_consumed(), \
                "Current position greater than the total number of consumed records"

    @matrix(clean_shutdown=[True, False], enable_autocommit=[True, False])
    def test_consumer_failure(self, clean_shutdown, enable_autocommit):
        """Kill the partition owner and verify delivery semantics after the rebalance."""
        partition = TopicPartition(self.TOPIC, 0)

        consumer = self.setup_consumer(self.TOPIC, enable_autocommit=enable_autocommit)
        producer = self.setup_producer(self.TOPIC)

        consumer.start()
        self.await_all_members(consumer)

        partition_owner = consumer.owner(partition)
        assert partition_owner is not None

        # startup the producer and ensure that some records have been written
        producer.start()
        self.await_produced_messages(producer)

        # stop the partition owner and await its shutdown
        consumer.kill_node(partition_owner, clean_shutdown=clean_shutdown)
        wait_until(lambda: len(consumer.joined_nodes()) == (self.num_consumers - 1) and consumer.owner(partition) is not None,
                   timeout_sec=self.session_timeout_sec+5, err_msg="Timed out waiting for consumer to close")

        # ensure that the remaining consumer does some work after rebalancing
        self.await_consumed_messages(consumer, min_messages=1000)

        consumer.stop_all()

        if clean_shutdown:
            # if the total records consumed matches the current position, we haven't seen any duplicates
            # this can only be guaranteed with a clean shutdown
            assert consumer.current_position(partition) == consumer.total_consumed(), \
                "Total consumed records did not match consumed position"
        else:
            # we may have duplicates in a hard failure
            assert consumer.current_position(partition) <= consumer.total_consumed(), \
                "Current position greater than the total number of consumed records"

        # if autocommit is not turned on, we can also verify the last committed offset
        if not enable_autocommit:
            assert consumer.last_commit(partition) == consumer.current_position(partition), \
                "Last committed offset did not match last consumed position"

    @matrix(clean_shutdown=[True, False], enable_autocommit=[True, False])
    def test_broker_failure(self, clean_shutdown, enable_autocommit):
        """Kill one broker and verify the group keeps consuming without rebalancing."""
        partition = TopicPartition(self.TOPIC, 0)

        consumer = self.setup_consumer(self.TOPIC, enable_autocommit=enable_autocommit)
        producer = self.setup_producer(self.TOPIC)

        producer.start()
        consumer.start()
        self.await_all_members(consumer)

        num_rebalances = consumer.num_rebalances()

        # shutdown one of the brokers
        # TODO: we need a way to target the coordinator instead of picking arbitrarily
        self.kafka.signal_node(self.kafka.nodes[0], signal.SIGTERM if clean_shutdown else signal.SIGKILL)

        # ensure that the consumers do some work after the broker failure
        self.await_consumed_messages(consumer, min_messages=1000)

        # verify that there were no rebalances on failover
        assert num_rebalances == consumer.num_rebalances(), "Broker failure should not cause a rebalance"

        consumer.stop_all()

        # if the total records consumed matches the current position, we haven't seen any duplicates
        assert consumer.current_position(partition) == consumer.total_consumed(), \
            "Total consumed records did not match consumed position"

        # if autocommit is not turned on, we can also verify the last committed offset
        if not enable_autocommit:
            assert consumer.last_commit(partition) == consumer.current_position(partition), \
                "Last committed offset did not match last consumed position"

    def test_group_consumption(self):
        """
        Verifies correct group rebalance behavior as consumers are started and stopped.
        In particular, this test verifies that the partition is readable after every
        expected rebalance.

        Setup: single Kafka cluster with a group of consumers reading from one topic
        with one partition while the verifiable producer writes to it.

        - Start the consumers one by one, verifying consumption after each rebalance
        - Shutdown the consumers one by one, verifying consumption after each rebalance
        """
        consumer = self.setup_consumer(self.TOPIC)
        producer = self.setup_producer(self.TOPIC)

        partition = TopicPartition(self.TOPIC, 0)

        producer.start()

        for num_started, node in enumerate(consumer.nodes, 1):
            consumer.start_node(node)
            self.await_members(consumer, num_started)
            self.await_consumed_messages(consumer)

        for num_stopped, node in enumerate(consumer.nodes, 1):
            consumer.stop_node(node)

            if num_stopped < self.num_consumers:
                self.await_members(consumer, self.num_consumers - num_stopped)
                self.await_consumed_messages(consumer)

        assert consumer.current_position(partition) == consumer.total_consumed(), \
            "Total consumed records did not match consumed position"

        assert consumer.last_commit(partition) == consumer.current_position(partition), \
            "Last committed offset did not match last consumed position"
class AssignmentValidationTest(VerifiableConsumerTest):
    """Checks that the configured assignment strategy yields valid assignments."""

    TOPIC = "test_topic"
    NUM_PARTITIONS = 6

    def __init__(self, test_context):
        super(AssignmentValidationTest, self).__init__(
            test_context, num_consumers=3, num_producers=0,
            num_zk=1, num_brokers=2,
            topics={
                self.TOPIC: {'partitions': self.NUM_PARTITIONS, 'replication-factor': 1},
            })

    @matrix(assignment_strategy=["org.apache.kafka.clients.consumer.RangeAssignor",
                                 "org.apache.kafka.clients.consumer.RoundRobinAssignor"])
    def test_valid_assignment(self, assignment_strategy):
        """
        Verify assignment strategy correctness: each partition is assigned to exactly
        one consumer instance.

        Setup: single Kafka cluster with a set of consumers in the same group.

        - Start the consumers one by one
        - Validate assignment after every expected rebalance
        """
        consumer = self.setup_consumer(self.TOPIC, assignment_strategy=assignment_strategy)
        members_so_far = 0
        for node in consumer.nodes:
            members_so_far += 1
            consumer.start_node(node)
            self.await_members(consumer, members_so_far)
            # every partition must be owned by exactly one member
            assert self.valid_assignment(self.TOPIC, self.NUM_PARTITIONS,
                                         consumer.current_assignment())
| |
import numpy as np # linear algebra
import tensorflow as tf
import glob
import collections as col
import itertools
import cPickle as pkl
import time
import os
import sys
import getopt
### reading frame level tfrecords
def get_serialized_example(filepath):
    """Return a tensor yielding serialized examples from one tfrecord file.

    Uses a single-epoch input queue, so reads raise OutOfRangeError once the
    file is exhausted.
    """
    queue = tf.train.string_input_producer([filepath], num_epochs=1)
    _, example = tf.TFRecordReader().read(queue)
    return example
def get_raw_frame_data(serialized_example):
    """Parse one YouTube-8M frame-level SequenceExample.

    Returns ``(video_id, labels, rgb, audio)`` where ``rgb`` and ``audio``
    are float32 tensors of shape [num_frames, 1024] and [num_frames, 128]
    decoded from the raw per-frame byte strings.
    """
    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            "video_id": tf.FixedLenFeature([], tf.string),
            "labels": tf.VarLenFeature(tf.int64)
        },
        sequence_features={
            "rgb" : tf.FixedLenSequenceFeature([], dtype=tf.string),
            "audio" : tf.FixedLenSequenceFeature([], dtype=tf.string)
        }
    )
    video_id = contexts['video_id']
    # labels is a sparse tensor of int64 class ids
    labels = contexts['labels']
    # frames are stored quantized as uint8 bytes; decode then upcast to float32
    rgb = tf.reshape(tf.cast(tf.decode_raw(features['rgb'], tf.uint8), tf.float32),[-1, 1024])
    audio = tf.reshape(tf.cast(tf.decode_raw(features['audio'], tf.uint8), tf.float32),[-1, 128])
    return(video_id, labels, rgb, audio)
def get_processed_frame_data(rgb_frame, audio_frame, feature_list, concat_features=False):
    """Aggregate frame-level rgb/audio tensors into per-video statistics.

    IMPORTANT: the requested statistics are selected BY LOCAL VARIABLE NAME
    from a ``locals()`` snapshot taken near the end of this function, so the
    local names below are part of the contract -- renaming any of them
    changes behavior.

    Returns a dict ``{feature_name: tensor}``, or a single concatenated
    float32 tensor when ``concat_features`` is true.
    """
    # transposed views [feature_dim, num_frames]: tf.nn.top_k works along
    # the last axis, so percentiles are computed on these
    rgb_frame_trans = tf.transpose(rgb_frame, perm=[1, 0])
    audio_frame_trans = tf.transpose(audio_frame, perm=[1, 0])
    # number of frames in this video
    video_length = tf.shape(rgb_frame)[0]
    # approximate quartiles: min of the top k values with k = 0.75N / 0.50N /
    # 0.25N gives the 25th / 50th / 75th percentile respectively
    q0_rgb_frame = tf.reduce_min(rgb_frame, reduction_indices=0)
    q1_rgb_frame = tf.reduce_min(tf.nn.top_k(rgb_frame_trans, k = tf.to_int32(tf.scalar_mul(0.75, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    q2_rgb_frame = tf.reduce_min(tf.nn.top_k(rgb_frame_trans, k = tf.to_int32(tf.scalar_mul(0.50, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    q3_rgb_frame = tf.reduce_min(tf.nn.top_k(rgb_frame_trans, k = tf.to_int32(tf.scalar_mul(0.25, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    q4_rgb_frame = tf.reduce_max(rgb_frame, reduction_indices=0)
    # first four moments over frames
    mean_rgb_frame = tf.reduce_mean(rgb_frame, reduction_indices=0)
    stddv_rgb_frame = tf.sqrt(tf.reduce_mean(tf.square(rgb_frame - mean_rgb_frame), reduction_indices=0))
    skew_rgb_frame = tf.div(tf.reduce_mean(tf.pow(rgb_frame - mean_rgb_frame, 3), reduction_indices=0), tf.pow(stddv_rgb_frame, 3))
    kurt_rgb_frame = tf.div(tf.reduce_mean(tf.pow(rgb_frame - mean_rgb_frame, 4), reduction_indices=0), tf.pow(stddv_rgb_frame, 4))
    # same statistics for the audio features
    q0_audio_frame = tf.reduce_min(audio_frame, reduction_indices=0)
    q1_audio_frame = tf.reduce_min(tf.nn.top_k(audio_frame_trans, k = tf.to_int32(tf.scalar_mul(0.75, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    q2_audio_frame = tf.reduce_min(tf.nn.top_k(audio_frame_trans, k = tf.to_int32(tf.scalar_mul(0.50, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    q3_audio_frame = tf.reduce_min(tf.nn.top_k(audio_frame_trans, k = tf.to_int32(tf.scalar_mul(0.25, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    q4_audio_frame = tf.reduce_max(audio_frame, reduction_indices=0)
    mean_audio_frame = tf.reduce_mean(audio_frame, reduction_indices=0)
    stddv_audio_frame = tf.sqrt(tf.reduce_mean(tf.square(audio_frame - mean_audio_frame), reduction_indices=0))
    skew_audio_frame = tf.div(tf.reduce_mean(tf.pow(audio_frame - mean_audio_frame, 3), reduction_indices=0), tf.pow(stddv_audio_frame, 3))
    kurt_audio_frame = tf.div(tf.reduce_mean(tf.pow(audio_frame - mean_audio_frame, 4), reduction_indices=0), tf.pow(stddv_audio_frame, 4))
    # spread measures derived from the quartiles
    iqr_rgb_frame = tf.subtract(q3_rgb_frame, q1_rgb_frame)
    rng_rgb_frame = tf.subtract(q4_rgb_frame, q0_rgb_frame)
    iqr_audio_frame = tf.subtract(q3_audio_frame, q1_audio_frame)
    rng_audio_frame = tf.subtract(q4_audio_frame, q0_audio_frame)
    # relative-dispersion ratios (may produce inf/nan where means are zero)
    coeffvar_rgb_frame = tf.div(stddv_rgb_frame, mean_rgb_frame)
    efficiency_rgb_frame = tf.div(tf.square(stddv_rgb_frame), tf.square(mean_rgb_frame))
    midhinge_rgb_frame = tf.add(q3_rgb_frame, q1_rgb_frame)
    qntcoeffdisp_rgb_frame = tf.div(iqr_rgb_frame, midhinge_rgb_frame)
    coeffvar_audio_frame = tf.div(stddv_audio_frame, mean_audio_frame)
    efficiency_audio_frame = tf.div(tf.square(stddv_audio_frame), tf.square(mean_audio_frame))
    midhinge_audio_frame = tf.add(q3_audio_frame, q1_audio_frame)
    qntcoeffdisp_audio_frame = tf.div(iqr_audio_frame, midhinge_audio_frame)
    # Mean Absolute Difference
    # (sum of |x_i - x_j| over the upper triangle, normalized by N*(N-1))
    md_rgb_frame = tf.div(tf.reduce_sum(tf.abs(tf.matrix_band_part(tf.subtract(tf.expand_dims(rgb_frame_trans, 2), tf.expand_dims(rgb_frame_trans, 1)), 0, -1)), reduction_indices=[1,2]), tf.cast(tf.multiply(video_length, video_length-1), tf.float32))
    # Median Absolute Deviation around Median
    abs_dev_median = tf.transpose(tf.abs(tf.subtract(rgb_frame, q2_rgb_frame)), perm=[1,0])
    mean_abs_med_rgb_frame = tf.reduce_min(tf.nn.top_k(abs_dev_median, k = tf.to_int32(tf.scalar_mul(0.50, tf.to_float(video_length))), sorted=False).values, reduction_indices=1)
    # Mean Absolute Deviation around Mean
    mean_abs_mean_rgb_frame = tf.reduce_mean(tf.abs(tf.subtract(rgb_frame, mean_rgb_frame)), reduction_indices=0)
    # Mean Absolute Deviation around Median
    # NOTE(review): this line and the "Mode" line below recompute the
    # deviation around the MEAN and overwrite the same variable; they look
    # like copy-paste placeholders. They are harmless while absent from
    # feature_list, but confirm before relying on them.
    mean_abs_mean_rgb_frame = tf.reduce_mean(tf.abs(tf.subtract(rgb_frame, mean_rgb_frame)), reduction_indices=0)
    # Mean Absolute Deviation around Mode
    mean_abs_mean_rgb_frame = tf.reduce_mean(tf.abs(tf.subtract(rgb_frame, mean_rgb_frame)), reduction_indices=0)
    # unique pairwise Manhattan distances between frames (upper triangle)
    pairwise_man, _ = tf.unique(tf.reshape(tf.matrix_band_part(tf.reduce_sum(tf.abs(tf.subtract(tf.expand_dims(rgb_frame, 0), tf.expand_dims(rgb_frame, 1))), reduction_indices=[2]), 0, -1), [-1]))
    # snapshot of every local above -- feature_list indexes into this by name
    local_features = locals()
    if(concat_features):
        features = []
        for x in feature_list:
            if x != 'video_length':
                features.append(local_features[x])
            else:
                # video_length is a scalar; wrap and cast so it concatenates
                features.append(tf.cast(tf.convert_to_tensor([video_length]), tf.float32))
        features = tf.concat(features, 0)
    else:
        features = {feature : local_features[feature] for feature in feature_list}
    return(features)
def extract_video_features_from_frame_features(cluster_features=False):
    """Aggregate frame-level tfrecords into per-video feature tfrecords.

    Reads CLI options for input/output locations, skips records already
    processed, and checkpoints progress to pickle files after every record.
    ``cluster_features`` is currently unused but kept for interface
    compatibility.
    """
    start_time = time.time()
    # filepaths = glob.glob('/data1/frame_level_feat/train*.tfrecord')
    opts, _ = getopt.getopt(sys.argv[1:], "", ["output_dir=", "input_dir=", "input_file=", "output_file=", "videos_done_filepath="])
    for opt, arg in opts:
        # Fix: the original used `opt in ("--x")`, which is a substring test
        # against a plain string, not tuple membership; exact equality is
        # what was intended.
        if opt == "--input_file":
            input_file = arg
        if opt == "--input_dir":
            input_dir = arg
        if opt == "--output_dir":
            output_dir = arg
        if opt == "--output_file":
            output_file = arg
        if opt == "--videos_done_filepath":
            videos_done_filepath = arg

    # filepaths to do (open() replaces the removed-in-Py3 file() builtin)
    with open(input_file, 'rb') as f:
        records_todo = pkl.load(f)
    # filepaths done
    with open(output_file, 'rb') as f:
        records_done = pkl.load(f)
    # videos done
    with open(videos_done_filepath, 'rb') as f:
        videos_done = pkl.load(f)

    for record in records_todo:
        if record in records_done:
            print(record + ' : Skipped')
            print(len(records_done) / float(len(records_todo)))
            continue
        filepath = input_dir + record
        serialized_example = get_serialized_example(filepath)
        raw_frame_data = get_raw_frame_data(serialized_example)
        feature_list = ['video_length',
                        'q0_rgb_frame', 'q1_rgb_frame', 'q2_rgb_frame', 'q3_rgb_frame', 'q4_rgb_frame', 'mean_rgb_frame', 'stddv_rgb_frame', 'skew_rgb_frame', 'kurt_rgb_frame', 'iqr_rgb_frame', 'rng_rgb_frame', 'coeffvar_rgb_frame', 'efficiency_rgb_frame', 'midhinge_rgb_frame', 'qntcoeffdisp_rgb_frame',
                        'q0_audio_frame', 'q1_audio_frame', 'q2_audio_frame', 'q3_audio_frame', 'q4_audio_frame', 'mean_audio_frame', 'stddv_audio_frame', 'skew_audio_frame', 'kurt_audio_frame', 'iqr_audio_frame', 'rng_audio_frame', 'coeffvar_audio_frame', 'efficiency_audio_frame', 'midhinge_audio_frame', 'qntcoeffdisp_audio_frame']
        processed_frame_data = get_processed_frame_data(raw_frame_data[2], raw_frame_data[3], feature_list, concat_features=False)

        # writing tfrecord v1
        new_filepath = output_dir + record
        writer = tf.python_io.TFRecordWriter(new_filepath)
        with tf.Session() as sess:
            init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)
            try:
                # drain every video in this record's single-epoch queue
                while True:
                    video_id, labels, features = sess.run([raw_frame_data[0], raw_frame_data[1], processed_frame_data])
                    # scalar video_length must be wrapped in a list for FloatList
                    features_to_write = {key: value if key != 'video_length' else [value] for key, value in features.items()}
                    features_to_write['video_id'] = [video_id]
                    features_to_write['labels'] = labels.values
                    tf_features_format = {}
                    for key, value in features_to_write.items():
                        if key == 'video_id':
                            tf_features_format[key] = tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
                        elif key == 'labels':
                            tf_features_format[key] = tf.train.Feature(int64_list=tf.train.Int64List(value=value))
                        else:
                            tf_features_format[key] = tf.train.Feature(float_list=tf.train.FloatList(value=value))
                    example = tf.train.Example(features=tf.train.Features(feature=tf_features_format))
                    writer.write(example.SerializeToString())
                    videos_done[video_id + "_" + record] = 1
            # Fix: `except E, e` is Python-2-only syntax; `as` works on 2.6+.
            except tf.errors.OutOfRangeError as e:
                # queue exhausted: normal end of this record
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)

        # Fix: close the writer for EVERY record (previously only the last
        # writer could be closed, leaking file handles / buffered output).
        writer.close()

        print(record + ' : Done')
        records_done[record] = 1
        print(len(records_done) / float(len(records_todo)))
        # checkpoint progress after each record
        with open(output_file, 'wb') as f:
            pkl.dump(records_done, f, protocol=pkl.HIGHEST_PROTOCOL)
        with open(videos_done_filepath, 'wb') as f:
            pkl.dump(videos_done, f, protocol=pkl.HIGHEST_PROTOCOL)

    print(time.time() - start_time)


extract_video_features_from_frame_features()
| |
"""Helper methods for various modules."""
import asyncio
from datetime import datetime, timedelta
import threading
import re
import enum
import socket
import random
import string
from functools import wraps
from types import MappingProxyType
from typing import (Any, Optional, TypeVar, Callable, KeysView, Union, # noqa
Iterable, List, Dict, Iterator, Coroutine, MutableSet)
import slugify as unicode_slug
from .dt import as_local, utcnow
# pylint: disable=invalid-name
T = TypeVar('T')  # generic source type (used by convert)
U = TypeVar('U')  # generic target type (used by convert)
ENUM_T = TypeVar('ENUM_T', bound=enum.Enum)  # enum type for OrderedEnum comparisons
# pylint: enable=invalid-name

# tokens stripped by sanitize_filename: ~, .., / and \
RE_SANITIZE_FILENAME = re.compile(r'(~|\.\.|/|\\)')
# tokens stripped by sanitize_path: ~ and runs of two or more dots
RE_SANITIZE_PATH = re.compile(r'(~|\.(\.)+)')
def sanitize_filename(filename: str) -> str:
    r"""Sanitize a filename by removing .. / and \\."""
    return re.sub(RE_SANITIZE_FILENAME, "", filename)
def sanitize_path(path: str) -> str:
    """Sanitize a path by removing ~ and .."""
    return re.sub(RE_SANITIZE_PATH, "", path)
def slugify(text: str) -> str:
    """Slugify a given text."""
    slug = unicode_slug.slugify(text, separator='_')  # type: ignore
    return slug
def repr_helper(inp: Any) -> str:
    """Help creating a more readable string representation of objects."""
    if isinstance(inp, (dict, MappingProxyType)):
        # render mappings as "key=value, key=value" recursively
        pairs = ("{}={}".format(repr_helper(key), repr_helper(value))
                 for key, value in inp.items())
        return ", ".join(pairs)
    if isinstance(inp, datetime):
        return as_local(inp).isoformat()
    return str(inp)
def convert(value: Optional[T], to_type: Callable[[T], U],
            default: Optional[U] = None) -> Optional[U]:
    """Convert value to to_type, returns default if fails."""
    if value is None:
        return default
    try:
        return to_type(value)
    except (ValueError, TypeError):
        # conversion failed; fall back to the default
        return default
def ensure_unique_string(preferred_string: str, current_strings:
                         Union[Iterable[str], KeysView[str]]) -> str:
    """Return a string that is not present in current_strings.

    If preferred string exists will append _2, _3, ..
    """
    taken = set(current_strings)
    if preferred_string not in taken:
        return preferred_string
    suffix = 2
    while "{}_{}".format(preferred_string, suffix) in taken:
        suffix += 1
    return "{}_{}".format(preferred_string, suffix)
# Taken from: http://stackoverflow.com/a/11735897
def get_local_ip() -> str:
    """Try to determine the local IP address of the machine.

    Falls back to hostname resolution, then to 127.0.0.1.
    """
    # Fix: initialize sock before the try block -- if socket() itself
    # raised, the finally clause previously hit an UnboundLocalError.
    sock = None
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # Use Google Public DNS server to determine own IP.  A UDP
        # "connect" sends no packets; it only selects the outbound
        # interface whose address we then read back.
        sock.connect(('8.8.8.8', 80))
        return sock.getsockname()[0]  # type: ignore
    except socket.error:
        try:
            return socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            return '127.0.0.1'
    finally:
        if sock is not None:
            sock.close()
# Taken from http://stackoverflow.com/a/23728630
def get_random_string(length: int = 10) -> str:
    """Return a random string with letters and digits."""
    # SystemRandom draws from the OS entropy source
    rng = random.SystemRandom()
    pool = string.ascii_letters + string.digits
    chars = [rng.choice(pool) for _ in range(length)]
    return ''.join(chars)
class OrderedEnum(enum.Enum):
    """Taken from Python 3.4.0 docs."""

    # https://github.com/PyCQA/pylint/issues/2306
    # pylint: disable=comparison-with-callable
    def __ge__(self, other: ENUM_T) -> bool:
        """Return the greater than element."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return bool(self.value >= other.value)

    def __gt__(self, other: ENUM_T) -> bool:
        """Return the greater element."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return bool(self.value > other.value)

    def __le__(self, other: ENUM_T) -> bool:
        """Return the lower than element."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return bool(self.value <= other.value)

    def __lt__(self, other: ENUM_T) -> bool:
        """Return the lower element."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return bool(self.value < other.value)
class Throttle:
    """A class for throttling the execution of tasks.

    This method decorator adds a cooldown to a method to prevent it from being
    called more than 1 time within the timedelta interval `min_time` after it
    returned its result.

    Calling a method a second time during the interval will return None.

    Pass keyword argument `no_throttle=True` to the wrapped method to make
    the call not throttled.

    Decorator takes in an optional second timedelta interval to throttle the
    'no_throttle' calls.

    Adds a datetime attribute `last_call` to the method.
    """

    def __init__(self, min_time: timedelta,
                 limit_no_throttle: Optional[timedelta] = None) -> None:
        """Initialize the throttle."""
        # Cooldown applied to normal (throttled) calls.
        self.min_time = min_time
        # Optional stricter cooldown that applies even to no_throttle calls.
        self.limit_no_throttle = limit_no_throttle

    def __call__(self, method: Callable) -> Callable:
        """Caller for the throttle."""
        # Make sure we return a coroutine if the method is async.
        if asyncio.iscoroutinefunction(method):
            async def throttled_value() -> None:
                """Stand-in function for when real func is being throttled."""
                return None
        else:
            def throttled_value() -> None:  # type: ignore
                """Stand-in function for when real func is being throttled."""
                return None

        if self.limit_no_throttle is not None:
            # Wrap the method in a second Throttle so even forced
            # (`no_throttle=True`) calls are rate-limited by this interval.
            method = Throttle(self.limit_no_throttle)(method)

        # Different methods that can be passed in:
        #  - a function
        #  - an unbound function on a class
        #  - a method (bound function on a class)

        # We want to be able to differentiate between function and unbound
        # methods (which are considered functions).
        # All methods have the classname in their qualname separated by a '.'
        # Functions have a '.' in their qualname if defined inline, but will
        # be prefixed by '.<locals>.' so we strip that out.
        is_func = (not hasattr(method, '__self__') and
                   '.' not in method.__qualname__.split('.<locals>.')[-1])

        @wraps(method)
        def wrapper(*args: Any, **kwargs: Any) -> Union[Callable, Coroutine]:
            """Wrap that allows wrapped to be called only once per min_time.

            If we cannot acquire the lock, it is running so return None.
            """
            # pylint: disable=protected-access
            # Pick the object that carries the throttle state: the bound
            # instance, the wrapper itself (plain function), or the first
            # positional argument (unbound method call).
            if hasattr(method, '__self__'):
                host = getattr(method, '__self__')
            elif is_func:
                host = wrapper
            else:
                host = args[0] if args else wrapper

            # host._throttle maps id(self) -> [lock, last-successful-call].
            if not hasattr(host, '_throttle'):
                host._throttle = {}

            if id(self) not in host._throttle:
                host._throttle[id(self)] = [threading.Lock(), None]
            throttle = host._throttle[id(self)]

            # Non-blocking acquire: a concurrent call is still running,
            # so report "throttled" instead of waiting.
            if not throttle[0].acquire(False):
                return throttled_value()

            # Check if method is never called or no_throttle is given
            force = kwargs.pop('no_throttle', False) or not throttle[1]

            try:
                if force or utcnow() - throttle[1] > self.min_time:
                    result = method(*args, **kwargs)
                    # Record the time of the last non-throttled call.
                    throttle[1] = utcnow()
                    return result  # type: ignore
                return throttled_value()
            finally:
                throttle[0].release()

        return wrapper
| |
from ctypes import *
import unittest
import struct
def valid_ranges(*types):
    """Return a list of (min, max) value bounds, one per ctypes numeric type.

    Each type's single-character struct format (its ``_type_`` attribute)
    is used with the struct module to unpack several extreme bit patterns;
    the smallest and largest resulting values bound the range representable
    by that format.
    """
    result = []
    for t in types:
        fmt = t._type_
        size = struct.calcsize(fmt)
        # Bit patterns covering the extremes of both the signed and the
        # unsigned interpretation of the format.  Bytes literals are
        # required on Python 3, where struct.unpack() rejects str input
        # (on Python 2.6+, b"..." is simply str, so this stays compatible).
        a = struct.unpack(fmt, (b"\x00" * 32)[:size])[0]
        b = struct.unpack(fmt, (b"\xFF" * 32)[:size])[0]
        c = struct.unpack(fmt, (b"\x7F" + b"\x00" * 32)[:size])[0]
        d = struct.unpack(fmt, (b"\x80" + b"\xFF" * 32)[:size])[0]
        result.append((min(a, b, c, d), max(a, b, c, d)))
    return result
# The common result type of byref()/from_param() calls (PyCArgObject).
ArgType = type(byref(c_int(0)))

unsigned_types = [c_ubyte, c_ushort, c_uint, c_ulong]
signed_types = [c_byte, c_short, c_int, c_long, c_longlong]
bool_types = []
float_types = [c_double, c_float]

# The 64-bit and bool types may be missing on some builds; only add them
# when the names actually exist.
try:
    c_ulonglong
    c_longlong
except NameError:
    pass
else:
    unsigned_types.append(c_ulonglong)
    # NOTE(review): c_longlong already appears in signed_types above, so
    # this appends it a second time — confirm whether that is intended.
    signed_types.append(c_longlong)

try:
    c_bool
except NameError:
    pass
else:
    bool_types.append(c_bool)

unsigned_ranges = valid_ranges(*unsigned_types)
signed_ranges = valid_ranges(*signed_types)
# Sample inputs used to exercise c_bool's truthiness conversion.
bool_values = [True, False, 0, 1, -1, 5000, 'test', [], [1]]
################################################################
class NumberTestCase(unittest.TestCase):
    """Behavioural checks for the simple ctypes numeric types."""

    def test_default_init(self):
        # default values are set to zero
        for t in signed_types + unsigned_types + float_types:
            self.assertEqual(t().value, 0)

    def test_unsigned_values(self):
        # the value given to the constructor is available
        # as the 'value' attribute
        for t, (l, h) in zip(unsigned_types, unsigned_ranges):
            self.assertEqual(t(l).value, l)
            self.assertEqual(t(h).value, h)

    def test_signed_values(self):
        # see above
        for t, (l, h) in zip(signed_types, signed_ranges):
            self.assertEqual(t(l).value, l)
            self.assertEqual(t(h).value, h)

    def test_bool_values(self):
        # any constructor argument is collapsed to its truth value
        from operator import truth
        for t, v in zip(bool_types, bool_values):
            self.assertEqual(t(v).value, truth(v))

    def test_typeerror(self):
        # Only numbers are allowed in the constructor,
        # otherwise TypeError is raised
        for t in signed_types + unsigned_types + float_types:
            self.assertRaises(TypeError, t, "")
            self.assertRaises(TypeError, t, None)

##    def test_valid_ranges(self):
##        # invalid values of the correct type
##        # raise ValueError (not OverflowError)
##        for t, (l, h) in zip(unsigned_types, unsigned_ranges):
##            self.assertRaises(ValueError, t, l-1)
##            self.assertRaises(ValueError, t, h+1)

    def test_from_param(self):
        # the from_param class method attribute always
        # returns PyCArgObject instances
        for t in signed_types + unsigned_types + float_types:
            self.assertEqual(ArgType, type(t.from_param(0)))

    def test_byref(self):
        # calling byref returns also a PyCArgObject instance
        for t in signed_types + unsigned_types + float_types + bool_types:
            parm = byref(t())
            self.assertEqual(ArgType, type(parm))

    def test_floats(self):
        # c_float and c_double can be created from
        # Python int, long and float
        class FloatLike(object):
            def __float__(self):
                return 2.0
        f = FloatLike()
        for t in float_types:
            self.assertEqual(t(2.0).value, 2.0)
            self.assertEqual(t(2).value, 2.0)
            self.assertEqual(t(2L).value, 2.0)
            self.assertEqual(t(f).value, 2.0)

    def test_integers(self):
        class FloatLike(object):
            def __float__(self):
                return 2.0
        f = FloatLike()
        class IntLike(object):
            def __int__(self):
                return 2
        i = IntLike()
        # integers cannot be constructed from floats,
        # but from integer-like objects
        for t in signed_types + unsigned_types:
            self.assertRaises(TypeError, t, 3.14)
            self.assertRaises(TypeError, t, f)
            self.assertEqual(t(i).value, 2)

    def test_sizes(self):
        for t in signed_types + unsigned_types + float_types + bool_types:
            # skip formats the struct module cannot size
            try:
                size = struct.calcsize(t._type_)
            except struct.error:
                continue
            # sizeof of the type...
            self.assertEqual(sizeof(t), size)
            # and sizeof of an instance
            self.assertEqual(sizeof(t()), size)

    def test_alignments(self):
        for t in signed_types + unsigned_types + float_types:
            code = t._type_ # the typecode
            # struct's padding after a single char reveals the alignment
            align = struct.calcsize("c%c" % code) - struct.calcsize(code)

            # alignment of the type...
            self.assertEqual((code, alignment(t)),
                             (code, align))
            # and alignment of an instance
            self.assertEqual((code, alignment(t())),
                             (code, align))

    def test_int_from_address(self):
        from array import array
        for t in signed_types + unsigned_types:
            # the array module doesn't support all format codes
            # (no 'q' or 'Q')
            try:
                array(t._type_)
            except ValueError:
                continue
            a = array(t._type_, [100])

            # v now is an integer at an 'external' memory location
            v = t.from_address(a.buffer_info()[0])
            self.assertEqual(v.value, a[0])
            self.assertEqual(type(v), t)

            # changing the value at the memory location changes v's value also
            a[0] = 42
            self.assertEqual(v.value, a[0])

    def test_float_from_address(self):
        from array import array
        for t in float_types:
            a = array(t._type_, [3.14])
            v = t.from_address(a.buffer_info()[0])
            self.assertEqual(v.value, a[0])
            self.assertTrue(type(v) is t)
            a[0] = 2.3456e17
            self.assertEqual(v.value, a[0])
            self.assertTrue(type(v) is t)

    def test_char_from_address(self):
        from ctypes import c_char
        from array import array

        a = array('c', 'x')
        v = c_char.from_address(a.buffer_info()[0])
        self.assertEqual(v.value, a[0])
        self.assertTrue(type(v) is c_char)

        a[0] = '?'
        self.assertEqual(v.value, a[0])

    # array does not support c_bool / 't'
    # def test_bool_from_address(self):
    #     from ctypes import c_bool
    #     from array import array
    #     a = array(c_bool._type_, [True])
    #     v = t.from_address(a.buffer_info()[0])
    #     self.assertEqual(v.value, a[0])
    #     self.assertEqual(type(v) is t)
    #     a[0] = False
    #     self.assertEqual(v.value, a[0])
    #     self.assertEqual(type(v) is t)

    def test_init(self):
        # c_int() can be initialized from Python's int, and c_int.
        # Not from c_long or so, which seems strange, and should
        # probably be changed:
        self.assertRaises(TypeError, c_int, c_long(42))

##    def test_perf(self):
##        check_perf()
from ctypes import _SimpleCData

class c_int_S(_SimpleCData):
    # A private c_int workalike defined from scratch; used by the
    # performance checks below for comparison against the stock c_int.
    _type_ = "i"
    __slots__ = []
def run_test(rep, msg, func, arg=None):
    """Time *func* over *rep* iterations (5 calls each) and print the
    average per-call duration in microseconds, labelled with *msg*."""
##    items = [None] * rep
    items = range(rep)
    from time import clock
    if arg is not None:
        start = clock()
        # Five calls per iteration to amortize the loop overhead.
        for i in items:
            func(arg); func(arg); func(arg); func(arg); func(arg)
        stop = clock()
    else:
        start = clock()
        for i in items:
            func(); func(); func(); func(); func()
        stop = clock()
    print "%15s: %.2f us" % (msg, ((stop-start)*1e6/5/rep))
def check_perf():
    """Benchmark construction of int, c_int and the private c_int_S."""
    # Construct 5 objects
    from ctypes import c_int

    REP = 200000

    # NOTE(review): the "(999)" variants pass no argument to run_test's
    # func, so they actually time the zero-argument call — confirm whether
    # the labels or the calls are wrong.
    run_test(REP, "int()", int)
    run_test(REP, "int(999)", int)
    run_test(REP, "c_int()", c_int)
    run_test(REP, "c_int(999)", c_int)
    run_test(REP, "c_int_S()", c_int_S)
    run_test(REP, "c_int_S(999)", c_int_S)
# Python 2.3 -OO, win2k, P4 700 MHz:
#
# int(): 0.87 us
# int(999): 0.87 us
# c_int(): 3.35 us
# c_int(999): 3.34 us
# c_int_S(): 3.23 us
# c_int_S(999): 3.24 us
# Python 2.2 -OO, win2k, P4 700 MHz:
#
# int(): 0.89 us
# int(999): 0.89 us
# c_int(): 9.99 us
# c_int(999): 10.02 us
# c_int_S(): 9.87 us
# c_int_S(999): 9.85 us
# Run the unit tests when executed as a script.
if __name__ == '__main__':
    ## check_perf()
    unittest.main()
| |
# type: ignore
import datetime
import platform
import sys
from typing import Any
from unittest import mock
import pytest
import aiohttp
from aiohttp import web
from aiohttp.abc import AbstractAccessLogger, AbstractAsyncAccessLogger
from aiohttp.typedefs import Handler
from aiohttp.web_log import AccessLogger
from aiohttp.web_response import Response
# contextvars is only available on Python 3.7+; fall back to None so the
# module still imports on older interpreters.
try:
    from contextvars import ContextVar
except ImportError:
    ContextVar = None

# True when running under PyPy (used by skipif marks below).
IS_PYPY: Any = platform.python_implementation() == "PyPy"
def test_access_logger_format() -> None:
    """Known atoms compile to %s placeholders; stray %/{} text is escaped."""
    fmt = '%T "%{ETag}o" %X {X} %%P'
    logger = AccessLogger(mock.Mock(), fmt)
    assert logger._log_format == '%s "%s" %%X {X} %%%s'
@pytest.mark.skipif(
    IS_PYPY,
    reason="""
    Because of patching :py:class:`datetime.datetime`, under PyPy it
    fails in :py:func:`isinstance` call in
    :py:meth:`datetime.datetime.__sub__` (called from
    :py:meth:`aiohttp.AccessLogger._format_t`):
    *** TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
    (Pdb) from datetime import datetime
    (Pdb) isinstance(now, datetime)
    *** TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
    (Pdb) datetime.__class__
    <class 'unittest.mock.MagicMock'>
    (Pdb) isinstance(now, datetime.__class__)
    False
    Ref: https://bitbucket.org/pypy/pypy/issues/1187/call-to-isinstance-in-__sub__-self-other
    Ref: https://github.com/celery/celery/issues/811
    Ref: https://stackoverflow.com/a/46102240/595220
    """,  # noqa: E501
)
@pytest.mark.parametrize(
    "log_format,expected,extra",
    [
        (
            "%t",
            "[01/Jan/1843:00:29:56 +0800]",
            {"request_start_time": "[01/Jan/1843:00:29:56 +0800]"},
        ),
        (
            '%a %t %P %r %s %b %T %Tf %D "%{H1}i" "%{H2}i"',
            (
                "127.0.0.2 [01/Jan/1843:00:29:56 +0800] <42> "
                'GET /path HTTP/1.1 200 42 3 3.141593 3141593 "a" "b"'
            ),
            {
                "first_request_line": "GET /path HTTP/1.1",
                "process_id": "<42>",
                "remote_address": "127.0.0.2",
                "request_start_time": "[01/Jan/1843:00:29:56 +0800]",
                "request_time": "3",
                "request_time_frac": "3.141593",
                "request_time_micro": "3141593",
                "response_size": 42,
                "response_status": 200,
                "request_header": {"H1": "a", "H2": "b"},
            },
        ),
    ],
)
def test_access_logger_atoms(
    monkeypatch: Any, log_format: Any, expected: Any, extra: Any
) -> None:
    """Each supported log atom renders the expected text and extra dict."""
    # Freeze datetime.datetime.now() so the %t atom is deterministic.
    class PatchedDatetime(datetime.datetime):
        @staticmethod
        def now(tz):
            return datetime.datetime(1843, 1, 1, 0, 30, tzinfo=tz)

    monkeypatch.setattr("datetime.datetime", PatchedDatetime)
    # -28800 seconds matches the +0800 offset in the expected output.
    monkeypatch.setattr("time.timezone", -28800)
    monkeypatch.setattr("os.getpid", lambda: 42)  # stable %P value
    mock_logger = mock.Mock()
    access_logger = AccessLogger(mock_logger, log_format)
    request = mock.Mock(
        headers={"H1": "a", "H2": "b"},
        method="GET",
        path_qs="/path",
        version=aiohttp.HttpVersion(1, 1),
        remote="127.0.0.2",
    )
    response = mock.Mock(headers={}, body_length=42, status=200)
    access_logger.log(request, response, 3.1415926)
    # Formatting must not have fallen into the error path.
    assert not mock_logger.exception.called, mock_logger.exception.call_args
    mock_logger.info.assert_called_with(expected, extra=extra)
def test_access_logger_dicts() -> None:
    """Header atoms resolve from the request/response header mappings."""
    logger = mock.Mock()
    access_logger = AccessLogger(logger, "%{User-Agent}i %{Content-Length}o %{None}i")
    req = mock.Mock(
        headers={"User-Agent": "Mock/1.0"}, version=(1, 1), remote="127.0.0.2"
    )
    resp = mock.Mock(headers={"Content-Length": 123})
    access_logger.log(req, resp, 0.0)
    assert not logger.error.called
    logger.info.assert_called_with(
        "Mock/1.0 123 -",
        extra={
            "request_header": {"User-Agent": "Mock/1.0", "None": "-"},
            "response_header": {"Content-Length": 123},
        },
    )
def test_access_logger_unix_socket() -> None:
    """An empty remote (unix-socket client) renders %a as empty text."""
    logger = mock.Mock()
    access_logger = AccessLogger(logger, "|%a|")
    req = mock.Mock(headers={"User-Agent": "Mock/1.0"}, version=(1, 1), remote="")
    access_logger.log(req, mock.Mock(), 0.0)
    assert not logger.error.called
    logger.info.assert_called_with("||", extra={"remote_address": ""})
def test_logger_no_message() -> None:
    """A missing request yields placeholder values rather than an error."""
    logger = mock.Mock()
    access_logger = AccessLogger(logger, "%r %{content-type}i")
    access_logger.log(None, None, 0.0)
    logger.info.assert_called_with(
        "- (no headers)",
        extra={
            "first_request_line": "-",
            "request_header": {"content-type": "(no headers)"},
        },
    )
def test_logger_internal_error() -> None:
    """Formatting failures are routed to logger.exception, not raised."""
    logger = mock.Mock()
    AccessLogger(logger, "%D").log(None, None, "invalid")
    logger.exception.assert_called_with("Error in logging")
def test_logger_no_transport() -> None:
    """Without a transport, the remote address falls back to '-'."""
    logger = mock.Mock()
    AccessLogger(logger, "%a").log(None, None, 0)
    logger.info.assert_called_with("-", extra={"remote_address": "-"})
def test_logger_abc() -> None:
    """AbstractAccessLogger subclasses fully control logging behaviour."""

    class RaisingLogger(AbstractAccessLogger):
        def log(self, request, response, time):
            1 / 0

    with pytest.raises(ZeroDivisionError):
        RaisingLogger(mock.Mock(), None).log(None, None, None)

    class FormattingLogger(AbstractAccessLogger):
        def log(self, request, response, time):
            self.logger.info(
                self.log_format.format(request=request, response=response, time=time)
            )

    logger = mock.Mock()
    custom = FormattingLogger(logger, "{request} {response} {time}")
    custom.log("request", "response", 1)
    logger.info.assert_called_with("request response 1")
async def test_exc_info_context(aiohttp_raw_server: Any, aiohttp_client: Any) -> None:
    """The handler's exception is visible via sys.exc_info() inside log()."""
    seen = None

    class Logger(AbstractAccessLogger):
        def log(self, request, response, time):
            nonlocal seen
            seen = "{0.__name__}: {1}".format(*sys.exc_info())

    async def handler(request):
        raise RuntimeError("intentional runtime error")

    server = await aiohttp_raw_server(
        handler, access_log_class=Logger, logger=mock.Mock()
    )
    client = await aiohttp_client(server)
    resp = await client.get("/path/to", headers={"Accept": "text/html"})
    assert resp.status == 500
    assert seen == "RuntimeError: intentional runtime error"
async def test_async_logger(aiohttp_raw_server: Any, aiohttp_client: Any):
    """An AbstractAsyncAccessLogger coroutine log() is awaited per request."""
    recorded = None

    class Logger(AbstractAsyncAccessLogger):
        async def log(self, request, response, time):
            nonlocal recorded
            recorded = f"{request.path}: {response.status}"

    async def handler(request):
        return Response(text="ok")

    server = await aiohttp_raw_server(
        handler, access_log_class=Logger, logger=mock.Mock()
    )
    client = await aiohttp_client(server)
    resp = await client.get("/path/to", headers={"Accept": "text/html"})
    assert resp.status == 200
    assert recorded == "/path/to: 200"
async def test_contextvars_logger(aiohttp_server: Any, aiohttp_client: Any):
    """Context variables set in middleware are visible to the access logger."""
    var = ContextVar("VAR")

    async def handler(request):
        return web.Response()

    async def middleware(request, handler: Handler):
        var.set("uuid")
        return await handler(request)

    recorded = None

    class Logger(AbstractAccessLogger):
        def log(self, request, response, time):
            nonlocal recorded
            recorded = f"contextvars: {var.get()}"

    app = web.Application(middlewares=[middleware])
    app.router.add_get("/", handler)
    server = await aiohttp_server(app, access_log_class=Logger)
    client = await aiohttp_client(server)
    resp = await client.get("/")
    assert resp.status == 200
    assert recorded == "contextvars: uuid"
| |
import sys
import unittest
import warnings
from copy import copy
from django import template
from django.template.base import builtins, TextNode, VariableNode
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from customtags.lexer import Lexer, Token, TokenStream, TokenStreamIterator
from customtags.expr_parser import ExprParser
from customtags.tokens import *
from customtags import arguments, core, exceptions, utils, parser, helpers, \
values, decorators
from _settings_patcher import *
from utils import pool, Renderer
## These are the expression parser test configurations

# Arithmetic and function-call expressions
EXPR_ADD = "var + 1"
EXPR_FUNC = "add1(var)"
# Filter syntax: Jinja-style call vs. Django-style colon, plus a chain
EXPR_JINJA_FILTER = "var|default(def)"
EXPR_DJANGO_FILTER = "var|default:def"
EXPR_DJANGO_FILTER_CHAIN = "var|default:def|upper"
# Translation helpers (variable and literal forms)
EXPR_DJANGO_TRANS = "_(translate)"
EXPR_LITERAL_TRANS = '_("Literal String")'
# Attribute and subscript access
EXPR_CHAIN_VARS = 'variable.member'
EXPR_DICT_CONST = 'variable["member"]'
EXPR_DICT_VAR = 'variable[member_var]'
# Conditional and comparison expressions
EXPR_IF = '1 if False else 2'
EXPR_GTE = '1 >= 2'
EXPR_LT = '2 < 5'
EXPR_EQ = '7 == 5'
# Expected lexer token-type sequence for each sample expression.
# (EXPR_LITERAL_TRANS has no entry here; it is exercised only through
# RESOLVE_MAP below.)
TOKEN_MAP = {
    EXPR_ADD : [TOKEN_NAME, TOKEN_ADD, TOKEN_INTEGER],
    EXPR_FUNC : [TOKEN_NAME, TOKEN_LPAREN, TOKEN_NAME, TOKEN_RPAREN],
    EXPR_JINJA_FILTER : [TOKEN_NAME, TOKEN_PIPE, TOKEN_NAME, TOKEN_LPAREN,
                         TOKEN_NAME, TOKEN_RPAREN],
    EXPR_DJANGO_FILTER : [TOKEN_NAME, TOKEN_PIPE, TOKEN_NAME, TOKEN_COLON, TOKEN_NAME],
    EXPR_DJANGO_FILTER_CHAIN : [TOKEN_NAME, TOKEN_PIPE, TOKEN_NAME, TOKEN_COLON,
                                TOKEN_NAME, TOKEN_PIPE, TOKEN_NAME],
    EXPR_DJANGO_TRANS : [TOKEN_NAME, TOKEN_LPAREN, TOKEN_NAME, TOKEN_RPAREN],
    EXPR_CHAIN_VARS : [TOKEN_NAME, TOKEN_DOT, TOKEN_NAME],
    EXPR_DICT_CONST : [TOKEN_NAME, TOKEN_LBRACKET, TOKEN_STRING, TOKEN_RBRACKET],
    EXPR_DICT_VAR : [TOKEN_NAME, TOKEN_LBRACKET, TOKEN_NAME, TOKEN_RBRACKET],
    EXPR_IF : [TOKEN_INTEGER, TOKEN_NAME, TOKEN_NAME, TOKEN_NAME, TOKEN_INTEGER],
    EXPR_GTE : [TOKEN_INTEGER, TOKEN_GTEQ, TOKEN_INTEGER],
    EXPR_LT : [TOKEN_INTEGER, TOKEN_LT, TOKEN_INTEGER],
    EXPR_EQ : [TOKEN_INTEGER, TOKEN_EQ, TOKEN_INTEGER],
}
# expression -> [expected resolved value, context used for resolution]
RESOLVE_MAP = {
    EXPR_ADD : [3, {"var":2}],
    EXPR_FUNC : [2, {"var":1, "add1":lambda x: x+1}],
    EXPR_JINJA_FILTER : ["default", {"var":None, "def":"default"}],
    EXPR_DJANGO_FILTER : ["default", {"var":None, "def":"default"}],
    EXPR_DJANGO_FILTER_CHAIN : ["VALUE", {"var":"value","def":"default"}],
    EXPR_DJANGO_TRANS : ["first", {"translate":"first"}],
    EXPR_LITERAL_TRANS : ["Literal String", {}],
    EXPR_CHAIN_VARS : [22, {"variable":{"member":22}}],
    EXPR_DICT_CONST : [44, {"variable":{"member":44}}],
    EXPR_DICT_VAR : [55, {"member_var":"member","variable":{"member":55}}],
    EXPR_IF : [2, {}],
    EXPR_GTE : [False, {}],
    EXPR_LT : [True, {}],
    EXPR_EQ : [False, {}],
}
class DummyTokens(list):
    """A stand-in for a Django tag token list: [tagname, token, ...]."""

    def __init__(self, *tokens, **kwargs):
        tagname = kwargs.get('tagname', 'dummy_tag')
        super(DummyTokens, self).__init__([tagname] + list(tokens))

    def split_contents(self):
        """Mimic Token.split_contents: the tokens are already split."""
        return self

    @property
    def contents(self):
        """The whole tag as a single space-joined string."""
        return " ".join(str(entry) for entry in self)
class DummyParser(object):
    """Minimal stand-in for Django's template parser."""

    def compile_filter(self, token):
        """Wrap the raw token in a static value."""
        return values.StaticValue(token)

    def find_filter(self, token):
        """Look up a built-in Django template filter by name."""
        from django.template.base import builtins
        registry = {}
        for library in builtins:
            registry.update(library.filters)
        return registry[token]
# Shared parser stand-in used by the option-parsing tests below.
dummy_parser = DummyParser()
class DummyContainer(object):
    """Collects parsed tag arguments the way a real tag container would."""

    def __init__(self, tagname="dummy_tag"):
        self.tagname = tagname
        self.tag_args = []
        self.tag_kwargs = {}
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
class TokenStreamTestCase(TestCase):
    """Exercise TokenStream traversal, lookahead, iteration and copying."""

    test_tokens = [Token(TOKEN_BLOCK_BEGIN, ''),
                   Token(TOKEN_BLOCK_END, '')]

    def test_simple(self):
        stream = TokenStream(self.test_tokens)
        assert stream.current.type is TOKEN_BLOCK_BEGIN
        assert bool(stream)
        assert not bool(stream.eos)
        next(stream)
        assert stream.current.type is TOKEN_BLOCK_END
        assert bool(stream)
        assert not bool(stream.eos)
        next(stream)
        # Both tokens consumed: only the EOF sentinel remains.
        assert stream.current.type is TOKEN_EOF
        assert stream.size == 0
        assert not bool(stream)
        assert bool(stream.eos)

    def test_look(self):
        stream = TokenStream(self.test_tokens)
        assert stream.current.type is TOKEN_BLOCK_BEGIN
        assert stream.look().type is TOKEN_BLOCK_END
        assert bool(stream)
        assert not bool(stream.eos)
        next(stream)
        assert stream.current.type is TOKEN_BLOCK_END
        assert stream.look().type is TOKEN_EOF
        assert bool(stream)
        assert not bool(stream.eos)
        next(stream)
        assert stream.current.type is TOKEN_EOF
        assert stream.size == 0
        assert not bool(stream)
        assert bool(stream.eos)

    def test_iter(self):
        seen = [tok.type for tok in TokenStream(self.test_tokens)]
        assert seen == ['block_begin', 'block_end', ]

    def test_copy(self):
        original = TokenStream(self.test_tokens)
        assert original.size == 2
        clone = copy(original)
        assert original.size == clone.size
        next(clone)
        # Advancing the copy must not consume from the original.
        assert original.size > clone.size
class LexerTestCase(TestCase):
    """Tokenize each sample expression and compare against TOKEN_MAP."""

    def setUp(self):
        self.lexer = Lexer()

    def _do_expr_test(self, expr):
        produced = list(self.lexer.tokenize(expr))
        expected = TOKEN_MAP[expr]
        self.assertEqual(len(produced), len(expected))
        for token, wanted in zip(produced, expected):
            self.assertEqual(token.type, wanted)

    def test_add_expr(self):
        self._do_expr_test(EXPR_ADD)

    def test_simple_func_expr(self):
        self._do_expr_test(EXPR_FUNC)

    def test_jinja_filter_expr(self):
        self._do_expr_test(EXPR_JINJA_FILTER)

    def test_django_filter_expr(self):
        self._do_expr_test(EXPR_DJANGO_FILTER)

    def test_django_filter_chain_expr(self):
        self._do_expr_test(EXPR_DJANGO_FILTER_CHAIN)

    def test_django_trans(self):
        self._do_expr_test(EXPR_DJANGO_TRANS)

    def test_chain_vars(self):
        self._do_expr_test(EXPR_CHAIN_VARS)

    def test_dict_const(self):
        self._do_expr_test(EXPR_DICT_CONST)

    def test_dict_var(self):
        self._do_expr_test(EXPR_DICT_VAR)

    def test_if(self):
        self._do_expr_test(EXPR_IF)

    def test_equality(self):
        self._do_expr_test(EXPR_EQ)

    def test_lessthan(self):
        self._do_expr_test(EXPR_LT)

    def test_greaterthanequal(self):
        self._do_expr_test(EXPR_GTE)
class ParserTestCase(TestCase):
    """Parse each sample expression and verify it resolves to the value
    registered in RESOLVE_MAP, using the matching context."""

    def setUp(self):
        self.lexer = Lexer()
        self.parser = ExprParser()

    def _do_resolve_expr(self, expr):
        # Look up the (expected value, context) pair registered for expr.
        context = RESOLVE_MAP[expr][1]
        expected = RESOLVE_MAP[expr][0]
        self._test_expr(expr, context, expected)

    def _test_expr(self, expr, context, expected):
        stream = self.lexer.tokenize(expr)
        result = self.parser.parse(stream, dummy_parser)
        self.assertEqual(result.resolve(context), expected)

    def test_add_expr(self):
        self._do_resolve_expr(EXPR_ADD)

    def test_simple_func_expr(self):
        self._do_resolve_expr(EXPR_FUNC)

    def test_jinja_filter_expr(self):
        self._do_resolve_expr(EXPR_JINJA_FILTER)

    def test_django_filter_expr(self):
        self._do_resolve_expr(EXPR_DJANGO_FILTER)

    def test_django_filter_chain_expr(self):
        self._do_resolve_expr(EXPR_DJANGO_FILTER_CHAIN)

    def test_django_trans(self):
        self._do_resolve_expr(EXPR_DJANGO_TRANS)

    def test_chain_vars(self):
        self._do_resolve_expr(EXPR_CHAIN_VARS)

    def test_dict_const(self):
        self._do_resolve_expr(EXPR_DICT_CONST)

    def test_dict_var(self):
        self._do_resolve_expr(EXPR_DICT_VAR)

    def test_if(self):
        self._do_resolve_expr(EXPR_IF)

    def test_equality(self):
        self._do_resolve_expr(EXPR_EQ)

    def test_lessthan(self):
        self._do_resolve_expr(EXPR_LT)

    def test_greaterthanequal(self):
        self._do_resolve_expr(EXPR_GTE)

    def test_function_arguments(self):
        # Helper callables exposed to the parsed expressions via the context.
        def concat(list1, list2):
            return list1 + list2
        def add(num1, num2=1):
            return num1 + num2
        def sum(*args):
            # NOTE: shadows the builtin `sum`; uses the Python 2 builtin
            # `reduce`.
            return reduce(lambda x, y: x + y, args)
        def makedict(**kwargs):
            return kwargs
        ctx = {
            "concat" : concat,
            "add" : add,
            "sum" : sum,
            "makedict" : makedict,
        }
        def c(d):
            # Return a copy of d extended with the helper callables.
            d = copy(d)
            d.update(ctx)
            return d
        test_concat = "concat([1,2,3], list)"
        test_add1 = "add(1)"
        test_add2 = "add(2, 2)"
        test_add3 = "add(num1=3, num2=3)"
        test_add4 = "add(num1=4)"
        test_add5 = "add(bad, num2=1)"
        test_add6 = "add(1, num2=bad)"
        test_sum1 = "sum(1,2,3,4)"
        test_sum2 = "sum(1,2,3,var)"
        test_sum3 = "sum(*list)"
        test_sum4 = "sum(1,2,3,bad)"
        test_sum5 = "sum(*bad)"
        test_dict1 = "makedict(one=1, two=2, three=3)"
        test_dict2 = "makedict(**dict)"
        test_dict3 = "makedict(one=1, two=2, three=bad)"
        test_dict4 = "makedict(**bad)"
        test_undefined = "undefined(arg)"

        ## TODO: use the correct exceptions
        self._test_expr(test_concat, c({"list":[4,5,6]}), [1,2,3,4,5,6])
        self._test_expr(test_add1, ctx, 2)
        self._test_expr(test_add2, ctx, 4)
        self._test_expr(test_add3, ctx, 6)
        self._test_expr(test_add4, ctx, 5)
        self.assertRaises(TypeError, self._test_expr, test_add5, ctx, 0)
        self.assertRaises(TypeError, self._test_expr, test_add6, ctx, 0)
        self._test_expr(test_sum1, ctx, 10)
        self._test_expr(test_sum2, c({"var":5}), 11)
        self._test_expr(test_sum3, c({"list":[1,2,3,4,5]}), 15)
        self.assertRaises(TypeError, self._test_expr, test_sum4, ctx, 0)
        self.assertRaises(ValueError, self._test_expr, test_sum5, ctx, 0)
        self._test_expr(test_dict1, ctx, {"one":1, "two":2, "three":3})
        self._test_expr(test_dict2, c({"dict":{"a":"1","b":"2"}}), {"a":"1", "b":"2"})
        # An unresolvable kwarg value resolves to the empty string.
        self._test_expr(test_dict3, ctx, {"one":1, "two":2, "three":""})
        #self.assertRaises(ValueError, self._test_expr, test_dict3, ctx, 0)
        self.assertRaises(ValueError, self._test_expr, test_dict4, ctx, 0)
        self.assertRaises(NameError, self._test_expr, test_undefined, c({"arg":1}), 0)
def _collectWarnings(observeWarning, f, *args, **kwargs):
    """Call f(*args, **kwargs) while capturing all warnings it emits.

    Each warning is passed to *observeWarning* as a `_Warning` record;
    returns whatever f returns.
    """
    def showWarning(message, category, filename, lineno, file=None, line=None):
        assert isinstance(message, Warning)
        observeWarning(_Warning(
                message.args[0], category, filename, lineno))

    # Disable the per-module cache for every module otherwise if the warning
    # which the caller is expecting us to collect was already emitted it won't
    # be re-emitted by the call to f which happens below.
    for v in sys.modules.itervalues():
        if v is not None:
            try:
                v.__warningregistry__ = None
            except: # pragma: no cover
                # Don't specify a particular exception type to handle in case
                # some wacky object raises some wacky exception in response to
                # the setattr attempt.
                pass

    # Install the capturing hook, then restore the original warning
    # filters/hook no matter how f exits.
    origFilters = warnings.filters[:]
    origShow = warnings.showwarning
    warnings.simplefilter('always')
    try:
        warnings.showwarning = showWarning
        result = f(*args, **kwargs)
    finally:
        warnings.filters[:] = origFilters
        warnings.showwarning = origShow
    return result
class CustomtagsTests(TestCase):
urls = 'customtags_tests.test_urls'
    def failUnlessWarns(self, category, message, f, *args, **kwargs):
        """Assert that calling f(*args, **kwargs) emits (only) a warning with
        the given category and message; return f's result."""
        warningsShown = []
        result = _collectWarnings(warningsShown.append, f, *args, **kwargs)

        if not warningsShown: # pragma: no cover
            self.fail("No warnings emitted")
        first = warningsShown[0]
        # All captured warnings must agree; mixed warnings are unsupported.
        for other in warningsShown[1:]: # pragma: no cover
            if ((other.message, other.category)
                != (first.message, first.category)):
                self.fail("Can't handle different warnings")
        self.assertEqual(first.message, message)
        self.assertTrue(first.category is category)
        return result
    assertWarns = failUnlessWarns
    def failUnlessRaises(self, excClass, callableObj, *args, **kwargs):
        """This is here to accommodate cases where debug is on in settings.

        With template debugging enabled, the original exception arrives
        wrapped in a TemplateSyntaxError; accept the wrapper when its
        exc_info records the expected exception class.
        """
        try:
            super(CustomtagsTests, self).failUnlessRaises(excClass, callableObj,
                                                          *args, **kwargs)
        except template.TemplateSyntaxError, e:
            if hasattr(e, "exc_info") and e.exc_info[0] is excClass:
                return
            else:
                raise
    assertRaises = failUnlessRaises
    def _tag_tester(self, templates=[], *classes, **kwargs):
        """
        Helper method to test a template tag by rendering it and checking output.

        *classes* are template tag classes (subclasses of core.Tag)
        *templates* is a sequence of a triple (template-string, output-string,
        context)
        An optional *library* keyword argument supplies the registration
        library; a fresh one is created otherwise.
        """
        lib = kwargs["library"] if "library" in kwargs else template.Library()
        for cls in classes:
            lib.tag(cls.as_tag())
            self.assertTrue(cls.name in lib.tags)
        # Make the library a builtin only for the duration of the renders.
        builtins.append(lib)
        for tpl, out, ctx in templates:
            t = template.Template(tpl)
            c = template.Context(ctx)
            s = t.render(c)
            self.assertEqual(s, out)
            # Rendering must not clobber the caller-supplied context values.
            for key, value in ctx.items():
                self.assertEqual(c.get(key), value)
        builtins.remove(lib)
def _decorator_tester(self, templates, library):
"""
Helper method to test a template tag by rendering it and checkout output.
*templates* is a sequence of a triple (template-string, output-string,
context)
*library* is a registration library for registering tags
"""
builtins.append(library)
for tpl, out, ctx in templates:
t = template.Template(tpl)
c = template.Context(ctx)
s = t.render(c)
self.assertEqual(s, out)
builtins.remove(library)
    def test_01_simple_parsing(self):
        """
        Test very basic single argument parsing
        """
        options = core.Options(
            arguments.Argument('myarg'),
        )
        options.initialize('dummy_tag')
        # One token parses into one keyword argument, no positionals.
        dummy_tokens = DummyTokens('myval')
        dummy_container = DummyContainer()
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(dummy_container.tag_args, [])
        self.assertEqual(len(dummy_container.tag_kwargs), 1)
        dummy_context = {"myval" : 1}
        self.assertEqual(dummy_container.tag_kwargs['myarg'].resolve(dummy_context), 1)
        # A second token exceeds the single declared argument.
        dummy_tokens = DummyTokens('myval', 'myval2')
        dummy_container = DummyContainer()
        self.assertRaises(exceptions.TooManyArguments, options.parse,
                          dummy_parser, dummy_tokens, dummy_container)
    def test_02_optional(self):
        """
        Test basic optional argument parsing
        """
        options = core.Options(
            arguments.Argument('myarg'),
            arguments.Argument('optarg', required=False, default=None),
        )
        options.initialize('dummy_tag')
        # Omitting the optional argument falls back to its default (None).
        dummy_tokens = DummyTokens('myval')
        dummy_container = DummyContainer()
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(dummy_container.tag_args, [])
        self.assertEqual(len(dummy_container.tag_kwargs), 2)
        dummy_context = {"myval":2}
        self.assertEqual(dummy_container.tag_kwargs['myarg'].resolve(dummy_context), 2)
        self.assertEqual(dummy_container.tag_kwargs['optarg'].resolve(dummy_context), None)
        # Supplying the optional token resolves the given variable instead.
        dummy_tokens = DummyTokens('myval', 'optval')
        dummy_container = DummyContainer()
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(dummy_container.tag_args, [])
        self.assertEqual(len(dummy_container.tag_kwargs), 2)
        dummy_context = {'myval':3, 'optval':4}
        self.assertEqual(dummy_container.tag_kwargs['myarg'].resolve(dummy_context), 3)
        self.assertEqual(dummy_container.tag_kwargs['optarg'].resolve(dummy_context), 4)
    def test_03_breakpoints(self):
        """
        Test parsing with breakpoints
        """
        options = core.Options(
            arguments.Argument('myarg'),
            'as',
            arguments.Argument('varname'),
            'using',
            arguments.Argument('using'),
        )
        options.initialize('dummy_tag')
        # Not enough tokens to satisfy the declared arguments.
        dummy_tokens = DummyTokens('myval')
        dummy_container = DummyContainer()
        self.assertRaises(exceptions.TooFewArguments, options.parse, dummy_parser,
                          dummy_tokens, dummy_container)
        # 'as' breakpoint expected, but a plain value was found.
        dummy_tokens = DummyTokens('myval', 'myname')
        dummy_container = DummyContainer()
        self.assertRaises(exceptions.BreakpointExpected, options.parse, dummy_parser,
                          dummy_tokens, dummy_container)
        # 'using' breakpoint expected, but a plain value was found.
        dummy_tokens = DummyTokens('myval', 'as', 'myname', 'something')
        dummy_container = DummyContainer()
        self.assertRaises(exceptions.BreakpointExpected, options.parse, dummy_parser,
                          dummy_tokens, dummy_container)
        # Fully specified: every argument resolves through its breakpoint.
        dummy_tokens = DummyTokens('myval', 'as', 'myname', 'using', 'something')
        dummy_container = DummyContainer()
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(dummy_container.tag_args, [])
        self.assertEqual(len(dummy_container.tag_kwargs), 3)
        dummy_context = {"myval":"MYVAL", "myname":"MYNAME", "something":"SOMETHING"}
        self.assertEqual(dummy_container.tag_kwargs['myarg'].resolve(dummy_context), 'MYVAL')
        self.assertEqual(dummy_container.tag_kwargs['varname'].resolve(dummy_context), 'MYNAME')
        self.assertEqual(dummy_container.tag_kwargs['using'].resolve(dummy_context), 'SOMETHING')
    def test_04_flag(self):
        """
        Test flag arguments

        Covers: explicit true/false values, invalid flag tokens, the
        requirement that a Flag has at least one of true/false values,
        case sensitivity, and defaults when a flag token is absent.
        """
        options = core.Options(
            arguments.Flag('myflag', true_values=['on'], false_values=['off'])
        )
        options.initialize('dummy_flag_tag1')
        dummy_context = {}
        # NOTE(review): args/kwargs are fetched *before* parse() — this
        # presumably works because parse() mutates the container's
        # tag_args/tag_kwargs in place; verify DummyContainer semantics.
        dummy_tokens = DummyTokens('on', tagname="dummy_flag_tag1")
        dummy_container = DummyContainer("dummy_flag_tag1")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['myflag'].resolve(dummy_context), True)
        # a token from false_values resolves to False
        dummy_tokens = DummyTokens('off', tagname="dummy_flag_tag1")
        dummy_container = DummyContainer("dummy_flag_tag1")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['myflag'].resolve(dummy_context), False)
        # a token matching neither list is rejected
        dummy_tokens = DummyTokens('myval', tagname="dummy_flag_tag1")
        dummy_container = DummyContainer("dummy_flag_tag1")
        self.assertRaises(exceptions.InvalidFlag, options.parse, dummy_parser,
                          dummy_tokens, dummy_container)
        # a Flag without true_values or false_values is misconfigured
        self.assertRaises(ImproperlyConfigured, arguments.Flag, 'myflag')
        # test case sensitive flag
        options = core.Options(
            arguments.Flag('myflag', true_values=['on'], default=False, case_sensitive=True)
        )
        options.initialize('dummy_flag_tag2')
        # 'On' does not match the case-sensitive 'on', so it is treated as
        # an extra (unexpected) token
        dummy_tokens = DummyTokens('On', tagname="dummy_flag_tag2")
        dummy_container = DummyContainer("dummy_flag_tag2")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        self.assertRaises(exceptions.TooManyArguments, options.parse,
                          dummy_parser, dummy_tokens, dummy_container)
        dummy_tokens = DummyTokens('on', tagname="dummy_flag_tag2")
        dummy_container = DummyContainer("dummy_flag_tag2")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['myflag'].resolve(dummy_context), True)
        # test multi-flag
        options = core.Options(
            arguments.Flag('flagone', true_values=['on'], default=False),
            arguments.Flag('flagtwo', false_values=['off'], default=True),
        )
        options.initialize('dummy_flag_tag3')
        # both tokens present: each flag consumes its own token
        dummy_tokens = DummyTokens('on', 'off', tagname="dummy_flag_tag3")
        dummy_container = DummyContainer("dummy_flag_tag3")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['flagone'].resolve(dummy_context), True)
        self.assertEqual(kwargs['flagtwo'].resolve(dummy_context), False)
        # only 'off' given: flagone falls back to its default (False)
        dummy_tokens = DummyTokens('off', tagname="dummy_flag_tag3")
        dummy_container = DummyContainer("dummy_flag_tag3")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['flagone'].resolve(dummy_context), False)
        self.assertEqual(kwargs['flagtwo'].resolve(dummy_context), False)
        # no tokens: both flags resolve to their defaults
        dummy_tokens = DummyTokens(tagname="dummy_flag_tag3")
        dummy_container = DummyContainer("dummy_flag_tag3")
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['flagone'].resolve(dummy_context), False)
        self.assertEqual(kwargs['flagtwo'].resolve(dummy_context), True)
    def test_05_multi_value(self):
        """
        Test simple multi value arguments

        A MultiValueArgument greedily consumes successive tokens into a
        single keyword that resolves to a list; max_values caps how many
        tokens it may consume, and resolve=False keeps the raw token text.
        """
        options = core.Options(
            arguments.MultiValueArgument('myarg')
        )
        options.initialize('dummy_tag')
        # test single token MVA
        dummy_tokens = DummyTokens('myval')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 1)
        dummy_context = {"myval": "val1", "myval2": "val2", "myval3": "val3"}
        # test resolving to list
        self.assertEqual(kwargs['myarg'].resolve(dummy_context), ["val1"])
        # test double token MVA
        dummy_tokens = DummyTokens('myval', 'myval2')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 1)
        self.assertEqual(kwargs['myarg'].resolve(dummy_context), ['val1', 'val2'])
        # test triple token MVA
        dummy_tokens = DummyTokens('myval', 'myval2', 'myval3')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 1)
        self.assertEqual(kwargs['myarg'].resolve(dummy_context), ['val1', 'val2', 'val3'])
        # test max_values option
        options = core.Options(
            arguments.MultiValueArgument('myarg', max_values=2)
        )
        options.initialize('dummy_tag')
        dummy_tokens = DummyTokens('myval')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 1)
        dummy_context = {"myval": 11, "myval2": 22, "myval3": 33}
        self.assertEqual(kwargs['myarg'].resolve(dummy_context), [11])
        dummy_tokens = DummyTokens('myval', 'myval2')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 1)
        self.assertEqual(kwargs['myarg'].resolve(dummy_context), [11, 22])
        # a third token exceeds max_values=2
        dummy_tokens = DummyTokens('myval', 'myval2', 'myval3')
        dummy_container = DummyContainer()
        self.assertRaises(exceptions.TooManyArguments, options.parse, dummy_parser,
                          dummy_tokens, dummy_container)
        # test no resolve
        options = core.Options(
            arguments.MultiValueArgument('myarg', resolve=False)
        )
        options.initialize('dummy_tag')
        # with resolve=False both a bare token and a quoted one yield the
        # raw token text, ignoring the context values
        dummy_tokens = DummyTokens('myval', "'myval2'")
        dummy_container = DummyContainer()
        dummy_context = {"myval": 101, "myval2": 202, "myval3": 303}
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(kwargs['myarg'].resolve(dummy_context), ['myval', 'myval2'])
    def test_06_complex(self):
        """
        test a complex tag option parser

        Combines a required Argument, an optional MultiValueArgument, two
        breakpoints ('as', 'safe'), an optional Argument and a defaulted
        Flag; optional parts fall back to their defaults when omitted.
        """
        options = core.Options(
            arguments.Argument('singlearg'),
            arguments.MultiValueArgument('multiarg', required=False),
            'as',
            arguments.Argument('varname', required=False),
            'safe',
            arguments.Flag('safe', true_values=['true'], false_values=['false'], default=False)
        )
        options.initialize('dummy_tag')
        dummy_context = {}
        # test simple 'all arguments given'
        dummy_tokens = DummyTokens(1, 2, 3, 'as', 4, 'safe', 'true')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 4)
        for key, value in [('singlearg', 1), ('multiarg', [2,3]), ('varname', 4), ('safe', True)]:
            self.assertEqual(kwargs[key].resolve(dummy_context), value)
        # test 'only first argument given'
        dummy_tokens = DummyTokens(1)
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 4)
        # optional parts resolve to their defaults: [], None, False
        for key, value in [('singlearg', 1), ('multiarg', []), ('varname', None), ('safe', False)]:
            self.assertEqual(kwargs[key].resolve(dummy_context), value)
        # test first argument and last argument given
        dummy_tokens = DummyTokens(2, 'safe', 'false')
        dummy_container = DummyContainer()
        args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
        options.parse(dummy_parser, dummy_tokens, dummy_container)
        self.assertEqual(args, [])
        self.assertEqual(len(kwargs), 4)
        for key, value in [('singlearg', 2), ('multiarg', []), ('varname', None), ('safe', False)]:
            self.assertEqual(kwargs[key].resolve(dummy_context), value)
    def test_07_cycle(self):
        """
        This test re-implements django's cycle tag (because it's quite crazy)
        and checks if it works.

        The classytags version must render identically to the builtin
        {% cycle %} tag, with and without the 'as varname' form.
        """
        from itertools import cycle as itertools_cycle
        class Cycle(core.Tag):
            name = 'classy_cycle'
            options = core.Options(
                arguments.MultiValueArgument('values'),
                'as',
                arguments.Argument('varname', required=False, resolve=False),
            )
            def render_tag(self, context, values, varname):
                # render_context keys on the node instance so each template
                # gets its own cycle iterator
                if self not in context.render_context:
                    context.render_context[self] = itertools_cycle(values)
                cycle_iter = context.render_context[self]
                # NOTE: .next() is Python 2 only; next(cycle_iter) would be
                # portable
                value = cycle_iter.next()
                if varname:
                    context[varname] = value
                return value
        lib = template.Library()
        lib.tag(Cycle.as_tag())
        self.assertTrue('classy_cycle' in lib.tags)
        origtpl = template.Template("""
            {% for thing in sequence %}{% cycle "1" "2" "3" "4" %}{% endfor %}
        """)
        sequence = [1,2,3,4,5,6,7,8,9,10]
        context = template.Context({'sequence': sequence})
        original = origtpl.render(context)
        # NOTE(review): lib is inserted into the global builtins twice below
        # and never removed — presumably harmless for the remaining tests,
        # but verify it does not leak into later test methods.
        builtins.insert(0, lib)
        classytpl = template.Template("""
            {% for thing in sequence %}{% classy_cycle "1" "2" "3" "4" %}{% endfor %}
        """)
        classy = classytpl.render(context)
        self.assertEqual(original, classy)
        origtpl = template.Template("""
            {% for thing in sequence %}{% cycle "1" "2" "3" "4" as myvarname %}{% endfor %}
        """)
        sequence = [1,2,3,4,5,6,7,8,9,10]
        context = template.Context({'sequence': sequence})
        original = origtpl.render(context)
        builtins.insert(0, lib)
        classytpl = template.Template("""
            {% for thing in sequence %}{% classy_cycle "1" "2" "3" "4" as myvarname %}{% endfor %}
        """)
        classy = classytpl.render(context)
        self.assertEqual(original, classy)
def test_08_naming(self):
# test implicit naming
class MyTag(core.Tag):
pass
lib = template.Library()
lib.tag(MyTag.as_tag())
self.assertTrue('my_tag' in lib.tags, "'my_tag' not in %s" % lib.tags.keys())
# test explicit naming
class MyTag2(core.Tag):
name = 'my_tag_2'
lib = template.Library()
lib.tag(MyTag2.as_tag())
self.assertTrue('my_tag_2' in lib.tags, "'my_tag_2' not in %s" % lib.tags.keys())
# test named registering
lib = template.Library()
lib.tag('my_tag_3', MyTag.as_tag())
self.assertTrue('my_tag_3' in lib.tags, "'my_tag_3' not in %s" % lib.tags.keys())
self.assertTrue('my_tag' not in lib.tags, "'my_tag' in %s" % lib.tags.keys())
lib = template.Library()
lib.tag('my_tag_4', MyTag2.as_tag())
self.assertTrue('my_tag_4' in lib.tags, "'my_tag_4' not in %s" % lib.tags.keys())
self.assertTrue('my_tag2' not in lib.tags, "'my_tag2' in %s" % lib.tags.keys())
def test_09_hello_world(self):
class Hello(core.Tag):
options = core.Options(
arguments.Argument('name', required=False, default='world'),
'as',
arguments.Argument('varname', required=False, resolve=False)
)
def render_tag(self, context, name, varname):
output = 'hello %s' % name
if varname:
context[varname] = output
return ''
return output
tpls = [
('{% hello %}', 'hello world', {}),
('{% hello "customtags" %}', 'hello customtags', {}),
('{% hello as myvar %}', '', {'myvar': 'hello world'}),
('{% hello "my friend" as othervar %}', '', {'othervar': 'hello my friend'})
]
self._tag_tester(tpls, Hello)
def test_10_django_vs_classy(self):
pool.autodiscover()
for tagname, data in pool:
controls = data.get('controls', None)
if not controls: # pragma: no cover
continue
tag = data['tag']
renderer = Renderer(tag)
i = 0
for djstring, ctstring, ctx in controls:
i += 1
dj = renderer.django(djstring, ctx)
cy = renderer.classy(ctstring, ctx)
self.assertNotEqual(djstring, ctstring)
self.assertEqual(dj, cy,
("Classytag implementation of %s (control %s) returned "
"something other than the official tag:\n"
"Classy: %r\nDjango: %r" % (tagname, i, cy, dj))
)
def test_11_blocks(self):
class Blocky(core.Tag):
options = core.Options(
blocks=['a', 'b', 'c', 'd', 'e'],
)
def render_tag(self, context, **nodelists):
tpl = "%(a)s;%(b)s;%(c)s;%(d)s;%(e)s"
data = {}
for key, value in nodelists.items():
data[key] = value.render(context)
return tpl % data
templates = [
('{% blocky %}1{% a %}2{% b %}3{% c %}4{% d %}5{% e %}', '1;2;3;4;5', {},),
('{% blocky %}12{% b %}3{% c %}4{% d %}5{% e %}', '12;;3;4;5', {},),
('{% blocky %}123{% c %}4{% d %}5{% e %}', '123;;;4;5', {},),
('{% blocky %}1234{% d %}5{% e %}', '1234;;;;5', {},),
('{% blocky %}12345{% e %}', '12345;;;;', {},),
('{% blocky %}1{% a %}23{% c %}4{% d %}5{% e %}', '1;23;;4;5', {},),
('{% blocky %}1{% a %}23{% c %}45{% e %}', '1;23;;45;', {},),
]
self._tag_tester(templates, Blocky)
def test_12_astag(self):
class Dummy(helpers.AsTag):
options = core.Options(
'as',
arguments.Argument('varname', resolve=False, required=False),
)
def get_value(self, context):
return "dummy"
templates = [
('{% dummy %}:{{ varname }}', 'dummy:', {},),
('{% dummy as varname %}:{{ varname }}', ':dummy', {},),
]
self._tag_tester(templates, Dummy)
def test_13_inclusion_tag(self):
class Inc(helpers.InclusionTag):
template = 'test.html'
options = core.Options(
arguments.Argument('var'),
)
def get_context(self, context, var):
return {'var': var}
templates = [
('{% inc var %}', 'inc', {'var': 'inc'},),
]
self._tag_tester(templates, Inc)
class Inc2(helpers.InclusionTag):
template = 'test.html'
templates = [
('{% inc2 %}', '', {},),
]
self._tag_tester(templates, Inc2)
def test_14_integer_variable(self):
from django.conf import settings
options = core.Options(
arguments.IntegerArgument('integer', resolve=False),
)
options.initialize('dummy_tag')
# this is settings dependant!
old = settings.DEBUG
# test okay
settings.DEBUG = False
dummy_tokens = DummyTokens('1')
dummy_container = DummyContainer()
args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
options.parse(dummy_parser, dummy_tokens, dummy_container)
dummy_context = {}
self.assertEqual(kwargs['integer'].resolve(dummy_context), 1)
# test warning
dummy_tokens = DummyTokens('one')
dummy_container = DummyContainer()
args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
options.parse(dummy_parser, dummy_tokens, dummy_container)
dummy_context = {}
message = arguments.IntegerValue.errors['clean'] % {'value': repr('one')}
self.assertWarns(exceptions.TemplateSyntaxWarning, message, kwargs['integer'].resolve, dummy_context)
self.assertEqual(kwargs['integer'].resolve(dummy_context), values.IntegerValue.value_on_error)
# test exception
settings.DEBUG = True
dummy_tokens = DummyTokens('one')
dummy_container = DummyContainer()
args, kwargs = dummy_container.tag_args, dummy_container.tag_kwargs
options.parse(dummy_parser, dummy_tokens, dummy_container)
dummy_context = {}
message = values.IntegerValue.errors['clean'] % {'value': repr('one')}
self.assertRaises(template.TemplateSyntaxError, kwargs['integer'].resolve, dummy_context)
# test the same as above but with resolving
settings.DEBUG = False
assertTrue = self.assertTrue
class IntegerTag(core.Tag):
options = core.Options(
arguments.IntegerArgument('integer')
)
def render_tag(self, context, integer):
return integer
lib = template.Library()
lib.tag(IntegerTag.as_tag())
builtins.append(lib)
self.assertTrue('integer_tag' in lib.tags)
# test okay
tpl = template.Template("{% integer_tag i %}")
context = template.Context({'i': '1'})
self.assertEqual(tpl.render(context), '1')
# test warning
context = template.Context({'i': 'one'})
message = values.IntegerValue.errors['clean'] % {'value': repr('one')}
self.assertWarns(exceptions.TemplateSyntaxWarning, message, tpl.render, context)
self.assertEqual(tpl.render(context), values.IntegerValue.value_on_error)
# test exception
settings.DEBUG = True
context = template.Context({'i': 'one'})
message = arguments.IntegerValue.errors['clean'] % {'value': repr('one')}
self.assertRaises(template.TemplateSyntaxError, tpl.render, context)
# reset settings
builtins.remove(lib)
settings.DEBUG = old
def test_15_not_implemented_errors(self):
lib = template.Library()
class Fail(core.Tag):
pass
class Fail2(helpers.AsTag):
pass
class Fail3(helpers.AsTag):
options = core.Options(
'as',
)
class Fail4(helpers.AsTag):
options = core.Options(
'as',
arguments.Argument('varname', resolve=False),
)
class Fail5(helpers.InclusionTag):
pass
lib.tag(Fail.as_tag())
lib.tag(Fail2.as_tag())
lib.tag(Fail3.as_tag())
lib.tag(Fail4.as_tag())
lib.tag(Fail5.as_tag())
builtins.append(lib)
self.assertTrue('fail' in lib.tags)
self.assertTrue('fail2' in lib.tags)
self.assertTrue('fail3' in lib.tags)
self.assertTrue('fail4' in lib.tags)
self.assertTrue('fail5' in lib.tags)
context = template.Context({})
tpl = template.Template("{% fail %}")
self.assertRaises(NotImplementedError, tpl.render, context)
self.assertRaises(ImproperlyConfigured, template.Template, "{% fail2 %}")
self.assertRaises(ImproperlyConfigured, template.Template, "{% fail3 %}")
tpl = template.Template("{% fail4 as something %}")
self.assertRaises(NotImplementedError, tpl.render, context)
self.assertRaises(ImproperlyConfigured, template.Template, "{% fail5 %}")
builtins.remove(lib)
def test_16_too_many_arguments(self):
lib = template.Library()
class NoArg(core.Tag):
pass
lib.tag(NoArg.as_tag())
builtins.append(lib)
self.assertTrue('no_arg' in lib.tags)
self.assertRaises(exceptions.TooManyArguments, template.Template, "{% no_arg a arg %}")
builtins.remove(lib)
def test_17_repetition(self):
class Switch(core.Tag):
name = "switch"
options = core.Options(
arguments.Argument('state'),
arguments.Repetition(
'cases',
arguments.BlockTag(
'case',
arguments.Argument('value'),
arguments.NodeList('nodelist'),
arguments.EndTag()
),
min_reps = 1
),
arguments.BlockTag(
'default',
arguments.NodeList("default"),
arguments.EndTag()
),
arguments.EndTag()
)
def render_tag(self, context, state, cases, default):
for case in cases:
value = case.kwargs['value']
if state == value:
nodelist = case.kwargs['nodelist']
context.push()
try:
return nodelist.render(context)
finally:
context.pop()
context.push()
try:
return default.render(context)
finally:
context.pop()
tpls = [
("""{% switch 1 %}
{% case 1 %}ONE{% endcase %}
{% case 2 %}TWO{% endcase %}
{% default %}DEFAULT{% enddefault %}
{% endswitch %}""", 'ONE',{}),
("""{% switch 2 %}
{% case 1 %}ONE{% endcase %}
{% case 2 %}TWO{% endcase %}
{% default %}DEFAULT{% enddefault %}
{% endswitch %}""", 'TWO',{}),
("""{% switch 3 %}
{% case 1 %}ONE{% endcase %}
{% case 2 %}TWO{% endcase %}
{% default %}DEFAULT{% enddefault %}
{% endswitch %}""", 'DEFAULT',{}),
]
self._tag_tester(tpls, Switch)
var_template = """{% switch var %}
{% case "A" %}A{% endcase %}
{% case "B" %}B{% endcase %}
{% case "C" %}C{% endcase %}
{% default %}DEFAULT{% enddefault %}
{% endswitch %}"""
tpls = (
[var_template, "A", { 'var' : "A" }],
[var_template, "B", { 'var' : "B" }],
[var_template, "C", { 'var' : "C" }],
[var_template, "DEFAULT", { 'var' : "D" }],
[var_template, "DEFAULT", { 'var' : "X" }],
)
self._tag_tester(tpls, Switch)
## Testing get_nodes_by_type
lib = template.Library()
lib.tag(Switch.as_tag())
builtins.append(lib)
tmpl = template.Template(var_template)
nodes = tmpl.nodelist.get_nodes_by_type(TextNode)
self.assertEqual(len(nodes),4)
builtins.remove(lib)
try:
self._tag_tester([("""{% switch 1 %}{% default %}DEFAULT{% enddefault %}
{% endswitch %}""","DEFAULT",{})], Switch)
except exceptions.ArgumentRequiredError, e:
return
self.assertTrue(False)
    def test_18_decorators(self):
        """
        Decorator-based tag creation: block_decorator wraps a function into
        a block tag (receiving context + nodelist), function_decorator into
        a simple tag; both support keyword arguments, positional arguments
        and the 'as varname' form, and optional explicit registration names.
        """
        lib = template.Library()
        block = decorators.block_decorator(lib)
        @block
        def hello(context, nodelist, name="world", as_name="message"):
            # the decorated function renders its own nodelist with the
            # greeting pushed onto the context under `as_name`
            context.push()
            context[as_name] = "hello %s" % name
            rendered = nodelist.render(context)
            context.pop()
            return rendered
        tpls = [
            ('{% hello %}{{ message }}{% endhello %}', 'hello world', {}),
            ('{% hello name="customtags" %}{{ message }}{% endhello %}',
             'hello customtags', {}),
            ('{% hello "customtags" %}{{ message }}{% endhello %}', 'hello customtags', {}),
            ('{% hello as myvar %}{{ myvar }}{% endhello %}', 'hello world', {}),
            ('{% hello name="my friend" as othervar %}{{ othervar }}{% endhello %}',
             'hello my friend', {}),
            ('{% hello "my friend" as othervar %}{{ othervar }}{% endhello %}',
             'hello my friend', {}),
        ]
        self._decorator_tester(tpls, lib)
        # explicit registration name: the tag is called 'alt_name', not 'dummy'
        lib2 = template.Library()
        @block(lib2, "alt_name")
        def dummy(context, nodelist, as_name="dummy"):
            context.push()
            context[as_name] = "dummy"
            rendered = nodelist.render(context)
            context.pop()
            return rendered
        tpls2 = [
            ('{% alt_name %}{{ dummy }}{% endalt_name %}', 'dummy', {}),
            ('{% alt_name as varname %}{{ varname }}{% endalt_name %}', 'dummy', {}),
        ]
        self._decorator_tester(tpls2, lib2)
        # function_decorator: no nodelist, returns the output directly
        lib3 = template.Library()
        function = decorators.function_decorator(lib3)
        @function
        def hello_func(context, name=None):
            name = name if name is not None else "world"
            return "hello %s" % name
        tpls3 = [
            ('{% hello_func %}', 'hello world', {}),
            ('{% hello_func name="customtags" %}', 'hello customtags', {}),
            ('{% hello_func "customtags" %}', 'hello customtags', {}),
            ('{% hello_func as myvar %}:{{ myvar }}', ':hello world', {}),
            ('{% hello_func name="my friend" as othervar %}{{othervar}}',
             'hello my friend', {}),
            ('{% hello_func "my friend" as othervar %}:{{ othervar }}',
             ':hello my friend', {}),
        ]
        self._decorator_tester(tpls3, lib3)
        # arguments are resolved against the context (missing lookups fall
        # back to the function default)
        tpls3_b = [
            ('{% hello_func vals.green %}', 'hello verde', { "vals" : { "green" : "verde" } }),
            ('{% hello_func vals.blue %}', 'hello world', { "vals" : { "green" : "verde" } }),
        ]
        self._decorator_tester(tpls3_b, lib3)
        # function_decorator with an explicit registration name
        lib4 = template.Library()
        @function(lib4, "alt_func")
        def dummy_func(as_name="dummy"):
            return "dummy"
        tpls4 = [
            ('{% alt_func %}', 'dummy', {}),
            ('{% alt_func as varname %}:{{ varname }}', ':dummy', {}),
        ]
        self._decorator_tester(tpls4, lib4)
def test_19_literal(self):
class CodeBlock(core.Tag):
options = core.Options(
arguments.Literal('literal'),
arguments.EndTag()
)
def render_tag(self, context, literal):
return literal
tpls = [
('{% code_block %}{# comment #}{% endcode_block %}', '{# comment #}', {}),
('{% code_block %}{{ variable }}{% endcode_block %}', '{{ variable }}', {}),
('{% code_block %}{% tag %}{% endcode_block %}', '{% tag %}', {}),
('{% code_block %}Text{% endcode_block %}', 'Text', {}),
('{% code_block %}{# comment #}{{ var }}{% tag %}Text{% endcode_block %}',
'{# comment #}{{ var }}{% tag %}Text', {}),
]
self._tag_tester(tpls, CodeBlock)
    def test_20_expression(self):
        """
        Expression arguments: arithmetic, parentheses, conditional
        expressions and template filters are evaluated against the context;
        filters must be loaded before the expression can use them.
        """
        class Set(core.Tag):
            options = core.Options(
                arguments.Argument("variable", resolve=False),
                arguments.Constant("="),
                arguments.Expression("value")
            )
            def render_tag(self, context, variable, value):
                # assign the evaluated expression into the context
                context[variable] = value
                return ""
        tpls = [
            ('{% set var = value %}{{var}}','2',{"var":1,"value":2}),
            ('{% set var=value %}{{var}}','2',{"var":1,"value":2}),
            ('{% set var = value - 1 %}{{var}}','1',{"var":1,"value":2}),
            ('{% set var = value * value + var %}{{var}}','5',{"var":1,"value":2}),
            ('{% set var = value * (value + var) %}{{var}}','6',{"var":1,"value":2}),
            ('{% set var = value|default:"default" %}{{var}}','default',{"var":1,}),
            ('{% set var = value|default:"default"|upper %}{{var}}','VALUE',{"value":"value"}),
            ('{% set var="https://url.com?key=value" %}{{var}}','https://url.com?key=value',{}),
        ]
        self._tag_tester(tpls, Set)
        lib = template.Library()
        lib.tag(Set.as_tag())
        builtins.append(lib)
        # an unloaded filter is a syntax error, both in plain variables and
        # inside Expression arguments
        self.assertRaises(template.TemplateSyntaxError, template.Template, "{{ value|spaceout }}")
        self.assertRaises(template.TemplateSyntaxError, template.Template,
                          "{% set var = value|spaceout %}")
        # after {% load %} the custom filter works inside the expression
        t = template.Template("{% load ct_filter %}{% set var = value|spaceout %}{{var}}")
        c = template.Context({"value":"separate"})
        s = t.render(c)
        self.assertEqual(s, "s e p a r a t e")
        builtins.remove(lib)
    def test_21_chain(self):
        """
        Dotted lookup chains: each step may be an attribute, a dict/index
        item, or a zero-argument method call, mirroring Django's variable
        resolution — including explicit ``()`` call syntax in tag arguments.
        """
        class Echo(core.Tag):
            options = core.Options(arguments.Argument("arg"))
            def render_tag(self, context, arg):
                return arg
        # plain attribute access
        class Field(object):
            def __init__(self, value):
                self.field = value
        # attribute access via __getattr__
        class Attr(object):
            def __init__(self, value):
                self._attr = value
            def __getattr__(self, name):
                if name == "attr":
                    return self._attr
                raise AttributeError()
        # item access via __getitem__
        class Item(object):
            def __init__(self, value):
                self._item = value
            def __getitem__(self, key):
                if key == "item":
                    return self._item
                raise KeyError()
        # zero-argument method call
        class Method(object):
            def __init__(self, value):
                self._value = value
            def get(self):
                return self._value
        ## {{ chain.dict.0.field.attr.item.get }}
        chain1 = { "dict": [ Field(Attr(Item(Method("value")))) ] }
        ## {{ chain.0.get.dict.attr.field.item }}
        chain2 = [ { "dict": Method(Attr(Field(Item("value")))) } ]
        ## {{ chain.get.0.dict.attr.field.item }}
        chain3 = Method([ { "dict": Attr(Field(Item("value"))) } ])
        # the chains must resolve identically in plain variable syntax...
        tpls = [
            ('{{ chain.dict.0.field.attr.item.get }}','value',{"chain":chain1}),
            ('{{ chain.0.dict.get.attr.field.item }}','value',{"chain":chain2}),
            ('{{ chain.get.0.dict.attr.field.item }}','value',{"chain":chain3}),
        ]
        self._tag_tester(tpls)
        # ... and as tag arguments, with or without explicit () calls
        tpls = [
            ('{% echo chain.dict.0.field.attr.item.get %}','value',{"chain":chain1}),
            ('{% echo chain.0.dict.get.attr.field.item %}','value',{"chain":chain2}),
            ('{% echo chain.get.0.dict.attr.field.item %}','value',{"chain":chain3}),
            ('{% echo chain.dict.0.field.attr.item.get() %}','value',{"chain":chain1}),
            ('{% echo chain.0.dict.get().attr.field.item %}','value',{"chain":chain2}),
            ('{% echo chain.get().0.dict.attr.field.item %}','value',{"chain":chain3}),
        ]
        self._tag_tester(tpls, Echo)
    def test_22_commas(self):
        """
        commas=True on MultiValueArgument / MultiValueKeywordArgument makes
        commas the token separator, which lets expression values (ternaries,
        arithmetic) span whitespace within a single argument.
        """
        class Concat(core.Tag):
            options = core.Options(arguments.MultiValueArgument("args"))
            def render_tag(self, context, args):
                return "".join([str(arg) for arg in args])
        class ConcatCommas(core.Tag):
            options = core.Options(arguments.MultiValueArgument("args", commas=True))
            def render_tag(self, context, args):
                return "".join([str(arg) for arg in args])
        class With(core.Tag):
            options = core.Options(
                arguments.MultiValueKeywordArgument("arguments"),
                arguments.NodeList("nodelist"),
                arguments.EndTag()
            )
            def render_tag(self, context, arguments, nodelist):
                # push the keyword arguments onto the context for the body
                context.push()
                try:
                    context.update(arguments)
                    return nodelist.render(context)
                finally:
                    context.pop()
        class WithCommas(core.Tag):
            options = core.Options(
                arguments.MultiValueKeywordArgument("arguments", commas=True),
                arguments.NodeList("nodelist"),
                arguments.EndTag()
            )
            def render_tag(self, context, arguments, nodelist):
                context.push()
                try:
                    context.update(arguments)
                    return nodelist.render(context)
                finally:
                    context.pop()
        tpls = [
            ('{% concat "a" "b" "c" d "e" "f" g %}', 'abcdefg' , {"d":"d","g":"g"}),
            ('{% concat num * 3 22 / 2 %}', '9911' , {"num" : 33}),
            ('{% concat d if True else 1 22 / 2 %}', 'd11' , {"d":"d","g":"g"}),
            ('{% concat_commas "a","b","c",d,"e","f",g %}', 'abcdefg' , {"d":"d","g":"g"}),
            ('{% concat_commas num * 3, 22 / 2 %}', '9911' , {"num" : 33}),
            ('{% concat_commas d if True else 1, 22 / 2 %}', 'd11' , {"d":"d","g":"g"}),
            ('{% with a=1 b=6/3 %}{{a}}|{{b}}{% endwith %}', '1|2', {}),
            ('{% with a=1 if False else 2 b=6/3 %}{{a}}|{{b}}{% endwith %}', '2|2', {}),
            ('{% with_commas a=1, b=6/3 %}{{a}}|{{b}}{% endwith_commas %}', '1|2', {}),
            ('{% with_commas a=1 if False else 2, b=6 %}{{a}}|{{b}}{% endwith_commas %}','2|6',{}),
        ]
        self._tag_tester(tpls, Concat, ConcatCommas, With, WithCommas)
    def test_23_function_signature(self):
        """
        Constant + comma-separated MultiValueArgument parse a function-style
        signature: a {% macro name(args...) %} tag whose body is re-rendered
        with call-time argument bindings.
        """
        class Macro(core.Tag):
            options = core.Options(
                arguments.Argument("macroname", resolve=False),
                arguments.Constant("("),
                arguments.MultiValueArgument("arg_names", resolve=False,
                                             required=False, commas=True),
                arguments.Constant(")"),
                arguments.NodeList("nodelist"),
                arguments.EndTag()
            )
            def render_tag(self, context, macroname, arg_names, nodelist):
                def call_macro(*args):
                    # the macro is bound inside its own context so it can
                    # call itself recursively
                    inner_context = template.Context({macroname: call_macro})
                    i = 0
                    for arg_name in arg_names:
                        inner_context[arg_name] = args[i]
                        i += 1
                    return nodelist.render(inner_context)
                # defining the macro renders nothing; it only binds the name
                context[macroname] = call_macro
                return ""
        class Echo(core.Tag):
            options = core.Options(arguments.Argument("expression"))
            def render_tag(self, context, expression):
                return expression
        tpls = [
            ('{% macro hello() %}Hello!{% endmacro %}{% echo hello() %}','Hello!',{}),
            ('{% macro hello(name) %}Hello {{name}}!{% endmacro %}{% echo hello("Fred") %}',
             'Hello Fred!',{}),
            ('{% macro hello(first, last) %}Hello {{first}} {{last}}!{% endmacro %}'
             '{% echo hello("John", lastname) %}','Hello John Smith!',{"lastname":"Smith"}),
        ]
        self._tag_tester(tpls, Macro, Echo)
def test_24_child_tags(self):
class Hello(core.Tag):
options = core.Options(
arguments.Argument("name", required=False)
)
def render_tag(self, context, name=None):
return "Hello " + str(self.get_name(name)) + "!"
def get_name(self, name):
return name if name is not None else "World"
class Hola(Hello):
def render_tag(self, context, name=None):
return "Hola " + str(self.get_name(name)) + "!"
tpls = [
('{% hello %}','Hello World!',{}),
('{% hello "Joe" %}','Hello Joe!',{}),
('{% hello (2 + 19) * 2 %}','Hello 42!',{}),
('{% hello name %}','Hello Josephine!',{"name": "Josephine"}),
('{% hola %}','Hola World!',{}),
('{% hola "Jose" %}','Hola Jose!',{}),
('{% hola (2 + 19) * 2 %}','Hola 42!',{}),
('{% hola name %}','Hola Josefina!',{"name": "Josefina"}),
]
self._tag_tester(tpls, Hello, Hola)
    def test_25_one_of(self):
        """
        OneOf accepts whichever alternative argument matches the tokens:
        here either key=value pairs or a single dict-valued argument.
        With all alternatives required, supplying neither is an error;
        with a non-required alternative the whole group becomes optional.
        """
        class Scope(core.Tag):
            options = core.Options(
                arguments.OneOf(
                    arguments.MultiValueKeywordArgument("newcontext", required=True),
                    arguments.Argument("newcontext")
                ),
                arguments.NodeList("nodelist"),
                arguments.EndTag()
            )
            def render_tag(self, context, nodelist, newcontext):
                context.push()
                try:
                    if not isinstance(newcontext, dict):
                        raise TypeError("Scope requires a dictionary.")
                    context.update(newcontext)
                    return nodelist.render(context)
                finally:
                    context.pop()
        tpls = [
            ('{% scope num=(1+2+3+4)/5 str="xyz" %}{{num}},{{str}}{% endscope %}','2,xyz',{}),
            ('{% scope {"num":(1+2+3+4)/5,"str":"xyz"} %}{{num}},{{str}}{% endscope %}','2,xyz',{}),
            ('{% scope dict %}{{num}},{{str}}{% endscope %}','2,xyz',
             {"dict":{"num":(1+2+3+4)/5,"str":"xyz"}}),
        ]
        self._tag_tester(tpls, Scope)
        # no tokens at all: the required OneOf is unsatisfied
        self.assertRaises(exceptions.ArgumentRequiredError, self._tag_tester,
                          [('{% scope %}{% endscope %}','',{})], Scope)
        ## Testing get_nodes_by_type
        lib = template.Library()
        lib.tag(Scope.as_tag())
        builtins.append(lib)
        for tpl in tpls:
            tmpl = template.Template(tpl[0])
            nodes = tmpl.nodelist.get_nodes_by_type(VariableNode)
            self.assertEqual(len(nodes),2)
        builtins.remove(lib)
        # same tag, but with the keyword alternative non-required: an empty
        # argument list is now allowed
        class Scope(core.Tag):
            options = core.Options(
                arguments.OneOf(
                    arguments.MultiValueKeywordArgument("newcontext", required=False),
                    arguments.Argument("newcontext")
                ),
                arguments.NodeList("nodelist"),
                arguments.EndTag()
            )
            def render_tag(self, context, nodelist, newcontext):
                context.push()
                try:
                    if not isinstance(newcontext, dict):
                        raise TypeError("Scope requires a dictionary.")
                    context.update(newcontext)
                    return nodelist.render(context)
                finally:
                    context.pop()
        tpls = [
            ('{% scope %}Hello World!{% endscope %}','Hello World!',{}),
        ]
        self._tag_tester(tpls, Scope)
    def test_26_flag_after_arguments(self):
        """
        A Flag placed after a greedy MultiValueArgument still gets its
        token; wrapping the flag in Optional(...) with a trailing keyword
        makes the whole group conditional on that keyword's presence.
        """
        class TestTag(core.Tag):
            options = core.Options(
                arguments.MultiValueArgument("myargs", required=False),
                arguments.Flag('allowed', true_values=['allowed'], default=False)
            )
            def render_tag(self, context, myargs, allowed):
                if not allowed:
                    return "NOT ALLOWED!"
                else:
                    return ", ".join(str(x) for x in myargs)
        tpls = [
            ("{% test_tag 'one' 'two' 'three' %}", 'NOT ALLOWED!',{}),
            ("{% test_tag 'one' 'two' 'three' allowed %}", 'one, two, three',{}),
            ('{% test_tag one two three allowed %}', '1, 2, 3',{'one':1,'two':2,'three':3}),
        ]
        self._tag_tester(tpls, TestTag)
        # Optional group: flag + literal 'with'; the flag only takes effect
        # when the full group (including 'with') appears
        class TestTag(core.Tag):
            options = core.Options(
                arguments.MultiValueArgument("myargs", required=False),
                arguments.Optional(
                    arguments.Flag('allowed', true_values=['allowed'], default=False),
                    'with'
                )
            )
            def render_tag(self, context, myargs, allowed):
                if not allowed:
                    return "NOT ALLOWED!"
                else:
                    return ", ".join(str(x) for x in myargs)
        tpls = [
            ("{% test_tag 'one' 'two' 'three' %}", 'NOT ALLOWED!',{}),
            ("{% test_tag 'one' 'two' 'three' with %}", 'NOT ALLOWED!',{}),
            ("{% test_tag 'one' 'two' 'three' allowed with %}", 'one, two, three',{}),
            ('{% test_tag one two allowed with %}', '1, 2',{'one':1,'two':2}),
        ]
        self._tag_tester(tpls, TestTag)
        # same, but with explicit false_values instead of a default
        class TestTag(core.Tag):
            options = core.Options(
                arguments.MultiValueArgument("myargs", required=False),
                arguments.Optional(
                    arguments.Flag('allowed', true_values=['allowed'], false_values=['not']),
                    'with'
                )
            )
            def render_tag(self, context, myargs, allowed):
                if not allowed:
                    return "NOT ALLOWED!"
                else:
                    return ", ".join(str(x) for x in myargs)
        tpls = [
            ("{% test_tag 'one' 'two' 'three' not with %}", 'NOT ALLOWED!',{}),
            ("{% test_tag 'one' 'two' 'three' allowed with %}", 'one, two, three',{}),
            ('{% test_tag one two allowed with %}', '1, 2',{'one':1,'two':2}),
            ('{% test_tag one two not with %}', 'NOT ALLOWED!',{'one':1,'two':2}),
        ]
        self._tag_tester(tpls, TestTag)
def test_27_bad_names(self):
self.assertRaises(exceptions.BaseError, arguments.BaseArgument, True)
self.assertRaises(exceptions.BaseError, arguments.BaseArgument, True, False)
def test_99_middleware(self):
"""
This needs to be last because it modifies the global "builtins" store
"""
from django.conf import settings
from django.test.client import Client
from customtags.decorators import block
@block
def hello(context, nodelist, name="world", as_name="message"):
context.push()
try:
context[as_name] = "hello %s" % name
return nodelist.render(context)
finally:
context.pop()
return rendered
INSTALLED_APPS = ('customtags','customtags_tests',)
MIDDLEWARE_CLASSES = ('customtags.middleware.AddToBuiltinsMiddleware',)
old_apps = settings.INSTALLED_APPS
old_middleware = settings.MIDDLEWARE_CLASSES
settings.INSTALLED_APPS = INSTALLED_APPS
settings.MIDDLEWARE_CLASSES = MIDDLEWARE_CLASSES
c = Client()
response = c.get("/test/")
self.assertEqual(response.content, 'hello customtags\n')
response = c.get("/test2/")
self.assertEqual(response.content, 'INPUT\n')
settings.INSTALLED_APPS = old_apps
settings.MIDDLEWARE_CLASSES = old_middleware
| |
# Copyright 2013, Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
from unittest import mock
from unittest.mock import patch
import uuid
from oslo_limit import exception as ol_exc
from oslo_utils import encodeutils
from oslo_utils import units
from glance.common import exception
from glance.common import store_utils
import glance.quota
from glance.quota import keystone as ks_quota
from glance.tests.unit import fixtures as glance_fixtures
from glance.tests.unit import utils as unit_test_utils
from glance.tests import utils as test_utils
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
class FakeContext(object):
    """Minimal request-context stand-in: a fixed, non-admin owner.

    Tests overwrite ``owner`` on individual instances when they need a
    second tenant.
    """

    def __init__(self):
        self.owner = 'someone'
        self.is_admin = False
class FakeImage(object):
    """Bare-bones image double; ``set_data`` only tracks total bytes seen."""

    size = None
    image_id = 'someid'
    locations = [{'url': 'file:///not/a/path', 'metadata': {}}]
    tags = set([])

    def __init__(self, **kwargs):
        # Only extra_properties is honoured; any other kwargs are ignored.
        self.extra_properties = kwargs.get('extra_properties', {})

    def set_data(self, data, size=None, backend=None, set_active=True):
        # size/backend/set_active exist for signature parity and are unused.
        self.size = sum(len(chunk) for chunk in data)
class TestImageQuota(test_utils.BaseTestCase):
    """Exercise user_storage_quota enforcement in glance.quota.ImageProxy
    and its location-list proxy, using fake DB/store backends."""
    def setUp(self):
        super(TestImageQuota, self).setUp()
    def _get_image(self, location_count=1, image_size=10):
        """Return an ImageProxy for image 'xyz', pre-registered in the fake
        DB with ``location_count`` active locations of ``image_size`` bytes."""
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'xyz'
        base_image.size = image_size
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        locations = []
        for i in range(location_count):
            locations.append({'url': 'file:///g/there/it/is%d' % i,
                              'metadata': {}, 'status': 'active'})
        image_values = {'id': 'xyz', 'owner': context.owner,
                        'status': 'active', 'size': image_size,
                        'locations': locations}
        db_api.image_create(context, image_values)
        return image
    def test_quota_allowed(self):
        """Data exactly at the quota limit is accepted."""
        quota = 10
        self.config(user_storage_quota=str(quota))
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        data = '*' * quota
        base_image.set_data(data, size=None)
        image.set_data(data)
        self.assertEqual(quota, base_image.size)
    def _test_quota_allowed_unit(self, data_length, config_quota):
        """Accept ``data_length`` bytes under a quota given with a unit
        suffix (e.g. '1KB')."""
        self.config(user_storage_quota=config_quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        data = '*' * data_length
        base_image.set_data(data, size=None)
        image.set_data(data)
        self.assertEqual(data_length, base_image.size)
    def test_quota_allowed_unit_b(self):
        self._test_quota_allowed_unit(10, '10B')
    def test_quota_allowed_unit_kb(self):
        self._test_quota_allowed_unit(10, '1KB')
    def test_quota_allowed_unit_mb(self):
        self._test_quota_allowed_unit(10, '1MB')
    def test_quota_allowed_unit_gb(self):
        self._test_quota_allowed_unit(10, '1GB')
    def test_quota_allowed_unit_tb(self):
        self._test_quota_allowed_unit(10, '1TB')
    def _quota_exceeded_size(self, quota, data,
                             deleted=True, size=None):
        """Assert that setting ``data`` raises StorageQuotaFull under
        ``quota``; when ``deleted``, first (mock-)delete the image's
        initial location."""
        self.config(user_storage_quota=quota)
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        if deleted:
            with patch.object(store_utils, 'safe_delete_from_backend'):
                store_utils.safe_delete_from_backend(
                    context,
                    image.image_id,
                    base_image.locations[0])
        self.assertRaises(exception.StorageQuotaFull,
                          image.set_data,
                          data,
                          size=size)
    def test_quota_exceeded_no_size(self):
        quota = 10
        data = '*' * (quota + 1)
        # NOTE(jbresnah) When the image size is None it means that it is
        # not known. In this case the only time we will raise an
        # exception is when there is no room left at all, thus we know
        # it will not fit.
        # That's why 'get_remaining_quota' is mocked with return_value = 0.
        with patch.object(glance.api.common, 'get_remaining_quota',
                          return_value=0):
            self._quota_exceeded_size(str(quota), data)
    def test_quota_exceeded_with_right_size(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, size=len(data),
                                  deleted=False)
    def test_quota_exceeded_with_right_size_b(self):
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size('10B', data, size=len(data),
                                  deleted=False)
    def test_quota_exceeded_with_right_size_kb(self):
        quota = units.Ki
        data = '*' * (quota + 1)
        self._quota_exceeded_size('1KB', data, size=len(data),
                                  deleted=False)
    def test_quota_exceeded_with_lie_size(self):
        """An understated claimed size does not excuse oversized data."""
        quota = 10
        data = '*' * (quota + 1)
        self._quota_exceeded_size(str(quota), data, deleted=False,
                                  size=quota - 1)
    def test_quota_exceeded_keystone_quotas(self):
        """Enabling keystone limits bypasses the config-file quota."""
        # Set our global limit to a tiny ten bytes
        self.config(user_storage_quota='10B')
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = 'id'
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        # With keystone quotas disabled, a 100 byte image should fail the
        # global limit.
        data = '*' * 100
        self.assertRaises(exception.StorageQuotaFull,
                          image.set_data,
                          data,
                          size=len(data))
        # If we turn on keystone quotas, the global limit gets ignored
        # so the same image no longer fails.
        self.config(use_keystone_limits=True)
        image.set_data(data, size=len(data))
    def test_append_location(self):
        """Appending a location mirrors plain list.append semantics."""
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.append(new_location)
        pre_add_locations.append(new_location)
        self.assertEqual(image.locations, pre_add_locations)
    def test_insert_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.insert(0, new_location)
        pre_add_locations.insert(0, new_location)
        self.assertEqual(image.locations, pre_add_locations)
    def test_extend_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations.extend([new_location])
        pre_add_locations.extend([new_location])
        self.assertEqual(image.locations, pre_add_locations)
    def test_iadd_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        pre_add_locations = image.locations[:]
        image.locations += [new_location]
        pre_add_locations += [new_location]
        self.assertEqual(image.locations, pre_add_locations)
    def test_set_location(self):
        new_location = {'url': 'file:///a/path', 'metadata': {},
                        'status': 'active'}
        image = self._get_image()
        image.locations = [new_location]
        self.assertEqual(image.locations, [new_location])
    def _make_image_with_quota(self, image_size=10, location_count=2):
        """Return an image whose existing locations exactly fill the quota."""
        quota = image_size * location_count
        self.config(user_storage_quota=str(quota))
        return self._get_image(image_size=image_size,
                               location_count=location_count)
    def test_exceed_append_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.append,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})
    def test_exceed_insert_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.insert,
                          0,
                          {'url': 'file:///a/path', 'metadata': {},
                           'status': 'active'})
    def test_exceed_extend_location(self):
        image = self._make_image_with_quota()
        self.assertRaises(exception.StorageQuotaFull,
                          image.locations.extend,
                          [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}])
    def test_set_location_under(self):
        image = self._make_image_with_quota(location_count=1)
        image.locations = [{'url': 'file:///a/path', 'metadata': {},
                            'status': 'active'}]
    def test_set_location_exceed(self):
        image = self._make_image_with_quota(location_count=1)
        try:
            image.locations = [{'url': 'file:///a/path', 'metadata': {},
                                'status': 'active'},
                               {'url': 'file:///a/path2', 'metadata': {},
                                'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass
    def test_iadd_location_exceed(self):
        image = self._make_image_with_quota(location_count=1)
        try:
            image.locations += [{'url': 'file:///a/path', 'metadata': {},
                                 'status': 'active'}]
            self.fail('Should have raised the quota exception')
        except exception.StorageQuotaFull:
            pass
    def test_append_location_for_queued_image(self):
        """Queued images (size None) get location size from the backend."""
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)
        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations.append({'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)
    def test_insert_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)
        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations.insert(0,
                               {'url': 'file:///fake.img.tar.gz',
                                'metadata': {}})
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)
    def test_set_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)
        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations = [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
        self.assertEqual([{'url': 'file:///fake.img.tar.gz', 'metadata': {}}],
                         image.locations)
    def test_iadd_location_for_queued_image(self):
        context = FakeContext()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        base_image = FakeImage()
        base_image.image_id = str(uuid.uuid4())
        image = glance.quota.ImageProxy(base_image, context, db_api, store)
        self.assertIsNone(image.size)
        self.mock_object(store_api, 'get_size_from_backend',
                         unit_test_utils.fake_get_size_from_backend)
        image.locations += [{'url': 'file:///fake.img.tar.gz', 'metadata': {}}]
        self.assertIn({'url': 'file:///fake.img.tar.gz', 'metadata': {}},
                      image.locations)
class TestImagePropertyQuotas(test_utils.BaseTestCase):
    """Enforcement of image_property_quota by ImageRepoProxy add/save."""
    def setUp(self):
        super(TestImagePropertyQuotas, self).setUp()
        self.base_image = FakeImage()
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())
        self.image_repo_mock = mock.Mock()
        self.image_repo_mock.add.return_value = self.base_image
        self.image_repo_mock.save.return_value = self.base_image
        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())
    def test_save_image_with_image_property(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
    def test_save_image_too_many_image_properties(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
                                self.image_repo_proxy.save, self.image)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))
    def test_save_image_unlimited_image_properties(self):
        """A quota of -1 disables the property limit entirely."""
        self.config(image_property_quota=-1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
    def test_add_image_with_image_property(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.add(self.image)
        self.image_repo_mock.add.assert_called_once_with(self.base_image)
    def test_add_image_too_many_image_properties(self):
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'bar', 'foo2': 'bar2'}
        exc = self.assertRaises(exception.ImagePropertyLimitExceeded,
                                self.image_repo_proxy.add, self.image)
        self.assertIn("Attempted: 2, Maximum: 1",
                      encodeutils.exception_to_unicode(exc))
    def test_add_image_unlimited_image_properties(self):
        self.config(image_property_quota=-1)
        self.image.extra_properties = {'foo': 'bar'}
        self.image_repo_proxy.add(self.image)
        self.image_repo_mock.add.assert_called_once_with(self.base_image)
    def _quota_exceed_setup(self):
        """Build an image already holding exactly quota-many properties."""
        self.config(image_property_quota=2)
        self.base_image.extra_properties = {'foo': 'bar', 'spam': 'ham'}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())
    def test_modify_image_properties_when_quota_exceeded(self):
        """Editing existing properties is allowed even when over quota."""
        self._quota_exceed_setup()
        self.config(image_property_quota=1)
        self.image.extra_properties = {'foo': 'frob', 'spam': 'eggs'}
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertEqual('frob', self.base_image.extra_properties['foo'])
        self.assertEqual('eggs', self.base_image.extra_properties['spam'])
    def test_delete_image_properties_when_quota_exceeded(self):
        """Deleting a property is allowed even when over quota."""
        self._quota_exceed_setup()
        self.config(image_property_quota=1)
        del self.image.extra_properties['foo']
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertNotIn('foo', self.base_image.extra_properties)
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
    def test_invalid_quota_config_parameter(self):
        """A malformed user_storage_quota surfaces as InvalidOptionValue."""
        self.config(user_storage_quota='foo')
        location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
        self.assertRaises(exception.InvalidOptionValue,
                          self.image.locations.append, location)
    def test_exceed_quota_during_patch_operation(self):
        # Temporarily exceeding the quota mid-patch is fine as long as the
        # final property count is back within the limit before save().
        self._quota_exceed_setup()
        self.image.extra_properties['frob'] = 'baz'
        self.image.extra_properties['lorem'] = 'ipsum'
        self.assertEqual('bar', self.base_image.extra_properties['foo'])
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertEqual('baz', self.base_image.extra_properties['frob'])
        self.assertEqual('ipsum', self.base_image.extra_properties['lorem'])
        del self.image.extra_properties['frob']
        del self.image.extra_properties['lorem']
        self.image_repo_proxy.save(self.image)
        call_args = mock.call(self.base_image, from_state=None)
        self.assertEqual(call_args, self.image_repo_mock.save.call_args)
        self.assertEqual('bar', self.base_image.extra_properties['foo'])
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertNotIn('frob', self.base_image.extra_properties)
        self.assertNotIn('lorem', self.base_image.extra_properties)
    def test_quota_exceeded_after_delete_image_properties(self):
        self.config(image_property_quota=3)
        self.base_image.extra_properties = {'foo': 'bar',
                                            'spam': 'ham',
                                            'frob': 'baz'}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())
        self.config(image_property_quota=1)
        del self.image.extra_properties['foo']
        self.image_repo_proxy.save(self.image)
        self.image_repo_mock.save.assert_called_once_with(self.base_image,
                                                          from_state=None)
        self.assertNotIn('foo', self.base_image.extra_properties)
        self.assertEqual('ham', self.base_image.extra_properties['spam'])
        self.assertEqual('baz', self.base_image.extra_properties['frob'])
class TestImageTagQuotas(test_utils.BaseTestCase):
    """Enforcement of image_tag_quota on the ImageProxy tags attribute."""
    def setUp(self):
        super(TestImageTagQuotas, self).setUp()
        self.base_image = mock.Mock()
        self.base_image.tags = set([])
        self.base_image.extra_properties = {}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())
        self.image_repo_mock = mock.Mock()
        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())
    def test_replace_image_tag(self):
        self.config(image_tag_quota=1)
        self.image.tags = ['foo']
        self.assertEqual(1, len(self.image.tags))
    def test_replace_too_many_image_tags(self):
        self.config(image_tag_quota=0)
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                setattr, self.image, 'tags', ['foo', 'bar'])
        self.assertIn('Attempted: 2, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))
        self.assertEqual(0, len(self.image.tags))
    def test_replace_unlimited_image_tags(self):
        """A quota of -1 disables the tag limit."""
        self.config(image_tag_quota=-1)
        self.image.tags = ['foo']
        self.assertEqual(1, len(self.image.tags))
    def test_add_image_tag(self):
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))
    def test_add_too_many_image_tags(self):
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                self.image.tags.add, 'bar')
        self.assertIn('Attempted: 2, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))
    def test_add_unlimited_image_tags(self):
        self.config(image_tag_quota=-1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))
    def test_remove_image_tag_while_over_quota(self):
        """Removal must still work after the quota has been lowered."""
        self.config(image_tag_quota=1)
        self.image.tags.add('foo')
        self.assertEqual(1, len(self.image.tags))
        self.config(image_tag_quota=0)
        self.image.tags.remove('foo')
        self.assertEqual(0, len(self.image.tags))
class TestQuotaImageTagsProxy(test_utils.BaseTestCase):
    """Behaviour of the set-like QuotaImageTagsProxy wrapper itself."""
    def setUp(self):
        super(TestQuotaImageTagsProxy, self).setUp()
    def test_add(self):
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        proxy.add('foo')
        self.assertIn('foo', proxy)
    def test_add_too_many_tags(self):
        self.config(image_tag_quota=0)
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        exc = self.assertRaises(exception.ImageTagLimitExceeded,
                                proxy.add, 'bar')
        self.assertIn('Attempted: 1, Maximum: 0',
                      encodeutils.exception_to_unicode(exc))
    def test_equals(self):
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        self.assertEqual(set([]), proxy)
    def test_not_equals(self):
        proxy = glance.quota.QuotaImageTagsProxy(set([]))
        self.assertNotEqual('foo', proxy)
    def test_contains(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['foo']))
        self.assertIn('foo', proxy)
    def test_len(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['foo',
                                                      'bar',
                                                      'baz',
                                                      'niz']))
        self.assertEqual(4, len(proxy))
    def test_iter(self):
        """Iterating the proxy yields each underlying tag exactly once."""
        items = set(['foo', 'bar', 'baz', 'niz'])
        proxy = glance.quota.QuotaImageTagsProxy(items.copy())
        self.assertEqual(4, len(items))
        for item in proxy:
            items.remove(item)
        self.assertEqual(0, len(items))
    def test_tags_attr_no_loop(self):
        """Accessing .tags on a proxy built from None must not recurse."""
        proxy = glance.quota.QuotaImageTagsProxy(None)
        self.assertEqual(set([]), proxy.tags)
    def test_tags_deepcopy(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['a', 'b']))
        proxy_copy = copy.deepcopy(proxy)
        self.assertEqual(set(['a', 'b']), proxy_copy.tags)
        self.assertIn('a', proxy_copy)
        # remove is found via __getattr__ delegation to the wrapped set
        proxy_copy.remove('a')
        self.assertNotIn('a', proxy_copy)
    def test_tags_delete(self):
        proxy = glance.quota.QuotaImageTagsProxy(set(['a', 'b']))
        self.assertEqual(set(['a', 'b']), proxy.tags)
        del proxy.tags
        self.assertIsNone(proxy.tags)
class TestImageMemberQuotas(test_utils.BaseTestCase):
    """Enforcement of image_member_quota by ImageMemberFactoryProxy."""
    def setUp(self):
        super(TestImageMemberQuotas, self).setUp()
        db_api = unit_test_utils.FakeDB()
        store_api = unit_test_utils.FakeStoreAPI()
        store = unit_test_utils.FakeStoreUtils(store_api)
        context = FakeContext()
        self.image = mock.Mock()
        self.base_image_member_factory = mock.Mock()
        self.image_member_factory = glance.quota.ImageMemberFactoryProxy(
            self.base_image_member_factory, context,
            db_api, store)
    def test_new_image_member(self):
        """Under quota, the call is delegated to the wrapped factory."""
        self.config(image_member_quota=1)
        self.image_member_factory.new_image_member(self.image,
                                                   'fake_id')
        nim = self.base_image_member_factory.new_image_member
        nim.assert_called_once_with(self.image, 'fake_id')
    def test_new_image_member_unlimited_members(self):
        """A quota of -1 disables the member limit."""
        self.config(image_member_quota=-1)
        self.image_member_factory.new_image_member(self.image,
                                                   'fake_id')
        nim = self.base_image_member_factory.new_image_member
        nim.assert_called_once_with(self.image, 'fake_id')
    def test_new_image_member_too_many_members(self):
        self.config(image_member_quota=0)
        self.assertRaises(exception.ImageMemberLimitExceeded,
                          self.image_member_factory.new_image_member,
                          self.image, 'fake_id')
class TestImageLocationQuotas(test_utils.BaseTestCase):
    """Enforcement of image_location_quota on the ImageProxy locations."""
    def setUp(self):
        super(TestImageLocationQuotas, self).setUp()
        self.base_image = mock.Mock()
        self.base_image.locations = []
        self.base_image.size = 1
        self.base_image.extra_properties = {}
        self.image = glance.quota.ImageProxy(self.base_image,
                                             mock.Mock(),
                                             mock.Mock(),
                                             mock.Mock())
        self.image_repo_mock = mock.Mock()
        self.image_repo_proxy = glance.quota.ImageRepoProxy(
            self.image_repo_mock,
            mock.Mock(),
            mock.Mock(),
            mock.Mock())
    def test_replace_image_location(self):
        self.config(image_location_quota=1)
        self.image.locations = [{"url": "file:///fake.img.tar.gz",
                                 "metadata": {}
                                 }]
        self.assertEqual(1, len(self.image.locations))
    def test_replace_too_many_image_locations(self):
        self.config(image_location_quota=1)
        self.image.locations = [{"url": "file:///fake.img.tar.gz",
                                 "metadata": {}}
                                ]
        locations = [
            {"url": "file:///fake1.img.tar.gz", "metadata": {}},
            {"url": "file:///fake2.img.tar.gz", "metadata": {}},
            {"url": "file:///fake3.img.tar.gz", "metadata": {}}
        ]
        exc = self.assertRaises(exception.ImageLocationLimitExceeded,
                                setattr, self.image, 'locations', locations)
        self.assertIn('Attempted: 3, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))
        self.assertEqual(1, len(self.image.locations))
    def test_replace_unlimited_image_locations(self):
        """A quota of -1 disables the location limit."""
        self.config(image_location_quota=-1)
        self.image.locations = [{"url": "file:///fake.img.tar.gz",
                                 "metadata": {}}
                                ]
        self.assertEqual(1, len(self.image.locations))
    def test_add_image_location(self):
        self.config(image_location_quota=1)
        location = {"url": "file:///fake.img.tar.gz", "metadata": {}}
        self.image.locations.append(location)
        self.assertEqual(1, len(self.image.locations))
    def test_add_too_many_image_locations(self):
        self.config(image_location_quota=1)
        location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
        self.image.locations.append(location1)
        location2 = {"url": "file:///fake2.img.tar.gz", "metadata": {}}
        exc = self.assertRaises(exception.ImageLocationLimitExceeded,
                                self.image.locations.append, location2)
        self.assertIn('Attempted: 2, Maximum: 1',
                      encodeutils.exception_to_unicode(exc))
    def test_add_unlimited_image_locations(self):
        self.config(image_location_quota=-1)
        location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
        self.image.locations.append(location1)
        self.assertEqual(1, len(self.image.locations))
    def test_remove_image_location_while_over_quota(self):
        """Removal must still work after the quota has been lowered."""
        self.config(image_location_quota=1)
        location1 = {"url": "file:///fake1.img.tar.gz", "metadata": {}}
        self.image.locations.append(location1)
        self.assertEqual(1, len(self.image.locations))
        self.config(image_location_quota=0)
        self.image.locations.remove(location1)
        self.assertEqual(0, len(self.image.locations))
class TestImageKeystoneQuota(test_utils.BaseTestCase):
    """Enforcement of unified (keystone/oslo.limit) image-size quotas."""
    def setUp(self):
        super(TestImageKeystoneQuota, self).setUp()
        # Registered limits: 500 (MiB) for total image size, plus a second
        # limit used by the multi-limit tests below.
        default_limits = {
            ks_quota.QUOTA_IMAGE_SIZE_TOTAL: 500,
            'another_limit': 2,
        }
        ksqf = glance_fixtures.KeystoneQuotaFixture(**default_limits)
        self.useFixture(ksqf)
        self.db_api = unit_test_utils.FakeDB()
        self.useFixture(fixtures.MockPatch('glance.quota.keystone.db',
                                           self.db_api))
    def _create_fake_image(self, context, size):
        """Record an active image of ``size`` MiB with two locations."""
        location_count = 2
        locations = []
        for i in range(location_count):
            locations.append({'url': 'file:///g/there/it/is%d' % i,
                              'status': 'active',
                              'metadata': {}})
        image_values = {'id': str(uuid.uuid4()), 'owner': context.owner,
                        'status': 'active', 'size': size * units.Mi,
                        'locations': locations}
        self.db_api.image_create(context, image_values)
    def test_enforce_overquota(self):
        # Check that a single large image with multiple locations will
        # trip the quota check.
        self.config(use_keystone_limits=True)
        context = FakeContext()
        self._create_fake_image(context, 300)
        exc = self.assertRaises(exception.LimitExceeded,
                                ks_quota.enforce_image_size_total,
                                context, context.owner)
        self.assertIn('image_size_total is over limit of 500', str(exc))
    def test_enforce_overquota_with_delta(self):
        # Check that delta is honored, if used.
        self.config(use_keystone_limits=True)
        context = FakeContext()
        self._create_fake_image(context, 200)
        ks_quota.enforce_image_size_total(context, context.owner)
        ks_quota.enforce_image_size_total(context, context.owner,
                                          delta=50)
        self.assertRaises(exception.LimitExceeded,
                          ks_quota.enforce_image_size_total,
                          context, context.owner, delta=200)
    def test_enforce_overquota_disabled(self):
        # Just like the overquota case above, but without being enabled,
        # so no failure
        self.config(use_keystone_limits=False)
        context = FakeContext()
        self._create_fake_image(context, 300)
        # Does not raise because keystone limits are disabled
        ks_quota.enforce_image_size_total(context, context.owner)
    def test_enforce_overquota_multiple(self):
        # Check that multiple images with a combined amount
        # (2*2*150=600) over the quota will trip the quota check.
        self.config(use_keystone_limits=True)
        context = FakeContext()
        self._create_fake_image(context, 150)
        self._create_fake_image(context, 150)
        exc = self.assertRaises(exception.LimitExceeded,
                                ks_quota.enforce_image_size_total,
                                context, context.owner)
        self.assertIn('image_size_total is over limit of 500', str(exc))
    def test_enforce_underquota(self):
        self.config(use_keystone_limits=True)
        context = FakeContext()
        self._create_fake_image(context, 100)
        # We are under quota, so no exception expected
        ks_quota.enforce_image_size_total(context, context.owner)
    def test_enforce_underquota_with_others_over_quota(self):
        self.config(use_keystone_limits=True)
        # Put the first tenant over quota
        context = FakeContext()
        self._create_fake_image(context, 300)
        self._create_fake_image(context, 300)
        # Create an image for another tenant that is not over quota
        other_context = FakeContext()
        other_context.owner = 'someone_else'
        self._create_fake_image(other_context, 100)
        # This tenant should pass the quota check, because it is under quota,
        # even though the other is over.
        ks_quota.enforce_image_size_total(other_context, other_context.owner)
    def test_enforce_multiple_limits_under_quota(self):
        self.config(use_keystone_limits=True)
        context = FakeContext()
        # Make sure that we can call the multi-limit handler and pass when
        # we are under quota.
        ks_quota._enforce_some(context, context.owner,
                               {ks_quota.QUOTA_IMAGE_SIZE_TOTAL: lambda: 200,
                                'another_limit': lambda: 1},
                               {'another_limit': 1})
    def test_enforce_multiple_limits_over_quota(self):
        self.config(use_keystone_limits=True)
        context = FakeContext()
        # Make sure that even if one of a multi-limit call is over
        # quota, we get the exception.
        self.assertRaises(exception.LimitExceeded,
                          ks_quota._enforce_some,
                          context, context.owner,
                          {ks_quota.QUOTA_IMAGE_SIZE_TOTAL: lambda: 200,
                           'another_limit': lambda: 1},
                          {'another_limit': 5})
    @mock.patch('oslo_limit.limit.Enforcer')
    @mock.patch.object(ks_quota, 'LOG')
    def test_oslo_limit_config_fail(self, mock_LOG, mock_enforcer):
        """A misconfigured oslo_limit session is logged and re-raised."""
        self.config(use_keystone_limits=True)
        mock_enforcer.return_value.enforce.side_effect = (
            ol_exc.SessionInitError('test'))
        context = FakeContext()
        self._create_fake_image(context, 100)
        self.assertRaises(ol_exc.SessionInitError,
                          ks_quota.enforce_image_size_total,
                          context, context.owner)
        mock_LOG.error.assert_called_once_with(
            'Failed to initialize oslo_limit, likely due to '
            'incorrect or insufficient configuration: %(err)s',
            {'err': "Can't initialise OpenStackSDK session: test."})
| |
import pytz
from django.contrib.auth import authenticate, login
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
import requests
from django.views import View
from django.views.generic import TemplateView
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework.parsers import JSONParser
import json
import calendar
import stripe
import datetime
from src.apps.program.models import ProgramUsers
from django.conf import settings
from src.apps.question.views import SaveQuestions
from src.apps.users.models import User
from .serializers import GetdatesSerializers, GetTimesSerializers, GetDateTimeSerializers
# Create your views here.
class getAvailableDates(APIView):
    """Return the days of the requested month with *no* availability.

    Queries the Acuity Scheduling availability API for the ``year`` and
    ``month`` query parameters and responds with the list of 1-based day
    numbers that did not appear in the scheduler's response.
    """

    # NOTE(review): credentials are hard-coded here (and in
    # GetAvailableTimes); they belong in settings/environment config.
    ACUITY_AUTH = ('13551534', '743460668658bf53bca1ca7127ad829b')

    def get(self, request, *args, **kwargs):
        year = request.query_params['year']
        month = request.query_params['month']
        dates = requests.get(
            'https://acuityscheduling.com/api/v1/availability/dates'
            '?appointmentTypeID=3459580&month={}-{}'.format(year, month),
            auth=self.ACUITY_AUTH)
        serializer = GetdatesSerializers(data=dates.json(), many=True)
        # Bug fix: the month length was hard-coded to December 2017;
        # derive it from the requested year/month instead.
        days_in_month = calendar.monthrange(int(year), int(month))[1]
        valid_days = []
        if serializer.is_valid():
            valid_days = [entry['date'].day
                          for entry in serializer.validated_data]
        # If the serializer was invalid, valid_days stays empty and every
        # day of the month is reported as unavailable (original behavior).
        not_valid_days = [day for day in range(1, days_in_month + 1)
                          if day not in valid_days]
        return Response(not_valid_days, status=status.HTTP_200_OK)
class GetAvailableTimes(APIView):
    """Return the available appointment times for a given ``date``.

    The date is parsed, shifted toward US-Eastern wall time, and the
    matching availability is fetched from Acuity Scheduling.  Times are
    returned localized to America/New_York.
    """

    def get(self, request, *args, **kwargs):
        serializer = GetDateTimeSerializers(
            data={'date': request.query_params['date']})
        if not serializer.is_valid():
            # Bug fix: an invalid date previously fell through and the view
            # returned None (a 500); reject it explicitly instead.
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        # NOTE(review): a fixed UTC-5 offset ignores daylight saving time
        # (EDT is UTC-4) — confirm whether this shift is still intended.
        east_coast_offset = datetime.timedelta(hours=5)
        date_string = (serializer.validated_data.get('date') -
                       east_coast_offset).strftime('%Y-%m-%d')
        times = requests.get(
            'https://acuityscheduling.com/api/v1/availability/times?appointmentTypeID=3459580&date={}'.format(date_string),
            auth=('13551534', '743460668658bf53bca1ca7127ad829b'))
        times_serializer = GetTimesSerializers(data=times.json(), many=True)
        local_times = []
        if times_serializer.is_valid():
            # Localize each offered slot to the studio's timezone.
            tz = pytz.timezone('America/New_York')
            local_times = [entry['time'].astimezone(tz)
                           for entry in times_serializer.validated_data]
        return Response(local_times, status=status.HTTP_200_OK)
class MakePayment(APIView):
    """Charge the user's active program cost via Stripe using a card token
    produced client-side, and mark the enrolment as paid on success."""

    def post(self, request, *args, **kwargs):
        # Stripe card token and the payer's email, both supplied by the JS
        # Stripe integration on the front end.
        payload = request.data['params']['tok']
        userEmail = request.data['params']['userEmail']
        # fake a user for testing
        user = User.objects.get(email=userEmail)
        p = ProgramUsers.objects.get(user=user, is_active=True)
        amount = p.program.cost
        # Stripe amounts are in the smallest currency unit (cents).
        amount = int(amount) * 100
        stripe.api_key = settings.STRIPE_SECRET_KEY
        # Create a Stripe customer holding the card, then charge it.
        customer = stripe.Customer.create(
            email=user.email,
            source=payload
        )
        charge = stripe.Charge.create(
            customer=customer.id,
            amount=amount,
            currency='CAD',
            description='Program Charge from BODZII'
        )
        # If the charge came back successful, mark the enrolment as paid.
        if charge['paid']:
            p.hasPaid = True
            p.save()
        # The raw charge object is returned so the client can inspect status.
        return Response(charge, status=status.HTTP_200_OK)
class loadInitialQuestions(APIView):
    """Serve the hard-coded starting macro targets for a new user."""

    def post(self, request, *args, **kwargs):
        print(request.data)
        # NB: 'protien' is misspelled, but the front end reads this exact
        # key, so it must stay as-is.
        macros = dict(fat=100, protien=180, carbs=302, calories=3400)
        return Response(macros, status=status.HTTP_200_OK)
# this is the class that renders the HTML so that the JS can pick it up
class CreateNewUserForm(TemplateView):
    # Renders the initial onboarding questionnaire page; all interaction is
    # handled client-side by JS calling the API views above.
    template_name = 'onboarding/initialQuestions.html'
class InitialFormComplete(TemplateView):
    # Static "thanks, you're done" page shown after the initial form.
    template_name = 'onboarding/success.html'
class setPassword(TemplateView):
    """Render the set-password page and handle its form submission.

    The account's email is carried in the 'email' cookie set earlier in
    the onboarding flow.
    """

    template_name = 'onboarding/setPassword.html'

    def post(self, request, *args, **kwargs):
        # TODO: validate this through a serializer/form instead.
        email = request.COOKIES.get('email')
        new_password = request.POST.get('setPasswordField')
        account = User.objects.get(email=email)
        account.set_password(new_password)
        account.save()
        # Re-authenticate with the freshly saved credentials.
        person = authenticate(email=account.email, password=new_password)
        if person is None:
            # Authentication failed -- send the user back to the form.
            return HttpResponseRedirect('/onboarding/setPassword')
        login(request, person)
        return redirect('users:redirect')
class lockPassword(APIView):
    """API endpoint that sets a password for the user identified by the
    'email' cookie.

    Fixes:
    * ``authenticate`` is now called with ``email=`` (matching the sibling
      ``setPassword`` view and the email-based User model); the previous
      ``username=`` keyword could never match.
    * The request key has historically been the misspelled 'passowrd';
      both spellings are accepted so existing clients keep working.
    """

    def post(self, request, *args, **kwargs):
        user = request.COOKIES.get('email')
        # TODO: add a serializer to this
        # Accept the correct key first, falling back to the legacy typo.
        password = request.data.get('password', request.data.get('passowrd'))
        u = User.objects.get(email=user)
        u.set_password(password)
        u.save()
        user = authenticate(email=u.email, password=password)
        # NOTE(review): the authenticated user is never logged in and the
        # redirect target is empty -- behavior preserved pending a decision
        # on where this flow should land.
        return redirect('')
class SetAppointment(APIView):
    """Book a consultation with Acuity in two phases: first verify the slot
    is still free (check-times), then create the appointment and record the
    consult time on the user's active program enrolment."""

    def post(self, request, *args, **kwargs):
        # Phase 1: ask Acuity whether the requested slot is still valid.
        payload = {
            'datetime': request.data['datetime'],
            'appointmentTypeID': 3459580,
        }
        payload = json.dumps(payload)
        answer = requests.post(
            'https://acuityscheduling.com/api/v1/availability/check-times',
            auth=('13551534', '743460668658bf53bca1ca7127ad829b'),
            data=payload
        )
        answer = json.loads(answer.text)
        user = request.user
        # Phase 2 payload: same slot plus the attendee's identity.
        payload = {
            'datetime': request.data['datetime'],
            'appointmentTypeID': 3459580,
            'firstName': user.first_name,
            'lastName':user.last_name,
            'email': user.email
        }
        payload = json.dumps(payload)
        if answer['valid']:
            res = requests.post(
                'https://acuityscheduling.com/api/v1/appointments',
                auth=('13551534', '743460668658bf53bca1ca7127ad829b'),
                data=payload
            )
            res = json.loads(res.text)
            print(res)
            # Acuity returns an 'id' only when the booking was created.
            if res.get('id', None):
                # need to set the appointment date in our system
                pu = ProgramUsers.objects.get(user=user, is_active=True)
                pu.consultTime = request.data['datetime']
                pu.is_reg_complete = True
                pu.save()
                return Response({'confirmed': True}, status=status.HTTP_200_OK)
            # Slot was valid but the booking call did not create an id.
            return Response({'confirmed': False}, status=status.HTTP_200_OK)
        # Slot was taken between the user's selection and this request.
        return Response({'confirmed': False}, status=status.HTTP_400_BAD_REQUEST)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2017
""" Common functionalities for the Backup, they are also used for the integration with the API.
"""
import os
import uuid
from foglamp.services.core import server
from foglamp.common.storage_client import payload_builder
from foglamp.common import logger
import foglamp.plugins.storage.common.lib as lib
import foglamp.plugins.storage.common.exceptions as exceptions
__author__ = "Stefano Simonelli"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
# Module-level message catalogue, used before the Backup class logger exists.
_MESSAGES_LIST = {
    # Information messages
    "i000001": "Execution started.",
    "i000002": "Execution completed.",

    # Warning / Error messages
    "e000001": "cannot initialize the logger - error details |{0}|",
    "e000002": "an error occurred during the backup operation - error details |{0}|",
    "e000004": "cannot complete the initialization - error details |{0}|",
}
""" Messages used for Information, Warning and Error notice """

# Log definitions
# Module-level logger placeholder; left None until configured by a caller.
_logger = None

# Numeric levels mirroring the stdlib logging DEBUG/INFO values.
_LOG_LEVEL_DEBUG = 10
_LOG_LEVEL_INFO = 20

# Effective level and destination used when loggers are set up below.
_LOGGER_LEVEL = _LOG_LEVEL_INFO
_LOGGER_DESTINATION = logger.SYSLOG
class Backup(object):
    """ Provides external functionality/integration for Backup operations

        the constructor expects to receive a reference to a StorageClient object to being able to access
        the Storage Layer
    """

    _MODULE_NAME = "foglamp_backup_common"

    # UUID of the pre-defined on-demand backup schedule known to the core scheduler.
    _SCHEDULE_BACKUP_ON_DEMAND = "fac8dae6-d8d1-11e7-9296-cec278b6b50a"

    _MESSAGES_LIST = {
        # Information messages
        "i000000": "general information",
        "i000003": "On demand backup successfully launched.",

        # Warning / Error messages
        "e000000": "general error",
        "e000001": "cannot delete/purge backup file on file system - id |{0}| - file name |{1}| error details |{2}|",
        "e000002": "cannot delete/purge backup information on the storage layer "
                   "- id |{0}| - file name |{1}| error details |{2}|",
        "e000003": "cannot retrieve information for the backup id |{0}|",
        "e000004": "cannot launch on demand backup - error details |{0}|",
    }
    """ Messages used for Information, Warning and Error notice """

    # Class-wide logger, configured lazily by the first instance created.
    _logger = None

    # Name of the backups table on the storage layer; set from BackupRestoreLib.
    STORAGE_TABLE_BACKUPS = None

    def __init__(self, _storage):
        """
        Args:
            _storage: StorageClient reference used for all Storage Layer accesses
        """
        self._storage = _storage

        if not Backup._logger:
            Backup._logger = logger.setup(self._MODULE_NAME,
                                          destination=_LOGGER_DESTINATION,
                                          level=_LOGGER_LEVEL)

        self._backup_lib = lib.BackupRestoreLib(self._storage, self._logger)
        self.STORAGE_TABLE_BACKUPS = self._backup_lib.STORAGE_TABLE_BACKUPS

    async def get_all_backups(
            self,
            limit: int,
            skip: int,
            status: [lib.BackupStatus, None],
            sort_order: lib.SortOrder = lib.SortOrder.DESC) -> list:
        """ Returns a list of backups is returned sorted in chronological order with the most recent backup first.

        Args:
            limit: int - limit the number of backups returned to the number given
            skip: int - skip the number of backups specified before returning backups-
                        this, in conjunction with the limit option, allows for a paged interface to be built
            status: lib.BackupStatus - limit the returned backups to those only with the specified status,
                                       None = retrieves information for all the backups
            sort_order: lib.SortOrder - Defines the order used to present information, DESC by default
        Returns:
            backups_information: all the information available related to the requested backups
        Raises:
        """
        # FIX: limit, skip and sort_order were accepted and documented but
        # never applied to the query, so pagination and ordering silently
        # did nothing. They are now pushed into the payload.
        payload = payload_builder.PayloadBuilder().SELECT("id", "status", "ts", "file_name", "type") \
            .ALIAS("return", ("ts", 'ts')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS")) \
            .LIMIT(limit) \
            .SKIP(skip)

        if status:
            payload.WHERE(['status', '=', status])

        payload.ORDER_BY(['ts', sort_order])

        backups_from_storage = await self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, payload.payload())
        backups_information = backups_from_storage['rows']
        return backups_information

    async def get_backup_details(self, backup_id: int) -> dict:
        """ Returns the details of a backup

        Args:
            backup_id: int - the id of the backup to return
        Returns:
            backup_information: all the information available related to the requested backup_id
        Raises:
            exceptions.DoesNotExist
            exceptions.NotUniqueBackup
        """
        payload = payload_builder.PayloadBuilder().SELECT("id", "status", "ts", "file_name", "type") \
            .ALIAS("return", ("ts", 'ts')).FORMAT("return", ("ts", "YYYY-MM-DD HH24:MI:SS.MS")) \
            .WHERE(['id', '=', backup_id]).payload()

        backup_from_storage = await self._storage.query_tbl_with_payload(self.STORAGE_TABLE_BACKUPS, payload)

        if backup_from_storage['count'] == 0:
            raise exceptions.DoesNotExist
        elif backup_from_storage['count'] == 1:
            backup_information = backup_from_storage['rows'][0]
        else:
            # 'id' should be unique; more than one row indicates corruption.
            raise exceptions.NotUniqueBackup

        return backup_information

    async def delete_backup(self, backup_id: int):
        """ Deletes a backup

        Args:
            backup_id: int - the id of the backup to delete
        Returns:
        Raises:
        """
        try:
            backup_information = await self.get_backup_details(backup_id)
            file_name = backup_information['file_name']

            # Deletes backup file from the file system
            if os.path.exists(file_name):
                try:
                    os.remove(file_name)
                except Exception as _ex:
                    _message = self._MESSAGES_LIST["e000001"].format(backup_id, file_name, _ex)
                    Backup._logger.error(_message)
                    raise

            # Deletes backup information from the Storage layer
            # only if it was possible to delete the file from the file system
            try:
                await self._delete_backup_information(backup_id)
            except Exception as _ex:
                _message = self._MESSAGES_LIST["e000002"].format(backup_id, file_name, _ex)
                self._logger.error(_message)
                raise

        except exceptions.DoesNotExist:
            _message = self._MESSAGES_LIST["e000003"].format(backup_id)
            self._logger.warning(_message)
            raise

    async def _delete_backup_information(self, _id):
        """ Deletes backup information from the Storage layer

        Args:
            _id: Backup id to delete
        Returns:
        Raises:
        """
        payload = payload_builder.PayloadBuilder() \
            .WHERE(['id', '=', _id]) \
            .payload()
        await self._storage.delete_from_tbl(self.STORAGE_TABLE_BACKUPS, payload)

    async def create_backup(self):
        """ Run a backup task using the scheduler on-demand schedule mechanism to run the script,
            the backup will proceed asynchronously.

        Args:
        Returns:
            status: str - {"running"|"failed"}
        Raises:
        """
        self._logger.debug("{func}".format(func="create_backup"))

        try:
            # Queue the pre-defined on-demand schedule; the scheduler runs
            # the actual backup script asynchronously.
            await server.Server.scheduler.queue_task(uuid.UUID(Backup._SCHEDULE_BACKUP_ON_DEMAND))

            _message = self._MESSAGES_LIST["i000003"]
            Backup._logger.info("{0}".format(_message))
            status = "running"
        except Exception as _ex:
            _message = self._MESSAGES_LIST["e000004"].format(_ex)
            Backup._logger.error("{0}".format(_message))
            status = "failed"

        return status
| |
#!/usr/bin/env python
__author__ = "Timothy Tickle"
__copyright__ = "Copyright 2014"
__credits__ = [ "Timothy Tickle" ]
__license__ = "MIT"
__maintainer__ = "Timothy Tickle"
__email__ = "ttickle@broadinstitute.org"
__status__ = "Development"
import matplotlib.pyplot as plt
import quickPlot as qp
class BarChart( qp.QuickPlot ):
    """Quick bar-chart plotter driven by a JSON description.

    NOTE(review): this class is Python 2 only -- it relies on
    ``dict.values()`` returning a list (indexed with ``[0]``) and on
    ``dict.iteritems()``. Porting to Python 3 requires changing both.
    """

    def __init__( self ):
        qp.QuickPlot.__init__( self )

    def func_plot( self, json_data, str_output_figure ):
        """
        Function that quickly plots a bar chart of data in a json file.

        * json_data : JSON object or a dict
                    : Object to plot

        * str_output_figure : String path
                            : Figure to plot

        * return : boolean
                 : True indicates success
        """
        # Get annotations
        str_title = json_data.get( qp.c_STR_TITLE, qp.c_STR_TITLE_DEFAULT )
        str_x_title = json_data.get( qp.c_STR_X_AXIS, qp.c_STR_X_AXIS_DEFAULT )
        str_y_title = json_data.get( qp.c_STR_Y_AXIS, qp.c_STR_Y_AXIS_DEFAULT )
        i_y_limit = int( json_data[qp.c_STR_Y_LIMIT ] ) if qp.c_STR_Y_LIMIT in json_data else None
        str_label_sort = json_data.get( qp.c_STR_SORT, None )

        # Max value in all series
        i_max_count = 0

        # Data tick labels (set by the first label instance that is not none)
        lstr_data_xticks = None

        # Global labels in case some data have different labels
        lstr_global_labels, f_update_labels, dict_label_instances = self.func_get_consistent_x_ticks( json_data, str_label_sort )

        # Width of the bars are dependent on the how many bar groups are given ( how many series of bar values are given )
        if f_update_labels:
            # NOTE(review): dict.values()[0] is Python 2 only and takes the
            # first [count, width] pair only -- presumably intended as the
            # max count over all labels; confirm before porting.
            i_len_data_list = max( [ i_count for i_count in dict_label_instances.values()[ 0 ] ] )
        else:
            i_len_data_list = len( json_data[ qp.c_STR_DATA ] )
        i_bar_width = 1.0 / ( i_len_data_list + 1.0 ) if i_len_data_list > 1.0 else 0.5

        # make index between 1 and N
        # Also if there are no groups but each is a single bar add .25 to make the label center under the bar
        li_index = range( 1, len( lstr_global_labels ) + 1 )
        if i_len_data_list == 1:
            li_index = [ i_index + 0.25 for i_index in li_index ]

        # Get data
        for dict_data in json_data[ qp.c_STR_DATA ]:
            # Update the max value
            i_max_count = max( [ i_max_count, max( dict_data[ qp.c_STR_DATA ] ) ] )

            # Get data specific information
            li_data = dict_data [ qp.c_STR_DATA ]
            str_data_label = dict_data[ qp.c_STR_DATA_LABEL ] if qp.c_STR_DATA_LABEL in dict_data else None
            c_color = dict_data[ qp.c_C_PLOT_COLOR ] if qp.c_C_PLOT_COLOR in dict_data else qp.c_C_PLOT_COLOR_DEFAULT
            li_error_values = dict_data[ qp.c_STR_ERROR_VALUES ] if qp.c_STR_ERROR_VALUES in dict_data else None

            # Get x ticks
            if not lstr_data_xticks:
                lstr_data_xticks = dict_data[ qp.c_STR_X_TICK_LABEL ] if qp.c_STR_X_TICK_LABEL in dict_data else None

            # If updating the data to a global x ticks label is needed do that here
            li_cur_index, i_bar_width = [ li_index, i_bar_width ] if not f_update_labels else self.func_get_index_for_labels( lstr_global_labels, lstr_data_xticks, i_len_data_list, dict_label_instances )

            bar_cur = plt.bar( li_cur_index, li_data,
                               width=i_bar_width, color=c_color,
                               yerr=li_error_values, label=str_data_label )
            # Write the bar heights above each bar.
            self.func_label_bars( bar_cur, li_error_values, plt.gca() )

            # Need to set to None so it will be refreshed
            # Or update the indices, cleanup to do at the end depending on the mode
            if f_update_labels:
                lstr_data_xticks = None
            else:
                li_index = [ i_cur_index + i_bar_width for i_cur_index in li_index ]

        # Annotate plots
        # Change y limits (10% headroom above the tallest bar)
        plt.ylim( 0, i_max_count + ( i_max_count * .1 ) )
        plt.title( str_title )
        plt.xlabel( str_x_title )
        plt.ylabel( str_y_title )

        # Add .5 to center the labels to the groups
        plt.xticks( [ i_index + 0.5 for i_index in range( 1, len( li_index ) + 1 ) ], lstr_global_labels if f_update_labels else lstr_data_xticks )
        plt.legend()
        plt.tight_layout()
        # An explicit y-limit from the JSON overrides the computed headroom.
        if not i_y_limit is None:
            plt.ylim( 0, i_y_limit )
        plt.savefig( str_output_figure )
        plt.close()
        return True

    def func_get_consistent_x_ticks(self, json_cur, str_sort_type = None ):
        """
        Series of data may have different labels.
        Combining those labels to a global x tick label set
        is implemented here for convenience.

        Returns [ global label list, needs-update flag,
                  { label : [ instance count, bar width ] } ].
        """
        # Global set of labels to compile
        sstr_global_labels = None

        # Indicates if an update will be needed given the labels
        # If there where no changes to the labels logic can be avoided and
        # Faster plotting can occur
        f_updates_are_needed = False

        # Measures the maximum number of data associated to a label
        # This helps to know how to measure the columns
        dict_label_instances_counter = {}

        # Get a combined set of labels
        for dict_data in json_cur[ qp.c_STR_DATA ]:
            # Get the current data's tick labels
            lstr_labels_cur = dict_data.get( qp.c_STR_X_TICK_LABEL, None )

            # Skip a data without labels
            # It is assumed that these labels mirror an already given set or set to be given
            # It is assumed this does not happen if mismatched x tick labels are given
            if lstr_labels_cur is None:
                continue

            # We received a label set
            sstr_labels_cur = set( lstr_labels_cur )

            # If this is the first time a label set is received
            # Use that set
            # Check to see if there are any differences in the global label set and the current
            # If there are then updating data and errors and such have to happen do to subsets of label sets
            # Being given instead of a consistent label set.
            if sstr_global_labels is None:
                sstr_global_labels = sstr_labels_cur
            else:
                if not len( sstr_global_labels ) == len( sstr_labels_cur ):
                    f_updates_are_needed = True
                # Update the global union of all sets received
                sstr_global_labels.update( sstr_labels_cur )

            # Update labels dictionary
            for str_label in sstr_labels_cur:
                dict_label_instances_counter[ str_label ] = dict_label_instances_counter.get(str_label, 0 ) + 1

        # Add the bar widths to the label count dict
        #"{ 'str_label' : [ i_count, i_bar_width ] }"
        # NOTE(review): iteritems() is Python 2 only.
        for str_key, i_value in dict_label_instances_counter.iteritems():
            dict_label_instances_counter[ str_key ] = [ i_value, 1.0 / ( i_value + 1 ) ]

        # If sorting
        lstr_global_labels = list( sstr_global_labels )
        if str_sort_type:
            if str_sort_type == qp.c_STR_SORT_NUMERICALLY:
                # Sort as integers, then convert back to strings for display.
                lstr_global_labels = sorted( [ int( str_label ) for str_label in lstr_global_labels ] )
                lstr_global_labels = [ str( i_label ) for i_label in lstr_global_labels ]
            elif str_sort_type == qp.c_STR_SORT_LEX:
                lstr_global_labels.sort()
        return [ lstr_global_labels, f_updates_are_needed, dict_label_instances_counter ]

    def func_label_bars(self, ptch_bars, li_errors, ax_cur ):
        """
        Add heights as labels on bars.

        * ptch_bars : List of ax.bar
                    : The bars to label

        * li_errors : List of integers
                    : List of errors

        * ax_cur : Axes associated with the bars
                 : Current axes
        """
        if li_errors is None:
            li_errors = [ 0 ] * len( ptch_bars )
        for ptch_cur, i_error in zip( ptch_bars, li_errors ):
            i_height = ptch_cur.get_height()
            # Place the label just above the bar plus its error whisker.
            ax_cur.text( ptch_cur.get_x()+ptch_cur.get_width()/2.0, ( ( i_height + i_error ) + 0.25 ), '%d'%int(i_height), ha="center", va="bottom" )

    def func_get_index_for_labels( self, lstr_master_labels, lstr_labels_cur, i_data_group_count, dict_group_count ):
        """
        This assumes all of the labels of the lstr_labels_cur are in the lstr_master_labels and the lstr_labels_cur has only unique values .

        Returns [ translated bar indices, bar widths ], consuming one
        instance per label from dict_group_count as it goes.
        """
        #"{ 'str_label' : [ i_count, i_bar_width ] }"
        if not lstr_labels_cur:
            return lstr_labels_cur
        li_update_indices = []
        li_bar_widths = []
        for str_label in lstr_labels_cur:
            # Position within the global (master) label ordering.
            i_translated_index = lstr_master_labels.index( str_label )
            i_singleton_fudge = 1.0
            # Offset within the group, based on how many instances remain.
            i_mlt_group_adjust = dict_group_count[ str_label ][ 1 ] * ( dict_group_count[ str_label ][ 0 ] - 1 )
            if dict_group_count[ str_label ][ 0 ] == 1:
                i_singleton_fudge = 1.25
            else:
                i_mlt_group_adjust = i_mlt_group_adjust + 0.25
            li_update_indices.append( i_translated_index + i_singleton_fudge + i_mlt_group_adjust )
            # Decrement the remaining-instance count for this label.
            dict_group_count[ str_label ] = [ dict_group_count[ str_label ][ 0 ] - 1.0, dict_group_count[ str_label ][ 1 ] ]
            li_bar_widths.append( dict_group_count[ str_label ][ 1 ] )
        return [ li_update_indices, li_bar_widths ]
if __name__ == "__main__":
    # func_make_figure comes from the qp.QuickPlot base class; presumably it
    # reads the JSON input and dispatches to func_plot above -- confirm in
    # quickPlot.py.
    BarChart().func_make_figure()
| |
# Copyright (c) 2014 Alex Meade. All rights reserved.
# Copyright (c) 2014 Clinton Knight. All rights reserved.
# Copyright (c) 2015 Tom Barron. All rights reserved.
# Copyright (c) 2016 Mike Rooney. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
LOG = logging.getLogger(__name__)
DELETED_PREFIX = 'deleted_cinder_'
@six.add_metaclass(utils.TraceWrapperMetaclass)
class Client(object):
    """Base NetApp Data ONTAP client speaking the Ontapi (ZAPI) protocol.

    Holds an NaServer connection plus an SSH helper. Mode-specific
    subclasses implement the NotImplementedError hooks below. The
    TraceWrapperMetaclass adds call tracing to every method.
    """

    def __init__(self, **kwargs):
        host = kwargs['hostname']
        username = kwargs['username']
        password = kwargs['password']
        self.connection = netapp_api.NaServer(
            host=host,
            transport_type=kwargs['transport_type'],
            port=kwargs['port'],
            username=username,
            password=password)

        self.ssh_client = self._init_ssh_client(host, username, password)

    def _init_ssh_client(self, host, username, password):
        # Separate hook so tests/subclasses can substitute the SSH helper.
        return netapp_api.SSHUtil(
            host=host,
            username=username,
            password=password)

    def _init_features(self):
        """Set up the repository of available Data ONTAP features."""
        self.features = na_utils.Features()

    def get_ontapi_version(self, cached=True):
        """Gets the supported ontapi version."""

        # Cached path returns whatever the connection already negotiated;
        # pass cached=False to re-query the filer.
        if cached:
            return self.connection.get_api_version()

        ontapi_version = netapp_api.NaElement('system-get-ontapi-version')
        res = self.connection.invoke_successfully(ontapi_version, False)
        major = res.get_child_content('major-version')
        minor = res.get_child_content('minor-version')
        return major, minor

    def _strip_xml_namespace(self, string):
        # Turns '{namespace}tag' into 'tag'.
        if string.startswith('{') and '}' in string:
            return string.split('}', 1)[1]
        return string

    def check_is_naelement(self, elem):
        """Checks if object is instance of NaElement."""
        if not isinstance(elem, netapp_api.NaElement):
            raise ValueError('Expects NaElement')

    def send_request(self, api_name, api_args=None, enable_tunneling=True):
        """Sends request to Ontapi."""
        request = netapp_api.NaElement(api_name)
        if api_args:
            request.translate_struct(api_args)
        return self.connection.invoke_successfully(request, enable_tunneling)

    def create_lun(self, volume_name, lun_name, size, metadata,
                   qos_policy_group_name=None):
        """Issues API request for creating LUN on volume."""
        path = '/vol/%s/%s' % (volume_name, lun_name)
        params = {'path': path, 'size': six.text_type(size),
                  'ostype': metadata['OsType'],
                  'space-reservation-enabled': metadata['SpaceReserved']}
        version = self.get_ontapi_version()
        # Ontapi >= 1.110 supports creating a LUN with the exact byte size.
        if version >= (1, 110):
            params['use-exact-size'] = 'true'
        lun_create = netapp_api.NaElement.create_node_with_children(
            'lun-create-by-size',
            **params)
        if qos_policy_group_name:
            lun_create.add_new_child('qos-policy-group', qos_policy_group_name)
        try:
            self.connection.invoke_successfully(lun_create, True)
        except netapp_api.NaApiError as ex:
            with excutils.save_and_reraise_exception():
                LOG.error("Error provisioning volume %(lun_name)s on "
                          "%(volume_name)s. Details: %(ex)s",
                          {'lun_name': lun_name,
                           'volume_name': volume_name,
                           'ex': ex})

    def destroy_lun(self, path, force=True):
        """Destroys the LUN at the path."""
        lun_destroy = netapp_api.NaElement.create_node_with_children(
            'lun-destroy',
            **{'path': path})
        # force=True destroys even a mapped/online LUN.
        if force:
            lun_destroy.add_new_child('force', 'true')
        self.connection.invoke_successfully(lun_destroy, True)
        seg = path.split("/")
        LOG.debug("Destroyed LUN %s", seg[-1])

    def map_lun(self, path, igroup_name, lun_id=None):
        """Maps LUN to the initiator and returns LUN id assigned."""
        lun_map = netapp_api.NaElement.create_node_with_children(
            'lun-map', **{'path': path,
                          'initiator-group': igroup_name})
        # NOTE(review): truthiness test means an explicit lun_id of 0 is
        # ignored and the filer auto-assigns -- confirm that is intended.
        if lun_id:
            lun_map.add_new_child('lun-id', lun_id)
        try:
            result = self.connection.invoke_successfully(lun_map, True)
            return result.get_child_content('lun-id-assigned')
        except netapp_api.NaApiError as e:
            code = e.code
            message = e.message
            LOG.warning('Error mapping LUN. Code :%(code)s, Message: '
                        '%(message)s', {'code': code, 'message': message})
            raise

    def unmap_lun(self, path, igroup_name):
        """Unmaps a LUN from given initiator."""
        lun_unmap = netapp_api.NaElement.create_node_with_children(
            'lun-unmap',
            **{'path': path, 'initiator-group': igroup_name})
        try:
            self.connection.invoke_successfully(lun_unmap, True)
        except netapp_api.NaApiError as e:
            # Capture exc_info so the original traceback is preserved when
            # re-raising below.
            exc_info = sys.exc_info()
            LOG.warning("Error unmapping LUN. Code :%(code)s, Message: "
                        "%(message)s", {'code': e.code,
                                        'message': e.message})
            # if the LUN is already unmapped
            if e.code == '13115' or e.code == '9016':
                pass
            else:
                six.reraise(*exc_info)

    def create_igroup(self, igroup, igroup_type='iscsi', os_type='default'):
        """Creates igroup with specified args."""
        igroup_create = netapp_api.NaElement.create_node_with_children(
            'igroup-create',
            **{'initiator-group-name': igroup,
               'initiator-group-type': igroup_type,
               'os-type': os_type})
        self.connection.invoke_successfully(igroup_create, True)

    def add_igroup_initiator(self, igroup, initiator):
        """Adds initiators to the specified igroup."""
        igroup_add = netapp_api.NaElement.create_node_with_children(
            'igroup-add',
            **{'initiator-group-name': igroup,
               'initiator': initiator})
        self.connection.invoke_successfully(igroup_add, True)

    def do_direct_resize(self, path, new_size_bytes, force=True):
        """Resize the LUN."""
        seg = path.split("/")
        LOG.info("Resizing LUN %s directly to new size.", seg[-1])
        lun_resize = netapp_api.NaElement.create_node_with_children(
            'lun-resize',
            **{'path': path,
               'size': new_size_bytes})
        if force:
            lun_resize.add_new_child('force', 'true')
        self.connection.invoke_successfully(lun_resize, True)

    def get_lun_geometry(self, path):
        """Gets the LUN geometry."""
        geometry = {}
        lun_geo = netapp_api.NaElement("lun-get-geometry")
        lun_geo.add_new_child('path', path)
        try:
            result = self.connection.invoke_successfully(lun_geo, True)
            geometry['size'] = result.get_child_content("size")
            geometry['bytes_per_sector'] =\
                result.get_child_content("bytes-per-sector")
            geometry['sectors_per_track'] =\
                result.get_child_content("sectors-per-track")
            geometry['tracks_per_cylinder'] =\
                result.get_child_content("tracks-per-cylinder")
            geometry['cylinders'] =\
                result.get_child_content("cylinders")
            geometry['max_resize'] =\
                result.get_child_content("max-resize-size")
        except Exception as e:
            # Best-effort: callers get an empty/partial dict on failure.
            LOG.error("LUN %(path)s geometry failed. Message - %(msg)s",
                      {'path': path, 'msg': six.text_type(e)})
        return geometry

    def get_volume_options(self, volume_name):
        """Get the value for the volume option."""
        opts = []
        vol_option_list = netapp_api.NaElement("volume-options-list-info")
        vol_option_list.add_new_child('volume', volume_name)
        result = self.connection.invoke_successfully(vol_option_list, True)
        options = result.get_child_by_name("options")
        if options:
            opts = options.get_children()
        return opts

    def move_lun(self, path, new_path):
        """Moves the LUN at path to new path."""
        seg = path.split("/")
        new_seg = new_path.split("/")
        LOG.debug("Moving LUN %(name)s to %(new_name)s.",
                  {'name': seg[-1], 'new_name': new_seg[-1]})
        lun_move = netapp_api.NaElement("lun-move")
        lun_move.add_new_child("path", path)
        lun_move.add_new_child("new-path", new_path)
        self.connection.invoke_successfully(lun_move, True)

    def get_iscsi_target_details(self):
        """Gets the iSCSI target portal details."""
        raise NotImplementedError()

    def get_fc_target_wwpns(self):
        """Gets the FC target details."""
        raise NotImplementedError()

    def get_iscsi_service_details(self):
        """Returns iscsi iqn."""
        raise NotImplementedError()

    def check_iscsi_initiator_exists(self, iqn):
        """Returns True if initiator exists."""
        raise NotImplementedError()

    def set_iscsi_chap_authentication(self, iqn, username, password):
        """Provides NetApp host's CHAP credentials to the backend."""
        raise NotImplementedError()

    def get_lun_list(self):
        """Gets the list of LUNs on filer."""
        raise NotImplementedError()

    def get_igroup_by_initiators(self, initiator_list):
        """Get igroups exactly matching a set of initiators."""
        raise NotImplementedError()

    def _has_luns_mapped_to_initiator(self, initiator):
        """Checks whether any LUNs are mapped to the given initiator."""
        lun_list_api = netapp_api.NaElement('lun-initiator-list-map-info')
        lun_list_api.add_new_child('initiator', initiator)
        result = self.connection.invoke_successfully(lun_list_api, True)
        # Fall back to an empty element when the filer returns no container.
        lun_maps_container = result.get_child_by_name(
            'lun-maps') or netapp_api.NaElement('none')
        return len(lun_maps_container.get_children()) > 0

    def has_luns_mapped_to_initiators(self, initiator_list):
        """Checks whether any LUNs are mapped to the given initiator(s)."""
        for initiator in initiator_list:
            if self._has_luns_mapped_to_initiator(initiator):
                return True
        return False

    def get_lun_by_args(self, **args):
        """Retrieves LUNs with specified args."""
        raise NotImplementedError()

    def get_performance_counter_info(self, object_name, counter_name):
        """Gets info about one or more Data ONTAP performance counters."""

        api_args = {'objectname': object_name}
        result = self.send_request('perf-object-counter-list-info',
                                   api_args,
                                   enable_tunneling=False)

        counters = result.get_child_by_name(
            'counters') or netapp_api.NaElement('None')

        # for/else: the else clause raises only if the loop finishes without
        # returning, i.e. the requested counter was not found.
        for counter in counters.get_children():

            if counter.get_child_content('name') == counter_name:
                labels = []
                label_list = counter.get_child_by_name(
                    'labels') or netapp_api.NaElement('None')
                for label in label_list.get_children():
                    labels.extend(label.get_content().split(','))
                base_counter = counter.get_child_content('base-counter')

                return {
                    'name': counter_name,
                    'labels': labels,
                    'base-counter': base_counter,
                }
        else:
            raise exception.NotFound(_('Counter %s not found') % counter_name)

    def delete_snapshot(self, volume_name, snapshot_name):
        """Deletes a volume snapshot."""
        api_args = {'volume': volume_name, 'snapshot': snapshot_name}
        self.send_request('snapshot-delete', api_args)

    def create_cg_snapshot(self, volume_names, snapshot_name):
        """Creates a consistency group snapshot out of one or more flexvols.

        ONTAP requires an invocation of cg-start to first fence off the
        flexvols to be included in the snapshot. If cg-start returns
        success, a cg-commit must be executed to finalized the snapshot and
        unfence the flexvols.
        """
        cg_id = self._start_cg_snapshot(volume_names, snapshot_name)
        if not cg_id:
            msg = _('Could not start consistency group snapshot %s.')
            raise exception.VolumeBackendAPIException(data=msg % snapshot_name)
        self._commit_cg_snapshot(cg_id)

    def _start_cg_snapshot(self, volume_names, snapshot_name):
        # Fences the flexvols; returns the cg-id needed by cg-commit.
        snapshot_init = {
            'snapshot': snapshot_name,
            'timeout': 'relaxed',
            'volumes': [
                {'volume-name': volume_name} for volume_name in volume_names
            ],
        }
        result = self.send_request('cg-start', snapshot_init)
        return result.get_child_content('cg-id')

    def _commit_cg_snapshot(self, cg_id):
        # Finalizes the snapshot and unfences the flexvols.
        snapshot_commit = {'cg-id': cg_id}
        self.send_request('cg-commit', snapshot_commit)

    def get_snapshot(self, volume_name, snapshot_name):
        """Gets a single snapshot."""
        raise NotImplementedError()

    @utils.retry(exception.SnapshotIsBusy)
    def wait_for_busy_snapshot(self, flexvol, snapshot_name):
        """Checks for and handles a busy snapshot.

        If a snapshot is busy, for reasons other than cloning, an exception is
        raised immediately. Otherwise, wait for a period of time for the clone
        dependency to finish before giving up. If the snapshot is not busy then
        no action is taken and the method exits.

        Raising SnapshotIsBusy triggers the @utils.retry decorator above,
        which re-invokes this method until the snapshot frees up or the
        retry budget is exhausted.
        """
        snapshot = self.get_snapshot(flexvol, snapshot_name)
        if not snapshot['busy']:
            LOG.debug("Backing consistency group snapshot %s available for "
                      "deletion.", snapshot_name)
            return
        else:
            LOG.debug("Snapshot %(snap)s for vol %(vol)s is busy, waiting "
                      "for volume clone dependency to clear.",
                      {"snap": snapshot_name, "vol": flexvol})

            raise exception.SnapshotIsBusy(snapshot_name=snapshot_name)

    def mark_snapshot_for_deletion(self, volume, snapshot_name):
        """Mark snapshot for deletion by renaming snapshot."""
        return self.rename_snapshot(
            volume, snapshot_name, DELETED_PREFIX + snapshot_name)

    def rename_snapshot(self, volume, current_name, new_name):
        """Renames a snapshot."""
        api_args = {
            'volume': volume,
            'current-name': current_name,
            'new-name': new_name,
        }
        return self.send_request('snapshot-rename', api_args)
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from testtools import matchers
from ironic.common import exception
from ironic.common import hash_ring
from ironic.db import api as dbapi
from ironic.tests import base
from ironic.tests.db import base as db_base
CONF = cfg.CONF
class HashRingTestCase(base.TestCase):
    """Unit tests for hash_ring.HashRing construction and host lookup."""
    # NOTE(deva): the mapping used in these tests is as follows:
    # if hosts = [foo, bar]:
    #     fake -> foo, bar
    # if hosts = [foo, bar, baz]:
    #     fake -> foo, bar, baz
    #     fake-again -> bar, baz, foo
    def test_create_ring(self):
        hosts = ['foo', 'bar']
        replicas = 2
        ring = hash_ring.HashRing(hosts, replicas=replicas)
        self.assertEqual(set(hosts), ring.hosts)
        self.assertEqual(replicas, ring.replicas)
    def test_create_with_different_partition_counts(self):
        # Partition count is 2**hash_partition_exponent per host.
        hosts = ['foo', 'bar']
        CONF.set_override('hash_partition_exponent', 2)
        ring = hash_ring.HashRing(hosts)
        self.assertEqual(2 ** 2 * 2, len(ring._partitions))
        CONF.set_override('hash_partition_exponent', 8)
        ring = hash_ring.HashRing(hosts)
        self.assertEqual(2 ** 8 * 2, len(ring._partitions))
        CONF.set_override('hash_partition_exponent', 16)
        ring = hash_ring.HashRing(hosts)
        self.assertEqual(2 ** 16 * 2, len(ring._partitions))
    def test_distribution_one_replica(self):
        hosts = ['foo', 'bar', 'baz']
        ring = hash_ring.HashRing(hosts, replicas=1)
        fake_1_hosts = ring.get_hosts('fake')
        fake_2_hosts = ring.get_hosts('fake-again')
        # We should have one host for each thing
        self.assertThat(fake_1_hosts, matchers.HasLength(1))
        self.assertThat(fake_2_hosts, matchers.HasLength(1))
        # And they must not be the same answers even on this simple data.
        self.assertNotEqual(fake_1_hosts, fake_2_hosts)
    def test_distribution_two_replicas(self):
        hosts = ['foo', 'bar', 'baz']
        ring = hash_ring.HashRing(hosts, replicas=2)
        fake_1_hosts = ring.get_hosts('fake')
        fake_2_hosts = ring.get_hosts('fake-again')
        # We should have two hosts for each thing
        self.assertThat(fake_1_hosts, matchers.HasLength(2))
        self.assertThat(fake_2_hosts, matchers.HasLength(2))
        # And they must not be the same answers even on this simple data
        # because if they were we'd be making the active replica a hot spot.
        self.assertNotEqual(fake_1_hosts, fake_2_hosts)
    def test_distribution_three_replicas(self):
        hosts = ['foo', 'bar', 'baz']
        ring = hash_ring.HashRing(hosts, replicas=3)
        fake_1_hosts = ring.get_hosts('fake')
        fake_2_hosts = ring.get_hosts('fake-again')
        # We should have three hosts for each thing
        self.assertThat(fake_1_hosts, matchers.HasLength(3))
        self.assertThat(fake_2_hosts, matchers.HasLength(3))
        # And they must not be the same answers even on this simple data
        # because if they were we'd be making the active replica a hot spot.
        self.assertNotEqual(fake_1_hosts, fake_2_hosts)
        self.assertNotEqual(fake_1_hosts[0], fake_2_hosts[0])
    def test_ignore_hosts(self):
        hosts = ['foo', 'bar', 'baz']
        ring = hash_ring.HashRing(hosts, replicas=1)
        equals_bar_or_baz = matchers.MatchesAny(
            matchers.Equals(['bar']),
            matchers.Equals(['baz']))
        self.assertThat(
            ring.get_hosts('fake', ignore_hosts=['foo']),
            equals_bar_or_baz)
        self.assertThat(
            ring.get_hosts('fake', ignore_hosts=['foo', 'bar']),
            equals_bar_or_baz)
        # Ignoring every host yields an empty result, not an error.
        self.assertEqual([], ring.get_hosts('fake', ignore_hosts=hosts))
    def test_ignore_hosts_with_replicas(self):
        hosts = ['foo', 'bar', 'baz']
        ring = hash_ring.HashRing(hosts, replicas=2)
        self.assertEqual(
            set(['bar', 'baz']),
            set(ring.get_hosts('fake', ignore_hosts=['foo'])))
        self.assertEqual(set(['baz']),
            set(ring.get_hosts('fake', ignore_hosts=['foo', 'bar'])))
        self.assertEqual(
            set(['baz', 'foo']),
            set(ring.get_hosts('fake-again', ignore_hosts=['bar'])))
        self.assertEqual(
            set(['foo']),
            set(ring.get_hosts('fake-again', ignore_hosts=['bar', 'baz'])))
        self.assertEqual([], ring.get_hosts('fake', ignore_hosts=hosts))
    def _compare_rings(self, nodes, conductors, ring,
                       new_conductors, new_ring):
        """Return {node: (old_host, new_host)} for nodes whose primary
        host differs between *ring* and *new_ring*.

        NOTE(review): the ``conductors``/``new_conductors`` parameters are
        unused here — callers pass them for symmetry with the rings.
        """
        delta = {}
        mapping = dict((node, ring.get_hosts(node)[0]) for node in nodes)
        new_mapping = dict(
            (node, new_ring.get_hosts(node)[0]) for node in nodes)
        for key, old in mapping.items():
            new = new_mapping.get(key, None)
            if new != old:
                delta[key] = (old, new)
        return delta
    def test_rebalance_stability_join(self):
        num_conductors = 10
        num_nodes = 10000
        # Adding 1 conductor to a set of N should move 1/(N+1) of all nodes
        # Eg, for a cluster of 10 nodes, adding one should move 1/11, or 9%
        # We allow for 1/N to allow for rounding in tests.
        redistribution_factor = 1.0 / num_conductors
        nodes = [str(x) for x in range(num_nodes)]
        conductors = [str(x) for x in range(num_conductors)]
        new_conductors = conductors + ['new']
        delta = self._compare_rings(nodes,
                                    conductors, hash_ring.HashRing(conductors),
                                    new_conductors, hash_ring.HashRing(new_conductors))
        self.assertTrue(len(delta) < num_nodes * redistribution_factor)
    def test_rebalance_stability_leave(self):
        num_conductors = 10
        num_nodes = 10000
        # Removing 1 conductor from a set of N should move 1/(N) of all nodes
        # Eg, for a cluster of 10 nodes, removing one should move 1/10, or 10%
        # We allow for 1/(N-1) to allow for rounding in tests.
        redistribution_factor = 1.0 / (num_conductors - 1)
        nodes = [str(x) for x in range(num_nodes)]
        conductors = [str(x) for x in range(num_conductors)]
        new_conductors = conductors[:]
        new_conductors.pop()
        delta = self._compare_rings(nodes,
                                    conductors, hash_ring.HashRing(conductors),
                                    new_conductors, hash_ring.HashRing(new_conductors))
        self.assertTrue(len(delta) < num_nodes * redistribution_factor)
    def test_more_replicas_than_hosts(self):
        # Replica count is silently capped at the number of hosts.
        hosts = ['foo', 'bar']
        ring = hash_ring.HashRing(hosts, replicas=10)
        self.assertEqual(set(hosts), set(ring.get_hosts('fake')))
    def test_ignore_non_existent_host(self):
        hosts = ['foo', 'bar']
        ring = hash_ring.HashRing(hosts, replicas=1)
        self.assertEqual(['foo'], ring.get_hosts('fake',
                                                 ignore_hosts=['baz']))
    def test_create_ring_invalid_data(self):
        hosts = None
        self.assertRaises(exception.Invalid,
                          hash_ring.HashRing,
                          hosts)
    def test_get_hosts_invalid_data(self):
        hosts = ['foo', 'bar']
        ring = hash_ring.HashRing(hosts)
        self.assertRaises(exception.Invalid,
                          ring.get_hosts,
                          None)
class HashRingManagerTestCase(db_base.DbTestCase):
    """Tests for hash_ring.HashRingManager backed by the conductor DB."""
    def setUp(self):
        super(HashRingManagerTestCase, self).setUp()
        self.ring_manager = hash_ring.HashRingManager()
        self.dbapi = dbapi.get_instance()
    def register_conductors(self):
        # Two conductors; both serve driver1, only host1 serves driver2.
        self.dbapi.register_conductor({
            'hostname': 'host1',
            'drivers': ['driver1', 'driver2'],
        })
        self.dbapi.register_conductor({
            'hostname': 'host2',
            'drivers': ['driver1'],
        })
    def test_hash_ring_manager_get_ring_success(self):
        self.register_conductors()
        ring = self.ring_manager['driver1']
        self.assertEqual(sorted(['host1', 'host2']), sorted(ring.hosts))
    def test_hash_ring_manager_driver_not_found(self):
        self.register_conductors()
        self.assertRaises(exception.DriverNotFound,
                          self.ring_manager.__getitem__,
                          'driver3')
    def test_hash_ring_manager_no_refresh(self):
        # If a new conductor is registered after the ring manager is
        # initialized, it won't be seen. Long term this is probably
        # undesirable, but today is the intended behavior.
        self.assertRaises(exception.DriverNotFound,
                          self.ring_manager.__getitem__,
                          'driver1')
        self.register_conductors()
        self.assertRaises(exception.DriverNotFound,
                          self.ring_manager.__getitem__,
                          'driver1')
| |
# -*- coding: utf-8 -*-
import markupsafe
from django.db import models
from addons.base import exceptions
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from addons.bitbucket.api import BitbucketClient
from addons.bitbucket.serializer import BitbucketSerializer
from addons.bitbucket import settings as bitbucket_settings
from addons.bitbucket.exceptions import NotFoundError
from framework.auth import Auth
from osf.models.external import ExternalProvider
from osf.models.files import File, Folder, BaseFileNode
from website import settings
from website.util import web_url_for
hook_domain = bitbucket_settings.HOOK_DOMAIN or settings.DOMAIN
class BitbucketFileNode(BaseFileNode):
    """Base file-tree node for the Bitbucket storage addon."""
    # Provider key used to dispatch to this addon's file nodes.
    _provider = 'bitbucket'
class BitbucketFolder(BitbucketFileNode, Folder):
    """Folder node stored on Bitbucket; inherits all behavior."""
    pass
class BitbucketFile(BitbucketFileNode, File):
    """File node stored on Bitbucket; versions addressed by commit SHA."""
    version_identifier = 'commitSha'
    def touch(self, auth_header, revision=None, commitSha=None, branch=None, **kwargs):
        """Resolve the effective revision (explicit revision first, then
        commit SHA, then branch) and delegate to the parent implementation.
        """
        effective_revision = revision or commitSha or branch
        return super(BitbucketFile, self).touch(
            auth_header, revision=effective_revision, **kwargs)
    @property
    def _hashes(self):
        """Return ``{'commit': <sha>}`` for the newest history entry, or
        ``None`` when no usable commit information is recorded.
        """
        history = self._history
        try:
            return {'commit': history[-1]['extra']['commitSha']}
        except (IndexError, KeyError):
            return None
class BitbucketProvider(ExternalProvider):
    """OAuth provider implementation for Bitbucket.

    API docs:
      * https://developer.atlassian.com/bitbucket/api/2/reference/meta/authentication
      * https://confluence.atlassian.com/bitbucket/oauth-on-bitbucket-cloud-238027431.html
    """
    name = 'Bitbucket'
    short_name = 'bitbucket'
    client_id = bitbucket_settings.CLIENT_ID
    client_secret = bitbucket_settings.CLIENT_SECRET
    auth_url_base = bitbucket_settings.OAUTH_AUTHORIZE_URL
    callback_url = bitbucket_settings.OAUTH_ACCESS_TOKEN_URL
    default_scopes = bitbucket_settings.SCOPE
    auto_refresh_url = callback_url
    refresh_time = bitbucket_settings.REFRESH_TIME
    expiry_time = bitbucket_settings.EXPIRY_TIME
    def handle_callback(self, response):
        """Build the external-account record once the OAuth flow completes.

        Fetches the authenticated user's profile from Bitbucket and returns
        the identifying fields stored on the user's account.
        """
        user_info = BitbucketClient(access_token=response['access_token']).user()
        return {
            'provider_id': user_info['uuid'],
            'profile_url': user_info['links']['html']['href'],
            'display_name': user_info['username'],
        }
    def fetch_access_token(self, force_refresh=False):
        """Return the OAuth access token, refreshing it first if needed."""
        self.refresh_oauth_key(force=force_refresh)
        return self.account.oauth_key
class UserSettings(BaseOAuthUserSettings):
    """Per-user Bitbucket OAuth settings.

    Quirks:
      * Bitbucket does not support remote revocation of access tokens.
    """
    oauth_provider = BitbucketProvider
    serializer = BitbucketSerializer
    @property
    def public_id(self):
        """Display name of the user's oldest connected Bitbucket account.

        Required for importing the username from the social-profile
        configuration page; returns ``None`` when no account is connected.
        """
        accounts = self.owner.external_accounts.filter(
            provider=self.oauth_provider.short_name)
        if not accounts:
            return None
        return accounts[0].display_name
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
    """Node-level settings for the Bitbucket storage addon.

    Links an OSF node to a Bitbucket repository (``user``/``repo``) through
    a user's OAuth credentials, and implements the addon callback hooks.
    """
    oauth_provider = BitbucketProvider
    serializer = BitbucketSerializer
    # Repository owner (workspace/user) and repository selected for this node.
    user = models.TextField(blank=True, null=True)
    repo = models.TextField(blank=True, null=True)
    # Webhook id, if one was registered (hook registration is disabled below).
    hook_id = models.TextField(blank=True, null=True)
    user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
    # Lazily-built BitbucketProvider cache; see the ``api`` property.
    _api = None
    @property
    def api(self):
        """Authenticated ExternalProvider instance"""
        if self._api is None:
            self._api = BitbucketProvider(self.external_account)
        return self._api
    @property
    def folder_id(self):
        # The repo name doubles as the folder identifier.
        return self.repo or None
    @property
    def folder_name(self):
        # "owner/repo" when fully configured, else None.
        if self.complete:
            return '{}/{}'.format(self.user, self.repo)
        return None
    @property
    def folder_path(self):
        return self.repo or None
    @property
    def complete(self):
        """True when the node has auth and both owner and repo are set."""
        return self.has_auth and self.repo is not None and self.user is not None
    def authorize(self, user_settings, save=False):
        """Attach *user_settings* to this node and log the authorization."""
        self.user_settings = user_settings
        self.owner.add_log(
            action='bitbucket_node_authorized',
            params={
                'project': self.owner.parent_id,
                'node': self.owner._id,
            },
            auth=Auth(user_settings.owner),
        )
        if save:
            self.save()
    def clear_settings(self):
        """Forget the configured repository (does not touch credentials)."""
        self.user = None
        self.repo = None
        self.hook_id = None
    def deauthorize(self, auth=None, log=True):
        """Remove repository settings and authentication from this node."""
        # self.delete_hook(save=False)
        self.clear_settings()
        if log:
            self.owner.add_log(
                action='bitbucket_node_deauthorized',
                params={
                    'project': self.owner.parent_id,
                    'node': self.owner._id,
                },
                auth=auth,
            )
        self.clear_auth()
    def delete(self, save=False):
        """Delete the addon record, deauthorizing (without logging) first."""
        super(NodeSettings, self).delete(save=False)
        self.deauthorize(log=False)
        if save:
            self.save()
    @property
    def repo_url(self):
        # Public Bitbucket URL for the configured repo; None when unset.
        if self.user and self.repo:
            return 'https://bitbucket.org/{0}/{1}/'.format(
                self.user, self.repo
            )
    @property
    def short_url(self):
        # "owner/repo" shorthand; None when either part is unset.
        if self.user and self.repo:
            return '/'.join([self.user, self.repo])
    @property
    def is_private(self):
        """Whether the configured repo is private, or None if unavailable."""
        repo = self.fetch_repo()
        if repo:
            return repo['is_private']
        return None
    def fetch_repo(self):
        """Fetch the configured repository's metadata from Bitbucket."""
        connection = BitbucketClient(access_token=self.api.fetch_access_token())
        return connection.repo(user=self.user, repo=self.repo)
    def fetch_access_token(self):
        """Return a current OAuth access token via the provider."""
        return self.api.fetch_access_token()
    # TODO: Delete me and replace with serialize_settings / Knockout
    def to_json(self, user):
        """Serialize addon state for the settings page, as seen by *user*."""
        ret = super(NodeSettings, self).to_json(user)
        user_settings = user.get_addon('bitbucket')
        ret.update({
            'user_has_auth': user_settings and user_settings.has_auth,
            'is_registration': self.owner.is_registration,
        })
        if self.user_settings and self.user_settings.has_auth:
            connection = BitbucketClient(access_token=self.api.fetch_access_token())
            # Best effort: any API failure marks the credentials invalid
            # rather than breaking the settings page.
            valid_credentials = True
            try:
                mine = connection.repos()
                ours = connection.team_repos()
                repo_names = [
                    repo['full_name'].replace('/', ' / ')
                    for repo in mine + ours
                ]
            except Exception:
                repo_names = []
                valid_credentials = False
            owner = self.user_settings.owner
            # Only the authorizing user may see the repo list.
            if owner == user:
                ret.update({'repo_names': repo_names})
            ret.update({
                'node_has_auth': True,
                'bitbucket_user': self.user or '',
                'bitbucket_repo': self.repo or '',
                'bitbucket_repo_full_name': '{0} / {1}'.format(self.user, self.repo) if (self.user and self.repo) else '',
                'auth_osf_name': owner.fullname,
                'auth_osf_url': owner.url,
                'auth_osf_id': owner._id,
                'bitbucket_user_name': self.external_account.display_name,
                'bitbucket_user_url': self.external_account.profile_url,
                'is_owner': owner == user,
                'valid_credentials': valid_credentials,
                'addons_url': web_url_for('user_addons'),
                'files_url': self.owner.web_url_for('collect_file_trees')
            })
        return ret
    def serialize_waterbutler_credentials(self):
        """Credentials handed to WaterButler; raises if not authorized."""
        if not self.complete or not self.repo:
            raise exceptions.AddonError('Addon is not authorized')
        return {'token': self.api.fetch_access_token()}
    def serialize_waterbutler_settings(self):
        """Repo coordinates handed to WaterButler; raises if unconfigured."""
        if not self.complete:
            raise exceptions.AddonError('Repo is not configured')
        return {
            'owner': self.user,
            'repo': self.repo,
        }
    def create_waterbutler_log(self, auth, action, metadata):
        """Record a file action performed through WaterButler on the node log."""
        path = metadata['path']
        url = self.owner.web_url_for('addon_view_or_download_file', path=path, provider='bitbucket')
        sha, urls = None, {}
        # Commit SHA may be absent from metadata; URLs stay empty then.
        try:
            sha = metadata['extra']['commitSha']
            urls = {
                'view': '{0}?commitSha={1}'.format(url, sha),
                'download': '{0}?action=download&commitSha={1}'.format(url, sha)
            }
        except KeyError:
            pass
        self.owner.add_log(
            'bitbucket_{0}'.format(action),
            auth=auth,
            params={
                'project': self.owner.parent_id,
                'node': self.owner._id,
                'path': path,
                'urls': urls,
                'bitbucket': {
                    'user': self.user,
                    'repo': self.repo,
                    'commitSha': sha,
                },
            },
        )
    #############
    # Callbacks #
    #############
    def before_page_load(self, node, user):
        """
        :param Node node:
        :param User user:
        :return str: Alert message
        """
        messages = []
        # Quit if not contributor
        if not node.is_contributor(user):
            return messages
        # Quit if not configured
        if self.user is None or self.repo is None:
            return messages
        # Quit if no user authorization
        if self.user_settings is None:
            return messages
        repo_data = self.fetch_repo()
        if repo_data:
            # Warn when node and repo visibility disagree.
            node_permissions = 'public' if node.is_public else 'private'
            repo_permissions = 'private' if repo_data['is_private'] else 'public'
            if repo_permissions != node_permissions:
                message = (
                    'Warning: This OSF {category} is {node_perm}, but the Bitbucket '
                    'repo {user} / {repo} is {repo_perm}.'.format(
                        category=markupsafe.escape(node.project_or_component),
                        node_perm=markupsafe.escape(node_permissions),
                        repo_perm=markupsafe.escape(repo_permissions),
                        user=markupsafe.escape(self.user),
                        repo=markupsafe.escape(self.repo),
                    )
                )
                if repo_permissions == 'private':
                    message += (
                        ' Users can view the contents of this private Bitbucket '
                        'repository through this public project.'
                    )
                else:
                    message += (
                        ' The files in this Bitbucket repo can be viewed on Bitbucket '
                        '<u><a href="https://bitbucket.org/{user}/{repo}/">here</a></u>.'
                    ).format(
                        user=self.user,
                        repo=self.repo,
                    )
                messages.append(message)
        else:
            # Repo lookup failed: it was most likely deleted on Bitbucket.
            message = (
                'Warning: the Bitbucket repo {user} / {repo} connected to this OSF {category} has been deleted.'.format(
                    category=markupsafe.escape(node.project_or_component),
                    user=markupsafe.escape(self.user),
                    repo=markupsafe.escape(self.repo),
                )
            )
            messages.append(message)
        return messages
    def before_remove_contributor_message(self, node, removed):
        """
        :param Node node:
        :param User removed:
        :return str: Alert message
        """
        try:
            message = (super(NodeSettings, self).before_remove_contributor_message(node, removed) +
                'You can download the contents of this repository before removing '
                'this contributor <u><a href="{url}">here</a></u>.'.format(
                    url=node.api_url + 'bitbucket/tarball/'
                ))
        except TypeError:
            # super call returned None due to lack of user auth
            return None
        else:
            return message
    # backwards compatibility -- TODO: is this necessary?
    before_remove_contributor = before_remove_contributor_message
    def after_remove_contributor(self, node, removed, auth=None):
        """
        :param Node node:
        :param User removed:
        :return str: Alert message
        """
        if self.user_settings and self.user_settings.owner == removed:
            # Delete OAuth tokens
            self.user_settings = None
            self.save()
            message = (
                u'Because the Bitbucket add-on for {category} "{title}" was authenticated '
                u'by {user}, authentication information has been deleted.'
            ).format(
                category=markupsafe.escape(node.category_display),
                title=markupsafe.escape(node.title),
                user=markupsafe.escape(removed.fullname)
            )
            if not auth or auth.user != removed:
                url = node.web_url_for('node_setting')
                message += (
                    u' You can re-authenticate on the <u><a href="{url}">Settings</a></u> page.'
                ).format(url=url)
            #
            return message
    def after_fork(self, node, fork, user, save=True):
        """Hook to run after forking a project. If the forking user is not
        the same as the original authorizing user, the Bitbucket
        credentials will *not* be copied over.

        :param Node node: Original node
        :param Node fork: Forked node
        :param User user: User creating fork
        :param bool save: Save settings after callback
        :return tuple: Tuple of cloned settings and alert message
        """
        clone = super(NodeSettings, self).after_fork(
            node, fork, user, save=False
        )
        # Copy authentication if authenticated by forking user
        if self.user_settings and self.user_settings.owner == user:
            clone.user_settings = self.user_settings
        if save:
            clone.save()
        return clone
    def before_make_public(self, node):
        """Warn before making the node public while the repo is private."""
        try:
            is_private = self.is_private
        except NotFoundError:
            return None
        if is_private:
            return (
                'This {cat} is connected to a private Bitbucket repository. Users '
                '(other than contributors) will not be able to see the '
                'contents of this repo unless it is made public on Bitbucket.'
            ).format(
                cat=node.project_or_component,
            )
    def after_delete(self, user):
        """Deauthorize (with logging) when the owning node is deleted."""
        self.deauthorize(Auth(user=user), log=True)
| |
import reversion
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext as _
from django.conf import settings
from django.contrib.admin.views.main import ChangeList
from mptt.admin import MPTTModelAdmin
from multilingual_model.admin import TranslationInline
from redactor.widgets import RedactorEditor
from datetimewidget.widgets import DateTimeWidget
from cmsbase.admin import PageAdmin, PageFormAdmin, PublishingWorkflowAdmin, PageDataSetAdminForm
from cmsbase.admin_forms import TranslationForm
from cmsbase.widgets import AdminImageWidget, AdminCustomFileWidget
from blog.models import *
from blog import settings as blog_settings
from filemanager.widgets import MultipleFileWidget
# Tagging support is optional; only pull in the tag form machinery when the
# blog settings enable it.
if blog_settings.ENABLE_TAGGING:
    from tagging.forms import TagField
    from blog.widgets import TagAutocomplete
class ArticleTranslationForm(TranslationForm):
    """Per-language article translation form; adds a tags field when the
    tagging feature is enabled."""
    class Meta:
        model = ArticleTranslation
        exclude = ['content']
    def __init__(self, *args, **kwargs):
        super(ArticleTranslationForm, self).__init__(*args, **kwargs)
        if not blog_settings.ENABLE_TAGGING:
            return
        tag_field = TagField(
            widget=TagAutocomplete(),
            required=False,
            help_text=_('Please enter a list of comma separated keywords.'))
        self.fields['tags'] = tag_field
        # Surface the tags field in the first fieldset as well.
        self._fieldsets[0][1]['fields'].append('tags')
class ArticleAdminForm(PageFormAdmin):
    """Admin form for articles: publish date, categories, authors, images."""
    publish_date = forms.DateTimeField(widget=DateTimeWidget(attrs={'id':"datetime-picker"}, usel10n = True))
    categories = forms.ModelMultipleChoiceField(queryset=Category.objects.filter(), widget=forms.CheckboxSelectMultiple, required=False)
    authors = forms.ModelMultipleChoiceField(queryset=Author.objects.filter(), widget=forms.CheckboxSelectMultiple, required=False)
    images = forms.CharField(widget=MultipleFileWidget, required=False)
    class Meta:
        model = Article
    def __init__(self, *args, **kwargs):
        from django.contrib.contenttypes.models import ContentType
        super(ArticleAdminForm, self).__init__(*args, **kwargs)
        # Hide the categories field entirely when the feature is off.
        if not blog_settings.BLOG_ENABLE_CATEGORIES:
            del self.fields['categories']
        # Tell the multi-file widget which object it is attached to.
        if self.instance:
            widget_attrs = {
                'content_type': ContentType.objects.get_for_model(self.instance).id,
                'object_pk': self.instance.id,
            }
        else:
            widget_attrs = {'content_type': False, 'object_pk': False}
        self.fields['images'].widget.attrs.update(widget_attrs)
class ArticleAdmin(reversion.VersionAdmin, PublishingWorkflowAdmin):
    """Version-tracked admin for blog articles with publishing workflow."""
    form = ArticleAdminForm
    translation_form_class = ArticleTranslationForm
    ordering = ['-publish_date']
    # Override the list display from PublishingWorkflowAdmin
    def get_list_display(self, request, obj=None):
        # NOTE(review): the two branches differ in their last column
        # ('content' vs 'languages') — confirm 'content' is intentional here.
        if not settings.PREFIX_DEFAULT_LOCALE:
            return ['title', 'is_published', 'approval', 'publish_date', 'template', 'content']
        else:
            return ['title', 'is_published', 'approval', 'publish_date', 'template', 'languages']
    fieldsets = (
        ('Settings', {
            'classes': ('default',),
            'fields': ('display_title', 'template', 'dataset', 'parent', 'publish_date' )
        }),
        ('Authors', {
            #'description':_('The page template'),
            'classes': ('default',),
            'fields': ('authors',)
        }),
        ('Images', {
            #'description':_('The page template'),
            'classes': ('default',),
            'fields': ('images',)
        }),
    )
    # The Categories fieldset is only shown when the feature is enabled.
    if blog_settings.BLOG_ENABLE_CATEGORIES:
        fieldsets = fieldsets + (
            ('Categories', {
                #'description':_('The page template'),
                'classes': ('default',),
                'fields': ('categories',)
            }),
        )
    # if blog_settings.ENABLE_TAGGING:
    #     fieldsets = fieldsets + (
    #         ('', {
    #             'classes': ('default',),
    #             'fields': ('tags',)
    #         }),
    #     )
    class Media:
        css = {
            "all": ("admin/css/page.css",)
        }
        js = ("admin/js/page.js",)
    # def get_urls(self):
    #     from django.conf.urls import patterns, url
    #     urls = super(ArticleAdmin, self).get_urls()
    #     my_urls = patterns('',
    #         url(r'translation/(?P<page_id>[-\w]+)/(?P<language_code>[-\w]+)/', self.admin_site.admin_view(add_edit_translation), {'translation_class':self.model.CMSMeta.translation_class}, name='add_edit_translation' ),
    #     )
    #     return my_urls + urls
admin.site.register(Article, ArticleAdmin)
class ArticleDataSetAdmin(reversion.VersionAdmin):
    """Version-tracked admin for article datasets."""
    form = PageDataSetAdminForm
admin.site.register(ArticleDataSet, ArticleDataSetAdmin)
# Category translation
class CategoryTranslationInline(TranslationInline):
    """Inline editor for per-language category translations."""
    model = CategoryTranslation
    # Auto-fill the slug from the translated title in the admin UI.
    prepopulated_fields = {'slug': ('title',)}
# Category
class CategoryForm(forms.ModelForm):
    """Model form for Category using the project-standard CSS classes."""
    required_css_class = 'required'
    error_css_class = 'errorfield'
    class Meta:
        model = Category
        # NOTE(review): no ``fields``/``exclude`` declared; newer Django
        # versions require one of them — confirm the targeted version.
class CategoryAdmin(MPTTModelAdmin):
    """Tree-aware admin for blog categories."""
    form = CategoryForm
    list_display = ["title", "identifier", "published", 'order_id', 'languages']
    inlines = (CategoryTranslationInline, )
    mptt_indent_field = 'title'
    mptt_level_indent = 20
    def title(self, obj):
        """Title of the default-language translation, or a fallback notice."""
        translation = obj.translated()
        if not translation:
            return _('No translation available for default language')
        return translation.title
    def languages(self, obj):
        """Render one flag icon per available translation."""
        flags = [
            u'<img src="/static/admin/img/flags/%s.png" alt="" rel="tooltip" data-title="%s">'
            % (t.language_code, t.__unicode__())
            for t in obj.get_translations()
        ]
        return ' '.join(flags)
    languages.allow_tags = True
    languages.short_description = 'Translations'
    def get_list_display(self, request, obj=None):
        """Hide the languages column when only the default locale is used."""
        columns = ["title", "identifier", "published", 'order_id']
        if settings.PREFIX_DEFAULT_LOCALE:
            columns.append('languages')
        return columns
    fieldsets = (
        ('Settings', {
            'classes': ('default',),
            'fields': ('published', 'parent', 'order_id', 'identifier', )
        }),
    )
    class Media:
        css = {
            "all": ("admin/css/page.css",)
        }
        js = ("admin/js/page.js",)
# Categories are optional; only expose their admin when enabled.
if blog_settings.BLOG_ENABLE_CATEGORIES:
    admin.site.register(Category, CategoryAdmin)
# Author translation
class AuthorTranslationInline(TranslationInline):
    """Inline editor for per-language author translations."""
    model = AuthorTranslation
class AuthorForm(forms.ModelForm):
    """Model form for Author with an image-preview widget for the photo."""
    required_css_class = 'required'
    error_css_class = 'errorfield'
    photo = forms.ImageField(label=_('Photo'), widget=AdminImageWidget, required=False)
    class Meta:
        model = Author
        # NOTE(review): no ``fields``/``exclude`` declared; newer Django
        # versions require one of them — confirm the targeted version.
# Author
class AuthorAdmin(admin.ModelAdmin):
    """Admin for blog authors."""
    form = AuthorForm
    list_display = ["title", "identifier", "published", 'order_id', 'languages']
    inlines = (AuthorTranslationInline, )
    def title(self, obj):
        """Full name shown in the change list."""
        return '%s %s' % (obj.first_name, obj.last_name)
    def languages(self, obj):
        """Render one flag icon per available translation."""
        flags = [
            u'<img src="/static/admin/img/flags/%s.png" alt="" rel="tooltip" data-title="%s">'
            % (t.language_code, t.__unicode__())
            for t in obj.get_translations()
        ]
        return ' '.join(flags)
    languages.allow_tags = True
    languages.short_description = 'Translations'
    def get_list_display(self, request, obj=None):
        """Hide the languages column when only the default locale is used."""
        columns = ["title", "identifier", "published", 'order_id']
        if settings.PREFIX_DEFAULT_LOCALE:
            columns.append('languages')
        return columns
    fieldsets = (
        ('Settings', {
            'classes': ('default',),
            'fields': ('published', 'first_name', 'last_name', 'photo', 'order_id', 'identifier', )
        }),
    )
    class Media:
        css = {
            "all": ("admin/css/page.css",)
        }
        #js = ("admin/js/page.js",)
admin.site.register(Author, AuthorAdmin)
| |
# -*- coding: UTF-8 -*-
import basic, os, shutil
import re
from functions import formatTextareaInput, prettyItemBonus
from misc import miscController
class itemsController(basic.defaultController):
    """Controller for user-generated items: creation, editing and viewing."""
    # Directory where user-generated content is stored.
    DIR = './ugc/'
    # Item names: latin letters, whitespace, -, + and apostrophe.
    # re.U+re.I sums the UNICODE and IGNORECASE flag bits (equivalent to
    # re.U | re.I, which is the idiomatic spelling).
    RE_CHECK_NAME = re.compile('^[a-zA-Z\s\-\+\']+$', re.U+re.I)
@basic.methods
def methods(self, params = {}):
return {
'type_of_form': {
'add_item': self.addNewItem,
'edit_item': self.editItem,
'delete_item': self.deleteItem
}
}
@basic.printpage
def printPage(self, page, params):
return {
'craft_item': self.printCraftItemPage,
'item': self.printItemPage,
'edit_item': self.printItemEditPage
}
    # --------------------------------------------------------------------------------------------------
    # Misc
    def getItem(self, params, logged_check = False, self_item_check = False):
        """Load a crafted item by ``params['id']`` with permission checks.

        :param params: request parameters; ``params['id']`` selects the item.
        :param logged_check: when True, require a logged-in player
            (error 1002 otherwise).
        :param self_item_check: when True, redirect away unless the current
            player may edit the item (author of an unsold item, admin,
            or moderator).
        :return: the item dict, or ``{'error': <code>}`` on failure
            (5001: not found, 5002: rejected, 5003: not approved).
        """
        if logged_check and not self.cur_player:
            return {'error': 1002}
        item = False
        if 'id' in params and params['id']:
            # Lookup failures are swallowed; item stays False -> error 5001.
            try:
                item = self.model.items.getCraftedItem(params['id'])
            except Exception:
                pass
        if not item:
            return{'error': 5001}
        else:
            # Admins, moderators and the author bypass visibility checks.
            if not (self.cur_player and
                ('login_admin' in self.cur_player and self.cur_player['login_admin'] or
                'login_moderator' in self.cur_player and self.cur_player['login_moderator'] or
                self.cur_player['login_id'] == item['author'])
                ):
                # Rejected items are hidden from everyone but their author;
                # unapproved items are hidden from everyone else entirely.
                if 'reject' in item and item['reject']:
                    if not (self.cur_player and self.cur_player['login_id'] == item['author']):
                        return {'error': 5002}
                elif not 'approve' in item or not item['approve']['approved']:
                    return {'error': 5003}
            if self_item_check:
                can_edit = (item['author'] == self.cur_player['login_id'] and not item['sale_info']['active']) or 'login_admin' in self.cur_player or 'login_moderator' in self.cur_player
                if not can_edit:
                    self.sbuilder.httpRedirect('../')
            return item
def getPlayerBuyedItems(self, _id):
return self.model.items.getPlayerBuyedItems(_id)
    # --------------------------------------------------------------------------------------------------
    # Page methods
    def addNewItem(self, params):
        """Validate form input and create a new crafted item for the player.

        On success the player is redirected to the creation center; on any
        failure ``params`` is updated in place with the error details.
        """
        def isEnoughResources(params):
            # Creating an item costs ore; attaching a rune additionally
            # costs enchanted ore ('eore').
            is_enough = self.cur_player['login_resources']['ore'] >= self.balance.ORE_COST_PER_ITEM
            if 'rune' in params:
                is_enough = is_enough and self.cur_player['login_resources']['eore'] >= int(params['rune'])
            return is_enough
        if self.balance.MIN_LEVEL_TO_CREATE > self.cur_player['login_lvl']:
            return self.sbuilder.throwWebError(6001)
        # Minimum cost scales with item level; fall back to the player's own
        # level when the submitted level is not a valid integer.
        try:
            min_cost = self.balance.getItemMinCost({'lvl': int(params['level'])})
        except Exception:
            min_cost = self.balance.getItemMinCost({'lvl': self.cur_player['login_lvl']})
        rules = {
            'name': {'min_length':3, 'max_length': 40, 'match': self.RE_CHECK_NAME, 'not_dublicate': {'col_crafted': 'name'}},
            'desc': {'min_length':4, 'max_length': 1000},
            'img': {'not_null': 1},
            'level': {'gt': 0, 'not_null':1, 'int': 1},
            'item_type': {'not_null':1},
            'cost': {'int': 1, 'gt': min_cost-1, 'lt': 1000000 ,'not_null': 1}
        }
        status = self.checkParams(params, rules)
        if status['status']:
            if isEnoughResources(params):
                crafted_item = self.model.crafted_item()
                # Move the uploaded image out of the templates folder into
                # the image buffer; an absent file leaves img empty.
                original_file = self.sbuilder.core_settings.APP_DIR+self.sbuilder.core_settings.TEMPLATES_FOLDER+params['img']
                if os.path.exists(original_file):
                    dest_file_name = original_file.split("/")[-1]
                    dest_file = self.sbuilder.core_settings.APP_DIR+self.sbuilder.core_settings.TEMPLATES_FOLDER+self.sbuilder.core_settings.IMAGE_BUFFER_FOLDER+dest_file_name
                    shutil.copyfile(original_file,dest_file)
                    os.unlink(original_file)
                    params['img'] = self.sbuilder.core_settings.RESIZED_IMG_PATH+'items/'+dest_file_name
                else:
                    params['img'] = ""
                # Clamp the cost up to the level-based minimum.
                cost = int(params['cost'])
                if cost < min_cost-1:
                    cost = min_cost
                crafted_item.data.update({
                    'name': params['name'].strip().title(),
                    'desc': formatTextareaInput(params['desc']),
                    'author': self.cur_player['login_id'],
                    'img': params['img'],
                    'cost': cost,
                    'lvl_min': int(params['level']),
                    'lvl_max': self.balance.max_lvl,
                })
                crafted_item.data.update(self.model.misc.getImageInfo(params))
                # item_type may be "type:view" or a bare type id.
                item_type_data = params['item_type'].split(':')
                if len(item_type_data) > 1:
                    crafted_item.data['type'] = int(item_type_data[0])
                    crafted_item.data['view'] = item_type_data[1]
                else:
                    crafted_item.data['type'] = int(params['item_type'])
                    crafted_item.data['view'] = self.balance.item_types[int(params['item_type'])]
                # Sum the stat bonuses; the cap grows with player level.
                bonus = {}
                total_max_bonus = int(self.cur_player['login_lvl'])*2+10
                total_value = 0
                for key in ['str', 'dex', 'int', 'luck', 'DEF', 'DMG', 'HP', 'MP']:
                    try:
                        value = int(params[key])
                    except Exception:
                        value = 0
                    if value != 0:
                        total_value += value
                        bonus.update({key: value})
                if total_value <= total_max_bonus:
                    crafted_item.data.update({'bonus': bonus})
                    if 'rune' in params:
                        eore_added = int(params['rune'])
                    else:
                        eore_added = False
                    self.model.items.addCraftedItem(self.cur_player['login_id'], crafted_item.data, eore_added)
                    params.update({'operation': 'add_item', 'operation_result': 'ok'})
                    self.sbuilder.httpRedirect(self.core.loaded_data['site_address']+'/u/creation_center?creation=ok&type=item')
                else:
                    # NOTE(review): validation passed, so status['errors'] is
                    # likely empty here — the bonus-cap failure is not
                    # actually reported to the user. Confirm intended.
                    params.update({'operation': 'add_item', 'operation_result': 'error', 'errors': status['errors']})
            else:
                # NOTE(review): same as above — the real cause (insufficient
                # resources) is not reflected in status['errors'].
                params.update({'operation': 'add_item', 'operation_result': 'error', 'errors': status['errors']})
        else:
            params.update({'operation': 'add_item', 'operation_result': 'error', 'errors': status['errors']})
def editItem(self, params):
    """Validate and apply a player's edits to one of their crafted items.

    Redirects to the item page on success, or back to the edit form when
    validation fails.
    """
    # login + ownership are enforced by getItem
    item = self.getItem(params, logged_check=True, self_item_check=True)
    min_cost = self.balance.getItemMinCost({'lvl': item['lvl_min']})
    rules = {
        'desc': {'min_length':4, 'max_length': 1000},
        'item_type': {'not_null':1},
        'cost': {'int': 1, 'gt': min_cost-1, 'lt': 1000000 ,'not_null': 1}
    }
    # only re-validate (and duplicate-check) the name if it actually changed
    if item['name'] != params['name']:
        rules.update({'name': {'min_length':3, 'max_length': 40, 'match': self.RE_CHECK_NAME, 'not_dublicate': {'col_crafted': 'name'}}})
    status = self.checkParams(params, rules)
    if status['status']:
        new_item_data = {}
        # item_type is either 'typeId:view' or a bare type id
        item_type_data = params['item_type'].split(':')
        if len(item_type_data) > 1:
            new_item_data['type'] = int(item_type_data[0])
            new_item_data['view'] = item_type_data[1]
        else:
            new_item_data['type'] = int(params['item_type'])
            new_item_data['view'] = self.balance.item_types[int(params['item_type'])]
        # clamp cost up to the level-based minimum
        cost = int(params['cost'])
        if cost < min_cost-1:
            cost = min_cost
        try:
            # best-effort: move a freshly uploaded image from the templates
            # folder into the image buffer; any failure (e.g. no new image)
            # simply leaves 'img' unchanged
            original_file = self.sbuilder.core_settings.APP_DIR+self.sbuilder.core_settings.TEMPLATES_FOLDER+params['img']
            if os.path.exists(original_file):
                dest_file_name = original_file.split("/")[-1]
                dest_file = self.sbuilder.core_settings.APP_DIR+self.sbuilder.core_settings.TEMPLATES_FOLDER+self.sbuilder.core_settings.IMAGE_BUFFER_FOLDER+dest_file_name
                shutil.copyfile(original_file,dest_file)
                os.unlink(original_file)
                new_item_data.update({'img':self.sbuilder.core_settings.RESIZED_IMG_PATH+'items/'+dest_file_name})
        except Exception:
            pass
        new_item_data.update({
            'name': params['name'].strip(),
            'desc': formatTextareaInput(params['desc']),
            'cost': cost
        })
        new_item_data.update(self.model.misc.getImageInfo(params))
        # record the previous values of every changed field so moderators
        # can review the edit
        old_data = {}
        for key in ['name', 'desc', 'cost', 'type', 'img']:
            if key in new_item_data and new_item_data[key] != item[key]:
                old_data.update({key: item[key]})
        for key in ['link', 'name', 'email', 'twitter']:
            if key in new_item_data['img_info'] and new_item_data['img_info'][key] and key in item['img_info']:
                old_data.update({'Artwork: '+key: item['img_info'][key]})
        # admins/moderators bypass the approval queue (and the edit is
        # logged); regular players bypass it only when nothing but the
        # cost changed, or nothing changed at all
        no_need_approve = 'login_admin' in self.cur_player and self.cur_player['login_admin'] or 'login_moderator' in self.cur_player and self.cur_player['login_moderator']
        if not no_need_approve:
            no_need_approve = 'cost' in old_data and len(old_data) == 1 or not old_data
        else:
            self.model.misc.writeToLog(self.cur_player['login_id'], {
                'action': 'item edit',
                'item_id': item['_id']
            })
        new_item_data.update({'old_data': old_data})
        self.model.items.updateItemData(item['_id'], new_item_data, no_need_approve)
        self.sbuilder.httpRedirect('/u/item?id='+params['id']+'&edit=ok')
    # validation failed (or redirect above did not stop execution)
    self.sbuilder.httpRedirect('/u/edit_item?id='+params['id']+'&edit=fail')
def deleteItem(self, params):
    """Delete one of the current player's crafted items, then redirect."""
    # login + ownership are enforced by getItem
    item = self.getItem(params, logged_check=True, self_item_check=True)
    self.model.items.deleteItem(item, self.cur_player['login_name'])
    self.sbuilder.httpRedirect('/u/creation_center?delete=item')
# --------------------------------------------------------------------------------------------------
# Print pages
def printCraftItemPage(self, fields, params):
    """Render the 'craft new item' form, enforcing all preconditions.

    Checks in order: logged in, UGC globally enabled, minimum level,
    per-player UGC ban, rules agreement, and finally the ore cost.
    """
    fields.update({self.title: 'Craft new item'})
    if not self.cur_player:
        return self.sbuilder.throwWebError(1002)
    # site-wide UGC kill-switch page, if active
    response = self._printUGCDisablePage(fields)
    if response: return response
    if self.balance.MIN_LEVEL_TO_CREATE > self.cur_player['login_lvl']:
        return self.sbuilder.throwWebError(6001)
    if self.cur_player and self.cur_player['login_ugc_disabled']:
        return self.sbuilder.httpRedirect('/u/create')
    player = self.model.players.getPlayer(self.cur_player['login_name'],'game', flags = ['no_messages'])
    # players must accept the content rules before creating anything
    if not ('agree_with_rules' in player and player['agree_with_rules']):
        return basic.defaultController._printTemplate(self, 'rules_agree_form', fields)
    fields.update({
        'player': player,
        'max_stat_level': self.balance.getMaxStatsForItemCreate(player['lvl'])
    })
    # crafting costs ore; show the "not enough" page otherwise
    if player['resources']['ore'] >= self.sbuilder.balance.ORE_COST_PER_ITEM:
        return basic.defaultController._printTemplate(self, 'create_item', fields)
    else:
        return basic.defaultController._printTemplate(self, 'not_enough/ne_item', fields)
def printItemPage(self, fields, params):
    """Render the public page for a single crafted item."""
    item = self.getItem(params)
    if 'error' in item:
        return self.sbuilder.throwWebError(item['error'], 'item')
    # expand raw stat bonuses into display-ready strings
    item.update(prettyItemBonus(item, self.balance.stats_name))
    author = self.model.players.getPlayerBy_ID(item['author'], {'name':1})
    if author:
        item.update({'author_name': author['name']})
    item['img'] = '/'+item['img']+'_fit.png'
    item['img_info'] = miscController.formatArtworkInfo(item['img_info'])
    fields.update(item)
    likes = self.model.items.getItemLikes(item['_id'])
    # resolve who rejected the item, if it was rejected
    if 'reject' in item:
        try:
            rejecter = self.model.players.getPlayerBy_ID(item['reject']['rejecter_id'], {'name':1})
            fields.update({'reject_name': rejecter['name']})
        except Exception:
            # fall back when the rejecter can no longer be resolved
            fields.update({'reject_name': 'game'})
    if self.cur_player:
        fields.update({
            'inventory_count': self.model.items.getInventoryCount(self.cur_player['login_id']),
            'inventory_max': self.sbuilder.balance.INVENTORY_SIZE
        })
    fields.update({
        self.title: item['name']+' page',
        'likes': len(likes['people']),
        'is_like': self.cur_player and self.cur_player['login_id'] in likes['people'],
        'is_reported': self.cur_player and self.model.items.isReportItem(item['_id'], self.cur_player['login_id']),
        'reasons': self.balance.getRejectReasons(self.balance.item_reject_reasons),
        'categories': self.balance.categories
    })
    return basic.defaultController._printTemplate(self, 'item_page', fields)
def printItemEditPage(self, fields, params):
    """Render the edit form for one of the current player's items."""
    # login + ownership are enforced by getItem
    item = self.getItem(params, logged_check=True, self_item_check=True)
    item.update(prettyItemBonus(item, self.balance.stats_name))
    # re-encode the type for the form's select widget: type 1 carries a
    # view suffix, everything else is just the numeric id as a string
    item['type'] = '1:'+item['view'] if item['type'] == 1 else str(item['type'])
    fields.update({self.title: 'Edit '+item['name']+' page'})
    fields.update(item)
    return basic.defaultController._printTemplate(self, 'item_edit_page', fields)
# Registration descriptor consumed by the site builder: the controller
# class plus the '/u/...' URL endpoints it serves.
data = {
    'class': itemsController,
    'type': ['u'],
    'urls': ['market', 'craft_item', 'my_items', 'waiting_items', 'item', 'edit_item']
}
| |
"""build config functions"""
import os.path
import glob
import yaml
from collections import OrderedDict
from mod import log, util, dep
from mod.tools import cmake, make, ninja, xcodebuild
from mod import emscripten, nacl, android
# non-cross-compiling platforms
native_platforms = [
    'osx',
    'linux',
    'win32',
    'win64'
]
# supported cmake generators
generators = [
    'Unix Makefiles',
    'Ninja',
    'Xcode',
    'Visual Studio 12',
    'Visual Studio 12 Win64',
    'Visual Studio 14',
    'Visual Studio 14 Win64',
    'CodeBlocks - Ninja',
    'CodeBlocks - Unix Makefiles',
    'CodeLite - Ninja',
    'CodeLite - Unix Makefiles',
    'Eclipse CDT4 - Ninja',
    'Eclipse CDT4 - Unix Makefiles',
    'KDevelop3',
    'KDevelop3 - Unix Makefiles',
    'Kate - Ninja',
    'Kate - Unix Makefiles',
    'Sublime Text 2 - Ninja',
    'Sublime Text 2 - Unix Makefiles'
]
# supported build tools (see check_build_tool)
build_tools = [
    'make',
    'ninja',
    'xcodebuild',
    'cmake'
]
# supported build types (see valid_build_type)
build_types = [
    'Release',
    'Debug',
    'Profiling'
]
# default config name per host platform
# NOTE(review): keys must match util.get_host_platform() return values
# ('win' here vs 'win32'/'win64' in native_platforms) — confirm
default_config = {
    'osx': 'osx-xcode-debug',
    'linux': 'linux-make-debug',
    'win': 'win64-vs2013-debug',
}
#-------------------------------------------------------------------------------
def valid_generator(name) :
    """test if provided cmake generator name is valid
    :param name: generator name (e.g. 'Unix Makefiles', 'Ninja', ...)
    :returns: True if generator name is valid
    """
    # a generator is valid iff it appears in the supported list above
    is_known = name in generators
    return is_known
#-------------------------------------------------------------------------------
def valid_build_tool(name) :
    """test if provided build tool name is valid
    :param name: a build tool name (make, ninja, ...)
    :returns: True if build tool name is valid
    """
    # valid iff listed in the supported build_tools above
    is_known = name in build_tools
    return is_known
#-------------------------------------------------------------------------------
def valid_build_type(name) :
    """test if provided build type name is valid
    :param name: build type (Debug, Release, ...)
    :returns: True if build type is valid
    """
    # valid iff listed in the supported build_types above
    is_known = name in build_types
    return is_known
#-------------------------------------------------------------------------------
def get_default_config() :
    """get the default config name for this platform
    :returns: default config name for this host platform
    """
    # NOTE(review): raises KeyError if util.get_host_platform() returns a
    # value not present in default_config — confirm the mapping keys
    return default_config[util.get_host_platform()]
#-------------------------------------------------------------------------------
def get_toolchain(fips_dir, proj_dir, cfg) :
    """get the toolchain path location for a config, this first checks
    for a 'cmake-toolchain' attribute, and if this does not exist, builds
    a xxx.toolchain.cmake file from the platform name (only for cross-
    compiling platforms). Toolchain files are searched in the
    following locations:
    - a fips-toolchains subdirectory in the project directory
    - a fips-toolchains subdirectory in all imported projects
    - finally in the cmake-toolchains subdirectory of the fips directory
    :param fips_dir: absolute path to fips
    :param proj_dir: absolute path to the current project
    :param cfg: a loaded config object
    :returns: path to toolchain file or None for non-cross-compiling
    """
    # ignore native target platforms
    if 'platform' in cfg :
        if cfg['platform'] in native_platforms :
            return None
    else :
        # NOTE(review): execution continues after log.error; if it does not
        # abort, cfg['platform'] below raises KeyError — confirm
        log.error("config has no 'platform' attribute!'")
    # build toolchain file name
    toolchain = None
    if 'cmake-toolchain' in cfg :
        toolchain = cfg['cmake-toolchain']
    else :
        toolchain = '{}.toolchain.cmake'.format(cfg['platform'])
    # look for toolchain file in current project directory
    toolchain_path = '{}/fips-toolchains/{}'.format(proj_dir, toolchain)
    if os.path.isfile(toolchain_path) :
        return toolchain_path
    else :
        # look for toolchain in all imported directories
        _, imported_projs = dep.get_all_imports_exports(fips_dir, proj_dir)
        for imported_proj_name in imported_projs :
            imported_proj_dir = util.get_project_dir(fips_dir, imported_proj_name)
            toolchain_path = '{}/fips-toolchains/{}'.format(imported_proj_dir, toolchain)
            if os.path.isfile(toolchain_path) :
                return toolchain_path
        else :
            # for/else: no imported project had the file,
            # try the fips directory
            toolchain_path = '{}/cmake-toolchains/{}'.format(fips_dir, toolchain)
            if os.path.isfile(toolchain_path) :
                return toolchain_path
    # fallthrough: no toolchain file found
    return None
#-------------------------------------------------------------------------------
def exists(pattern, proj_dirs) :
    """test if at least one matching config exists
    :param pattern: config name pattern (e.g. 'linux-make-*')
    :param proj_dirs: array of toplevel dirs to search (must have /configs subdir)
    :returns: True if at least one matching config exists
    """
    # short-circuits on the first directory with a matching .yml file
    return any(glob.glob('{}/configs/{}.yml'.format(d, pattern)) for d in proj_dirs)
#-------------------------------------------------------------------------------
def get_config_dirs(fips_dir, proj_dir) :
    """return list of config directories, including all imports
    :param fips_dir: absolute fips directory
    :param proj_dir: absolute project directory
    :returns: list of all directories with config files
    """
    # fips' own configs directory always comes first
    dirs = [ fips_dir + '/configs' ]
    if fips_dir != proj_dir :
        success, result = dep.get_all_imports_exports(fips_dir, proj_dir)
        if success :
            # append each imported project's fips-configs dir, if present
            for dep_proj_name in result :
                dep_proj_dir = util.get_project_dir(fips_dir, dep_proj_name)
                dep_configs_dir = dep_proj_dir + '/fips-configs'
                if os.path.isdir(dep_configs_dir) :
                    dirs.append(dep_configs_dir)
        else :
            # imports could not be resolved; continue with what we have
            log.warn("missing import directories, please run 'fips fetch'")
    return dirs
#-------------------------------------------------------------------------------
def list(fips_dir, proj_dir, pattern) :
    """return { dir : [cfgname, ...] } in fips_dir/configs and
    proj_dir/fips-configs
    :param fips_dir: absolute fips directory
    :param proj_dir: absolute project directory
    :param pattern: global pattern for config-name(s)
    :returns: a map of matching configs per dir
    """
    # NOTE: shadows the builtin 'list' within this module
    # NOTE(review): 'pattern' is documented but unused — every '*.yml'
    # file is listed regardless; confirm intent
    dirs = get_config_dirs(fips_dir, proj_dir)
    res = OrderedDict()
    for curDir in dirs :
        res[curDir] = []
        paths = glob.glob('{}/*.yml'.format(curDir))
        for path in paths :
            # strip directory and '.yml' extension to get the config name
            fname = os.path.split(path)[1]
            fname = os.path.splitext(fname)[0]
            res[curDir].append(fname)
    return res
#-------------------------------------------------------------------------------
def load(fips_dir, proj_dir, pattern) :
    """load one or more matching configs from fips and current project dir
    :param fips_dir: absolute fips directory
    :param proj_dir: absolute project directory
    :param pattern: config name pattern (e.g. 'linux-make-*')
    :returns: an array of loaded config objects
    """
    dirs = get_config_dirs(fips_dir, proj_dir)
    configs = []
    for curDir in dirs :
        paths = glob.glob('{}/{}.yml'.format(curDir, pattern))
        for path in paths :
            try :
                with open(path, 'r') as f :
                    # NOTE: yaml.load without an explicit Loader is unsafe on
                    # untrusted input; configs are project-local here, but
                    # consider yaml.safe_load
                    cfg = yaml.load(f)
                folder, fname = os.path.split(path)
                # patch path, folder, and name
                cfg['path'] = path
                cfg['folder'] = folder
                cfg['name'] = os.path.splitext(fname)[0]
                if 'defines' not in cfg :
                    cfg['defines'] = None
                configs.append(cfg)
            # 'as' form works on Python 2.6+ AND 3.x (the old 'except X, e'
            # form is a syntax error on Python 3); str(e) replaces the
            # removed .message attribute
            except yaml.error.YAMLError as e :
                log.error('YML parse error: {}', str(e))
    return configs
#-------------------------------------------------------------------------------
def check_build_tool(fips_dir, tool_name) :
    """check if a build tool is installed
    :param fips_dir: absolute fips directory
    :param tool_name: one of 'cmake', 'make', 'ninja', 'xcodebuild'
    :returns: True if the tool exists, False for unknown tool names
    """
    # dispatch table instead of an if/elif chain; each tool module
    # implements the same check_exists interface
    checkers = {
        'cmake': cmake,
        'make': make,
        'ninja': ninja,
        'xcodebuild': xcodebuild,
    }
    tool = checkers.get(tool_name)
    return tool.check_exists(fips_dir) if tool else False
#-------------------------------------------------------------------------------
def check_sdk(fips_dir, platform_name) :
    """check whether an external crossplatform-SDK is installed
    :param fips_dir: absolute fips directory
    :param platform_name: target platform name
    :returns: True if the SDK exists (or the platform needs no SDK)
    """
    # only these platforms require an external SDK; all others pass
    sdk_modules = {
        'emscripten': emscripten,
        'pnacl': nacl,
        'android': android,
    }
    sdk = sdk_modules.get(platform_name)
    return sdk.check_exists(fips_dir) if sdk else True
#-------------------------------------------------------------------------------
def check_config_valid(fips_dir, proj_dir, cfg, print_errors=False) :
    """check if provided config is valid, and print errors if not
    :param fips_dir: absolute fips directory
    :param proj_dir: absolute project directory
    :param cfg: a loaded config object
    :param print_errors: also log each message via log.error when True
    :returns: (True, [ messages ]) tuple with result and error messages
    """
    messages = []
    valid = True
    # check whether all required fields are present
    # (NOTE: name and folder should always be present since they are appended
    # during loading)
    required_fields = ['name', 'folder', 'platform', 'generator', 'build_tool', 'build_type']
    for field in required_fields :
        if field not in cfg :
            messages.append("missing field '{}' in '{}'".format(field, cfg['path']))
            valid = False
    # the detail checks below are guarded by field presence, otherwise a
    # malformed config would raise KeyError instead of returning the
    # collected error messages
    # check if the target platform SDK is installed
    if 'platform' in cfg and not check_sdk(fips_dir, cfg['platform']) :
        messages.append("platform sdk for '{}' not installed (see './fips help setup')".format(cfg['platform']))
        valid = False
    # check if the generator name is valid
    if 'generator' in cfg and not valid_generator(cfg['generator']) :
        messages.append("invalid generator name '{}' in '{}'".format(cfg['generator'], cfg['path']))
        valid = False
    # check if build tool is valid
    if 'build_tool' in cfg and not valid_build_tool(cfg['build_tool']) :
        messages.append("invalid build_tool name '{}' in '{}'".format(cfg['build_tool'], cfg['path']))
        valid = False
    # check if the build tool can be found
    if 'build_tool' in cfg and not check_build_tool(fips_dir, cfg['build_tool']) :
        messages.append("build tool '{}' not found".format(cfg['build_tool']))
        valid = False
    # check if build type is valid (Debug, Release, Profiling)
    if 'build_type' in cfg and not valid_build_type(cfg['build_type']) :
        messages.append("invalid build_type '{}' in '{}'".format(cfg['build_type'], cfg['path']))
        valid = False
    # check if the toolchain file can be found (if this is a crosscompiling toolchain)
    if 'platform' in cfg and cfg['platform'] not in native_platforms :
        toolchain_path = get_toolchain(fips_dir, proj_dir, cfg)
        if not toolchain_path :
            messages.append("toolchain file not found for config '{}'!".format(cfg['name']))
            valid = False
    if print_errors :
        for msg in messages :
            log.error(msg, False)
    return (valid, messages)
| |
from app import db
from flask import current_app
from flask.ext.login import UserMixin
from . import login_manager
from hashlib import md5
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import random
import os
import requests
# Recognized artwork categories (mirrors the 'image_types' DB enum below).
IMAGE_TYPES = ['poster', 'series', 'fanart', 'season']
# Series status: full name <-> single-char DB code.
STATUSES = {'Continuing': 'c', 'Ended': 'e', 'On Hiatus': 'h', 'Other': 'o'}
# Add inverse mapping
STATUSES.update(dict((STATUSES[k], k) for k in STATUSES))
# Air day: full name <-> two-char DB code.
DAYS_OF_WEEK = {'Sunday': 'su',
                'Monday': 'mo',
                'Tuesday': 'tu',
                'Wednesday': 'we',
                'Thursday': 'th',
                'Friday': 'fr',
                'Saturday': 'sa'}
# Add inverse mapping
DAYS_OF_WEEK.update(dict((DAYS_OF_WEEK[k], k) for k in DAYS_OF_WEEK))
# Association table: which images a user has selected for their series.
user_images = db.Table('user_images',
                       db.Column('user_id', db.Integer,
                                 db.ForeignKey('users.id'), nullable=False),
                       db.Column('image_id', db.Integer,
                                 db.ForeignKey('images.id'), nullable=False))
# Association table: a user's favorite series.
user_series = db.Table('user_series',
                       db.Column('user_id', db.Integer,
                                 db.ForeignKey('users.id'), nullable=False),
                       db.Column('series_id', db.Integer,
                                 db.ForeignKey('series.id'), nullable=False))
class Role(db.Model):
    """A user role; one-to-many to User via the backref 'role'."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)
    # deleting a role cascades the delete to its users
    users = db.relationship('User', backref='role', cascade="all,delete")

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """An application user (Flask-Login compatible via UserMixin)."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True)
    email = db.Column(db.String(120), index=True, unique=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    # images this user has hand-picked (see Series.all_images)
    images = db.relationship('Image', secondary=user_images, lazy='dynamic')
    # series the user follows
    favorites = db.relationship('Series', secondary=user_series,
                                lazy='dynamic')
    created_at = db.Column(db.DateTime)
    updated_at = db.Column(db.DateTime)
    last_seen = db.Column(db.DateTime)

    def save_images(self, series):
        """Attach all of a series' images to this user and commit."""
        for image in series.images:
            self.images.append(image)
        db.session.merge(self)
        db.session.commit()

    def upcoming(self):
        # NOTE(review): hard-coded placeholder data, not a real query —
        # replace with an episodes lookup before relying on this
        return [
            {
                'air_date': 'April 18, 2014',
                'series': {'name': 'Game of Thrones'},
                'name': 'Song of Ice and Fire',
            },
            {
                'air_date': 'September 11, 2014',
                'series': {'name': 'Breaking Bad'},
                'name': 'Felina',
            },
            {
                'air_date': 'December 7, 2014',
                'series': {'name': 'Star Trek: The Next Generation'},
                'name': 'Inner Light',
            },
        ]

    def avatar(self, size):
        # Gravatar URL keyed on the email's md5; 'mm' default, square `size`.
        # NOTE(review): md5() needs bytes on Python 3 — this assumes Python 2
        return ('http://www.gravatar.com/avatar/' +
                md5(self.email).hexdigest() + '?d=mm&s=' + str(size))

    def __repr__(self):
        return '<User %r>' % (self.name)
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: resolve the id stored in the session back to a
    # User. The id arrives as a string, hence the int() conversion.
    return User.query.get(int(user_id))
class Series(db.Model):
    """A TV series, with its episodes and artwork."""
    __tablename__ = 'series'
    id = db.Column(db.Integer, primary_key=True)
    # two-char day code, see DAYS_OF_WEEK mapping above
    air_day = db.Column(db.Enum('su', 'mo', 'tu', 'we', 'th', 'fr', 'sa',
                                name='day_of_week'))
    air_time = db.Column(db.Time)
    first_aired = db.Column(db.Date)
    network = db.Column(db.String())
    overview = db.Column(db.String())
    rating = db.Column(db.Numeric(3, 1))
    rating_count = db.Column(db.Integer)
    runtime = db.Column(db.Integer)
    name = db.Column(db.String(), nullable=False)
    # single-char status code, see STATUSES mapping above
    status = db.Column(db.Enum('c', 'e', 'h', 'o', name='status'))
    last_updated = db.Column(db.DateTime)
    episodes = db.relationship('Episode', backref='series', lazy='dynamic')
    images = db.relationship('Image', backref='series', lazy='dynamic')

    def __repr__(self):
        return '<Series %r>' % (self.name)

    def __unicode__(self):
        return '<Series %r>' % (self.name)

    def image(self, type, user):
        # Pick one random image of the given type, or "" when none exist.
        # NOTE: 'type' shadows the builtin; kept for interface stability.
        images = self.all_images(type, user)
        return (random.choice(images) if images else "")

    def all_images(self, type, user):
        # A favorited series uses the user's own image selection; otherwise
        # fall back to the series-level (non-episode) images.
        if self in user.favorites.all():
            return user.images.filter_by(series=self, type=type).all()
        else:
            return self.images.filter_by(type=type, episode=None).all()
class Episode(db.Model):
    """A single episode belonging to a Series."""
    __tablename__ = 'episodes'
    id = db.Column(db.Integer, primary_key=True)
    # owning series (required)
    series_id = db.Column(db.Integer, db.ForeignKey('series.id'),
                          nullable=False)
    season = db.Column(db.Integer)
    episode_number = db.Column(db.Integer)
    name = db.Column(db.String())
    overview = db.Column(db.String())
    rating = db.Column(db.Numeric(3, 1))
    rating_count = db.Column(db.Integer)
    air_date = db.Column(db.Date)
    images = db.relationship('Image', backref='episode', lazy='dynamic')

    def __repr__(self):
        return '<Episode %r>' % (self.name)

    def __unicode__(self):
        return '<Episode %r>' % (self.name)

    def image(self, user):
        """Return a random image for this episode, or "" when none exist.

        `user` is accepted for interface parity with Series.image but is
        not used here.
        """
        # evaluate the dynamic-relationship query once instead of twice
        # (the original called self.images.all() for both the test and
        # the choice, issuing two queries)
        images = self.images.all()
        return (random.choice(images) if images else "")
class Image(db.Model):
    """Artwork stored in S3; belongs to a series and optionally an episode."""
    __tablename__ = 'images'
    # set only for episode-specific images (e.g. screenshots)
    episode_id = db.Column(db.Integer, db.ForeignKey('episodes.id'),
                           nullable=True)
    id = db.Column(db.Integer, primary_key=True)
    series_id = db.Column(db.Integer, db.ForeignKey('series.id'),
                          nullable=False)
    # original (remote) URL the image was fetched from
    source = db.Column(db.String, nullable=False, unique=True)
    # S3 object key under the configured bucket
    key = db.Column(db.String, nullable=False, unique=True)
    type = db.Column(db.Enum('poster', 'series', 'fanart', 'season',
                             name='image_types'), nullable=False)

    def save(self):
        """Download the source image and store it in S3 (skip if present)."""
        conn = S3Connection(current_app.config['AWS_ACCESS_KEY'],
                            current_app.config['AWS_SECRET_KEY'])
        bucket = conn.get_bucket(current_app.config['AWS_BUCKET'],
                                 validate=False)
        key = Key(bucket, self.key)
        if not key.exists():
            current_app.logger.debug("Saving image: %s" % self.source)
            r = requests.get(self.source)
            # NOTE(review): non-200 responses are silently dropped — confirm
            if r.status_code == 200:
                key.set_contents_from_string(r.content)
        else:
            current_app.logger.debug("Image: %s already saved." % self.key)

    def get_url(self):
        """Return a presigned S3 URL for this image, valid for 600 seconds."""
        conn = S3Connection(current_app.config['AWS_ACCESS_KEY'],
                            current_app.config['AWS_SECRET_KEY'])
        bucket = conn.get_bucket(current_app.config['AWS_BUCKET'],
                                 validate=False)
        key = Key(bucket, self.key)
        return key.generate_url(600)

    def __repr__(self):
        return '<Image %r>' % (self.key)

    def __unicode__(self):
        return '<Image %r>' % (self.key)
| |
"""Tasks related to projects, including fetching repository code, cleaning
``conf.py`` files, and rebuilding documentation.
"""
import fnmatch
import os
import shutil
import json
import logging
import socket
import requests
import datetime
from celery import task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from slumber.exceptions import HttpClientError
from builds.constants import LATEST
from builds.models import Build, Version
from core.utils import send_email, run_on_app_servers
from doc_builder.loader import get_builder_class
from doc_builder.base import restoring_chdir
from doc_builder.environments import DockerEnvironment
from projects.exceptions import ProjectImportError
from projects.models import ImportedFile, Project
from projects.utils import run, make_api_version, make_api_project
from projects.constants import LOG_TEMPLATE
from builds.constants import STABLE
from projects import symlinks
from privacy.loader import Syncer
from tastyapi import api, apiv2
from search.parse_json import process_all_json_files
from search.utils import process_mkdocs_json
from restapi.utils import index_search_request
from vcs_support import utils as vcs_support_utils
import tastyapi
try:
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
except:
from projects.signals import before_vcs, after_vcs, before_build, after_build
log = logging.getLogger(__name__)
# Project slugs that should only get an HTML build (no extra formats).
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
@task(default_retry_delay=7 * 60, max_retries=5)
@restoring_chdir
def update_docs(pk, version_pk=None, build_pk=None, record=True, docker=False,
                search=True, force=False, intersphinx=True, localmedia=True,
                api=None, basic=False, **kwargs):
    """
    The main entry point for updating documentation.
    It handles all of the logic around whether a project is imported or we
    created it. Then it will build the html docs and other requested parts.
    `pk`
        Primary key of the project to update
    `record`
        Whether or not to keep a record of the update in the database. Useful
        for preventing changes visible to the end-user when running commands
        from the shell, for example.
    """
    # Dependency injection to allow for testing
    if api is None:
        api = tastyapi.api
        apiv2 = tastyapi.apiv2
    else:
        apiv2 = api
    start_time = datetime.datetime.utcnow()
    try:
        project_data = api.project(pk).get()
    except HttpClientError:
        # NOTE(review): no return/raise here — on failure project_data is
        # unbound and the next line raises NameError; confirm intent
        log.exception(LOG_TEMPLATE.format(project=pk, version='', msg='Failed to get project data on build. Erroring.'))
    project = make_api_project(project_data)
    # Don't build skipped projects
    if project.skip:
        log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Skipping'))
        return
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Building'))
    version = ensure_version(api, project, version_pk)
    build = create_build(build_pk)
    results = {}
    # Build Servery stuff
    try:
        record_build(api=api, build=build, record=record, results=results, state='cloning')
        vcs_results = setup_vcs(version, build, api)
        if vcs_results:
            results.update(vcs_results)
        if project.documentation_type == 'auto':
            update_documentation_type(version, apiv2)
        if docker or settings.DOCKER_ENABLE:
            # run the whole environment setup + build inside a container
            record_build(api=api, build=build, record=record, results=results, state='building')
            docker = DockerEnvironment(version)
            build_results = docker.build()
            results.update(build_results)
        else:
            record_build(api=api, build=build, record=record, results=results, state='installing')
            setup_results = setup_environment(version)
            results.update(setup_results)
            record_build(api=api, build=build, record=record, results=results, state='building')
            build_results = build_docs(version, force, search, localmedia)
            results.update(build_results)
    except vcs_support_utils.LockTimeout, e:
        results['checkout'] = (423, "", "Version locked, retrying in 5 minutes.")
        log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                     version=version.slug, msg="Unable to lock, will retry"))
        # http://celery.readthedocs.org/en/3.0/userguide/tasks.html#retrying
        # Should completely retry the task for us until max_retries is exceeded
        update_docs.retry(exc=e, throw=False)
    except ProjectImportError, e:
        results['checkout'] = (404, "", 'Failed to import project; skipping build.\n\nError\n-----\n\n%s' % e.message)
        # Close out build in finally with error.
        pass
    except Exception, e:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Top-level Build Failure"), exc_info=True)
        results['checkout'] = (404, "", 'Top-level Build Failure: %s' % e.message)
    finally:
        # always record final state, even on failure
        record_build(api=api, build=build, record=record, results=results, state='finished', start_time=start_time)
        record_pdf(api=api, record=record, results=results, state='finished', version=version)
        log.info(LOG_TEMPLATE.format(project=version.project.slug, version='', msg='Build finished'))
    build_id = build.get('id')
    # Web Server Tasks
    if build_id:
        # result tuples are (exit_code, stdout, stderr); exit code 0 == success
        finish_build.delay(
            version_pk=version.pk,
            build_pk=build_id,
            hostname=socket.gethostname(),
            html=results.get('html', [404])[0] == 0,
            localmedia=results.get('localmedia', [404])[0] == 0,
            search=results.get('search', [404])[0] == 0,
            pdf=version.project.enable_pdf_build,
            epub=version.project.enable_epub_build,
        )
def ensure_version(api, project, version_pk):
    """
    Ensure we're using a sane version.

    Resolve an explicit version pk when given, otherwise fall back to the
    project's LATEST version.
    """
    if version_pk:
        raw_version = api.version(version_pk).get()
    else:
        raw_version = api.version(project.slug).get(slug=LATEST)['objects'][0]
    return make_api_version(raw_version)
def update_documentation_type(version, api):
    """
    Automatically determine the doc type for a user.

    Counts markdown vs reStructuredText files in the checkout and stores
    'mkdocs' or 'sphinx' on the project via the API.
    """
    project = version.project
    os.chdir(project.checkout_path(version.slug))
    filenames = run('find .')[1].split('\n')
    markdown_count = 0
    sphinx_count = 0
    for filename in filenames:
        if fnmatch.fnmatch(filename, '*.md') or fnmatch.fnmatch(filename, '*.markdown'):
            markdown_count += 1
        elif fnmatch.fnmatch(filename, '*.rst'):
            sphinx_count += 1
    # sphinx wins ties
    doc_type = 'mkdocs' if markdown_count > sphinx_count else 'sphinx'
    project_data = api.project(project.pk).get()
    project_data['documentation_type'] = doc_type
    api.project(project.pk).put(project_data)
    project.documentation_type = doc_type
def docker_build(version, search=True, force=False, intersphinx=True,
                 localmedia=True):
    """
    The code that executes inside of docker.

    Sets up the build environment, runs the doc build, and returns the
    merged result dict (build results take precedence on key clashes).
    """
    env_results = setup_environment(version)
    combined = build_docs(version=version, force=force, search=search,
                          localmedia=localmedia)
    combined.update(env_results)
    return combined
def setup_vcs(version, build, api):
    """
    Update the checkout of the repo to make sure it's the latest.
    This also syncs versions in the DB.

    Returns the result dict from update_imported_docs; records the checked
    out commit on the build. Re-raises ProjectImportError after logging.
    """
    log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                 version=version.slug, msg='Updating docs from VCS'))
    try:
        update_output = update_imported_docs(version.pk, api)
        commit = version.project.vcs_repo(version.slug).commit
        if commit:
            build['commit'] = commit
    except ProjectImportError:
        log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                      msg='Failed to import project; skipping build'), exc_info=True)
        raise
    return update_output
@task()
def update_imported_docs(version_pk, api=None):
    """
    Check out or update the given project's repository.

    Returns a dict with a 'checkout' entry holding the VCS command result.
    Also pushes the repo's tags/branches to the API for version syncing.
    """
    # Dependency injection for testing (mirrors update_docs)
    if api is None:
        api = tastyapi.api
    version_data = api.version(version_pk).get()
    version = make_api_version(version_data)
    project = version.project
    ret_dict = {}
    # Make Dirs
    if not os.path.exists(project.doc_path):
        os.makedirs(project.doc_path)
    if not project.vcs_repo():
        raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))
    # non-blocking lock so concurrent builds of the same version bail out
    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        before_vcs.send(sender=version)
        # Get the actual code on disk
        if version:
            log.info(
                LOG_TEMPLATE.format(
                    project=project.slug,
                    version=version.slug,
                    msg='Checking out version {slug}: {identifier}'.format(
                        slug=version.slug,
                        identifier=version.identifier
                    )
                )
            )
            version_slug = version.slug
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.checkout(
                version.identifier,
            )
        else:
            # Does this ever get called?
            log.info(LOG_TEMPLATE.format(
                project=project.slug, version=version.slug, msg='Updating to latest revision'))
            version_slug = LATEST
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.update()
        after_vcs.send(sender=version)
        # Update tags/version
        version_post_data = {'repo': version_repo.repo_url}
        if version_repo.supports_tags:
            version_post_data['tags'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.tags
            ]
        if version_repo.supports_branches:
            version_post_data['branches'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.branches
            ]
        try:
            # NOTE(review): uses module-level apiv2, not the injected api —
            # confirm this is intentional
            apiv2.project(project.pk).sync_versions.post(version_post_data)
        except Exception, e:
            print "Sync Versions Exception: %s" % e.message
    return ret_dict
def setup_environment(version):
    """
    Build the virtualenv and install the project into it.
    Always build projects with a virtualenv.

    Returns a dict of (exit_code, stdout, stderr) results keyed by step:
    'venv', 'doc_builder', optionally 'requirements', and 'install'.
    """
    ret_dict = {}
    project = version.project
    # start from a clean virtualenv build dir
    build_dir = os.path.join(project.venv_path(version=version.slug), 'build')
    if os.path.exists(build_dir):
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='Removing existing build dir'))
        shutil.rmtree(build_dir)
    if project.use_system_packages:
        site_packages = '--system-site-packages'
    else:
        site_packages = '--no-site-packages'
    # Here the command has been modified to support different
    # interpreters.
    ret_dict['venv'] = run(
        '{cmd} {site_packages} {path}'.format(
            cmd='{interpreter} -m virtualenv'.format(
                interpreter=project.python_interpreter),
            site_packages=site_packages,
            path=project.venv_path(version=version.slug)
        )
    )
    # Other code expects sphinx-build to be installed inside the
    # virtualenv. Using the -I option makes sure it gets installed
    # even if it is already installed system-wide (and
    # --system-site-packages is used)
    if project.use_system_packages:
        ignore_option = '-I'
    else:
        ignore_option = ''
    # pinned build toolchain installed into every project's virtualenv
    requirements = ' '.join([
        'sphinx==1.3.1',
        'Pygments==2.0.2',
        'virtualenv==1.10.1',
        'setuptools==1.1',
        'docutils==0.11',
        'mkdocs==0.13.3',
        'mock==1.0.1',
        'pillow==2.6.1',
        'readthedocs-sphinx-ext==0.5.4',
        'sphinx-rtd-theme==0.1.8',
        'alabaster>=0.7,<0.8,!=0.7.5',
        'recommonmark==0.1.1',
    ])
    # install from local wheel cache when possible
    wheeldir = os.path.join(settings.SITE_ROOT, 'deploy', 'wheels')
    ret_dict['doc_builder'] = run(
        (
            '{cmd} install --use-wheel --find-links={wheeldir} -U '
            '{ignore_option} {requirements}'
        ).format(
            cmd=project.venv_bin(version=version.slug, bin='pip'),
            ignore_option=ignore_option,
            wheeldir=wheeldir,
            requirements=requirements,
        )
    )
    # Handle requirements
    requirements_file_path = project.requirements_file
    checkout_path = project.checkout_path(version.slug)
    if not requirements_file_path:
        # auto-discover a requirements file in the docs dir, then repo root
        builder_class = get_builder_class(project.documentation_type)
        docs_dir = builder_class(version).docs_dir()
        for path in [docs_dir, '']:
            for req_file in ['pip_requirements.txt', 'requirements.txt']:
                test_path = os.path.join(checkout_path, path, req_file)
                print('Testing %s' % test_path)
                if os.path.exists(test_path):
                    requirements_file_path = test_path
                    break
    if requirements_file_path:
        os.chdir(checkout_path)
        ret_dict['requirements'] = run(
            '{cmd} install --exists-action=w -r {requirements}'.format(
                cmd=project.venv_bin(version=version.slug, bin='pip'),
                requirements=requirements_file_path))
    # Handle setup.py
    os.chdir(project.checkout_path(version.slug))
    if os.path.isfile("setup.py"):
        if getattr(settings, 'USE_PIP_INSTALL', False):
            ret_dict['install'] = run(
                '{cmd} install --ignore-installed .'.format(
                    cmd=project.venv_bin(version=version.slug, bin='pip')))
        else:
            ret_dict['install'] = run(
                '{cmd} setup.py install --force'.format(
                    cmd=project.venv_bin(version=version.slug,
                                         bin='python')))
    else:
        # sentinel exit code 999: nothing to install, not a failure
        ret_dict['install'] = (999, "", "No setup.py, skipping install")
    return ret_dict
@task()
def build_docs(version, force, search, localmedia):
    """
    This handles the actual building of the documentation

    Runs the HTML builder inside the project's repo lock, then the
    optional search / localmedia / pdf / epub builders, and returns a
    dict mapping step name -> (exit_code, stdout, stderr).
    """
    project = version.project
    results = {}

    before_build.send(sender=version)

    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        html_builder = get_builder_class(project.documentation_type)(version)
        if force:
            html_builder.force()
        html_builder.append_conf()
        results['html'] = html_builder.build()
        if results['html'][0] == 0:
            html_builder.move()

            # Gracefully attempt to move files via task on web workers.
            try:
                move_files.delay(
                    version_pk=version.pk,
                    html=True,
                    hostname=socket.gethostname(),
                )
            except socket.error:
                pass

        # Placeholder result for steps that are disabled for this project.
        fake_results = (999, "Project Skipped, Didn't build",
                        "Project Skipped, Didn't build")
        if 'mkdocs' in project.documentation_type:
            if search:
                try:
                    search_builder = get_builder_class('mkdocs_json')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        search_builder.move()
                # Was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt; narrowed to Exception.
                except Exception:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)

        if 'sphinx' in project.documentation_type:
            # Search builder. Creates JSON from docs and sends it to the
            # server.
            if search:
                try:
                    search_builder = get_builder_class('sphinx_search')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        # Copy json for safe keeping
                        search_builder.move()
                except Exception:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)

            # Local media builder for singlepage HTML download archive
            if localmedia:
                try:
                    localmedia_builder = get_builder_class('sphinx_singlehtmllocalmedia')(version)
                    results['localmedia'] = localmedia_builder.build()
                    if results['localmedia'][0] == 0:
                        localmedia_builder.move()
                except Exception:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="Local Media HTML Build Error"), exc_info=True)

            # Optional build steps
            if version.project.slug not in HTML_ONLY and not project.skip:
                if project.enable_pdf_build:
                    pdf_builder = get_builder_class('sphinx_pdf')(version)
                    results['pdf'] = pdf_builder.build()
                    # Always move pdf results even when there's an error.
                    pdf_builder.move()
                else:
                    results['pdf'] = fake_results
                if project.enable_epub_build:
                    epub_builder = get_builder_class('sphinx_epub')(version)
                    results['epub'] = epub_builder.build()
                    if results['epub'][0] == 0:
                        epub_builder.move()
                else:
                    results['epub'] = fake_results

    after_build.send(sender=version)

    return results
def create_build(build_pk):
    """
    Old placeholder for build creation. Now it just gets it from the database.

    Returns the API build record (with linked-resource keys stripped),
    or an empty dict when no primary key is given.
    """
    if not build_pk:
        return {}
    build = api.build(build_pk).get()
    # Drop resource links the API echoes back; callers only want fields.
    for linked_key in ('project', 'version', 'resource_uri', 'absolute_uri'):
        build.pop(linked_key, None)
    return build
def _concat_step_logs(build, results, steps, out_key, err_key):
    """Append each recorded step's stdout/stderr to the build's log fields.

    ``results[step]`` is a (exit_code, stdout, stderr) tuple; missing or
    malformed entries are skipped silently, as the original code did.
    """
    for step in steps:
        if step not in results:
            continue
        build[out_key] += "\n\n%s\n-----\n\n" % step
        try:
            build[out_key] += results.get(step)[1]
        except (IndexError, TypeError):
            pass
        build[err_key] += "\n\n%s\n-----\n\n" % step
        try:
            build[err_key] += results.get(step)[2]
        except (IndexError, TypeError):
            pass


def record_build(api, record, build, results, state, start_time=None):
    """
    Record a build by hitting the API.

    Returns nothing
    """
    if not record:
        return None

    build['builder'] = socket.gethostname()

    setup_steps = ['checkout', 'venv', 'doc_builder', 'requirements', 'install']
    output_steps = ['html']
    all_steps = setup_steps + output_steps

    build['state'] = state
    if 'html' in results:
        # Overall success tracks only the HTML step's exit code.
        build['success'] = results['html'][0] == 0
    else:
        build['success'] = False

    # Worst (highest) exit code across all steps; missing steps count as 0.
    build['exit_code'] = max([results.get(step, [0])[0] for step in all_steps])

    build['setup'] = build['setup_error'] = ""
    build['output'] = build['error'] = ""

    if start_time:
        build['length'] = (datetime.datetime.utcnow() - start_time).total_seconds()

    # The setup and output concatenation loops were duplicated inline;
    # factored into one helper.
    _concat_step_logs(build, results, setup_steps, 'setup', 'setup_error')
    _concat_step_logs(build, results, output_steps, 'output', 'error')

    # Attempt to stop unicode errors on build reporting
    # (Python 2 ``basestring``/``str.decode`` — this module targets py2.)
    for key, val in build.items():
        if isinstance(val, basestring):
            build[key] = val.decode('utf-8', 'ignore')

    try:
        api.build(build['id']).put(build)
    except Exception:
        log.error("Unable to post a new build", exc_info=True)
def record_pdf(api, record, results, state, version):
    """Report the outcome of a PDF build to the API.

    No-ops (returns None) unless recording is enabled, the project is a
    sphinx project, and PDF builds are turned on for it.
    """
    if not record or 'sphinx' not in version.project.documentation_type:
        return None
    if not version.project.enable_pdf_build:
        return None
    try:
        if 'pdf' in results:
            pdf_exit = results['pdf'][0]
            pdf_output = results['pdf'][1]
            pdf_error = results['pdf'][2]
            pdf_success = (pdf_exit == 0)
        else:
            pdf_exit = 999
            pdf_success = False
            pdf_output = pdf_error = "PDF Failed"

        pdf_output = pdf_output.decode('utf-8', 'ignore')
        pdf_error = pdf_error.decode('utf-8', 'ignore')

        # LaTeX's exit code lies; trust its output message instead.
        if 'Output written on' in pdf_output:
            pdf_success = True

        api.build.post(dict(
            state=state,
            project='/api/v1/project/%s/' % version.project.pk,
            version='/api/v1/version/%s/' % version.pk,
            success=pdf_success,
            type='pdf',
            output=pdf_output,
            error=pdf_error,
            exit_code=pdf_exit,
            builder=socket.gethostname(),
        ))
    except Exception:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Unable to post a new build"), exc_info=True)
###########
# Web tasks
###########
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False,
                 localmedia=False, search=False, pdf=False, epub=False):
    """
    Build Finished, do house keeping bits

    Marks the version built, syncs artifacts to production paths,
    refreshes symlinks, and fans out the post-build delayed tasks.
    """
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)

    if html:
        # A built HTML artifact activates the version and marks it built.
        version.active = True
        version.built = True
        version.save()

    move_files(version_pk=version_pk, hostname=hostname, html=html,
               localmedia=localmedia, search=search, pdf=pdf, epub=epub)

    # Refresh every symlink flavour for this version.
    for make_symlink in (symlinks.symlink_cnames,
                         symlinks.symlink_translations,
                         symlinks.symlink_subprojects):
        make_symlink(version)
    if version.project.single_version:
        symlinks.symlink_single_version(version)
    else:
        symlinks.remove_symlink_single_version(version)

    # Delayed tasks
    update_static_metadata.delay(version.project.pk)
    fileify.delay(version.pk, commit=build.commit)
    update_search.delay(version.pk, commit=build.commit)

    # NOTE(review): presumably html=False here signals a failed HTML build,
    # and 423 a lock conflict not worth notifying about — confirm with caller.
    if not html and version.slug != STABLE and build.exit_code != 423:
        send_notifications.delay(version.pk, build_pk=build.pk)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """Copy selected build artifacts to their production media paths.

    Each flag selects one artifact type; copies go via Syncer to the
    given host. PDFs are copied whenever requested (see note below).
    """
    version = Version.objects.get(pk=version_pk)

    if html:
        from_path = version.project.artifact_path(version=version.slug, type=version.project.documentation_type)
        target = version.project.rtd_build_path(version.slug)
        Syncer.copy(from_path, target, host=hostname)

    # (artifact_type, production_media_type) pairs to copy, in the same
    # order as the original branch-per-flag code.
    doc_type = version.project.documentation_type
    selected = []
    if 'sphinx' in doc_type:
        if localmedia:
            selected.append(('sphinx_localmedia', 'htmlzip'))
        if search:
            selected.append(('sphinx_search', 'json'))
        # Always move PDF's because the return code lies.
        if pdf:
            selected.append(('sphinx_pdf', 'pdf'))
        if epub:
            selected.append(('sphinx_epub', 'epub'))
    if 'mkdocs' in doc_type:
        if search:
            selected.append(('mkdocs_json', 'json'))

    for artifact_type, media_type in selected:
        from_path = version.project.artifact_path(version=version.slug, type=artifact_type)
        to_path = version.project.get_production_media_path(
            type=media_type, version_slug=version.slug, include_file=False)
        Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit):
    """Collect a version's per-page JSON and push it to the search index."""
    version = Version.objects.get(pk=version_pk)

    doc_type = version.project.documentation_type
    if 'sphinx' in doc_type:
        page_list = process_all_json_files(version, build_dir=False)
    elif 'mkdocs' in doc_type:
        page_list = process_mkdocs_json(version, build_dir=False)
    else:
        log.error('Unknown documentation type: %s' % doc_type)
        return

    log_msg = ' '.join(page['path'] for page in page_list)
    log.info("(Search Index) Sending Data: %s [%s]" % (version.project.slug, log_msg))
    index_search_request(
        version=version,
        page_list=page_list,
        commit=commit,
        project_scale=0,
        page_scale=0,
        # Don't index sections to speed up indexing.
        # They aren't currently exposed anywhere.
        section=False,
    )
@task(queue='web')
def fileify(version_pk, commit):
    """
    Create ImportedFile objects for all of a version's files.

    This is a prereq for indexing the docs for search.
    It also causes celery-haystack to kick off an index of the file.
    """
    version = Version.objects.get(pk=version_pk)
    project = version.project

    if not commit:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Imported File not being built because no commit information'))
        # Bail out, as the log message says. The original fell through
        # here, which would create ImportedFiles with an empty commit and
        # then delete every ImportedFile from real commits below.
        return

    path = project.rtd_build_path(version.slug)
    if path:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Creating ImportedFiles'))
        for root, dirnames, filenames in os.walk(path):
            for filename in filenames:
                if fnmatch.fnmatch(filename, '*.html'):
                    dirpath = os.path.join(root.replace(path, '').lstrip('/'),
                                           filename.lstrip('/'))
                    obj, created = ImportedFile.objects.get_or_create(
                        project=project,
                        version=version,
                        path=dirpath,
                        name=filename,
                        commit=commit,
                    )
                    if not created:
                        # Existing row: save() re-triggers the search index.
                        obj.save()
        # Delete ImportedFiles from previous versions
        ImportedFile.objects.filter(project=project, version=version).exclude(commit=commit).delete()
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='No ImportedFile files'))
@task(queue='web')
def send_notifications(version_pk, build_pk):
    """Fan out webhook and email notifications for a finished build."""
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)

    for hook in version.project.webhook_notifications.all():
        webhook_notification(version, build, hook.url)

    emails = version.project.emailhook_notifications.all().values_list('email', flat=True)
    for email in emails:
        email_notification(version, build, email)
def email_notification(version, build, email):
    """Send the build-failed email for ``build`` to one address."""
    log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                  msg='sending email to: %s' % email))
    domain = getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org')
    context = {
        'version': version,
        'project': version.project,
        'build': build,
        'build_url': 'https://{0}{1}'.format(domain, build.get_absolute_url()),
        'unsub_url': 'https://{0}{1}'.format(
            domain,
            reverse('projects_notifications', args=[version.project.slug])),
    }

    # Prefer the short commit hash in the subject when we have one.
    if build.commit:
        title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
    else:
        title = _('Failed: {project.name} ({version.verbose_name})').format(**context)

    send_email(
        email,
        title,
        template='projects/email/build_failed.txt',
        template_html='projects/email/build_failed.html',
        context=context
    )
def webhook_notification(version, build, hook_url):
    """POST a JSON build-status payload to ``hook_url``."""
    # Bug fix: ``project`` was referenced below but never bound,
    # raising NameError on every call.
    project = version.project
    data = json.dumps({
        'name': project.name,
        'slug': project.slug,
        'build': {
            'id': build.id,
            'success': build.success,
            'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
        }
    })
    log.debug(LOG_TEMPLATE.format(project=project.slug, version='', msg='sending notification to: %s' % hook_url))
    requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
    """Update static metadata JSON file

    Metadata settings include the following project settings:

    version
      The default version for the project, default: `latest`

    language
      The default language for the project, default: `en`

    languages
      List of languages built by linked translation projects.
    """
    project = Project.objects.get(pk=project_pk)
    if not path:
        path = project.static_metadata_path()

    log.info(LOG_TEMPLATE.format(
        project=project.slug,
        version='',
        msg='Updating static metadata',
    ))
    translations = [trans.language for trans in project.translations.all()]
    languages = set(translations)
    # Convert to JSON safe types
    metadata = {
        'version': project.default_version,
        'language': project.language,
        'languages': list(languages),
        'single_version': project.single_version,
    }
    try:
        # Context manager closes the handle even if json.dump raises;
        # the original open/close pair leaked it in that case.
        with open(path, 'w+') as fh:
            json.dump(metadata, fh)
        Syncer.copy(path, path, host=socket.gethostname(), file=True)
    except (AttributeError, IOError) as e:
        log.debug(LOG_TEMPLATE.format(
            project=project.slug,
            version='',
            msg='Cannot write to metadata.json: {0}'.format(e)
        ))
#@periodic_task(run_every=crontab(hour="*", minute="*/5", day_of_week="*"))
def update_docs_pull(record=False, force=False):
    """
    A high-level interface that will update all of the projects.

    This is mainly used from a cronjob or management command.
    ``force`` is accepted for interface compatibility but unused here.
    """
    for version in Version.objects.filter(built=True):
        try:
            update_docs(
                pk=version.project.pk, version_pk=version.pk, record=record)
        # Was ``except Exception, e`` (py2-only syntax, ``e`` unused);
        # this form works on both Python 2.6+ and 3.
        except Exception:
            # Keep sweeping: one broken project must not stop the rest.
            log.error("update_docs_pull failed", exc_info=True)
##############
# Random Tasks
##############
@task()
def remove_dir(path):
    """
    Remove a directory on the build/celery server.

    This is mainly a wrapper around shutil.rmtree so that app servers
    can kill things on the build server.

    :param path: absolute directory path to delete; raises OSError via
        shutil.rmtree if it does not exist or cannot be removed.
    """
    log.info("Removing %s" % path)
    shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
    """ Remove artifacts from the web servers. """
    version = Version.objects.get(pk=version_pk)
    # Production media first (pdf, epub, htmlzip), then the built HTML tree
    # — same order as the original one-call-per-type version.
    for media_type in ('pdf', 'epub', 'htmlzip'):
        media_path = version.project.get_production_media_path(
            type=media_type, version_slug=version.slug)
        run_on_app_servers('rm -rf %s' % media_path)
    run_on_app_servers('rm -rf %s' % version.project.rtd_build_path(version=version.slug))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.