"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.utils.fixes import comb
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
# Tiny linearly separable 2-D problem: three points per class.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# Held-out query points and their expected labels.
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
# (note: reuses the same rng, so the boston permutation depends on the
# iris permutation having been drawn first)
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
# Registries of the estimators under test, keyed by class name; the check_*
# helpers below look estimators up by name.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}
# Union of all three registries.
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Check classification on a toy dataset."""
    forest_cls = FOREST_CLASSIFIERS[name]

    # Default max_features.
    forest = forest_cls(n_estimators=10, random_state=1)
    forest.fit(X, y)
    assert_array_equal(forest.predict(T), true_result)
    assert_equal(10, len(forest))

    # One feature considered per split.
    forest = forest_cls(n_estimators=10, max_features=1, random_state=1)
    forest.fit(X, y)
    assert_array_equal(forest.predict(T), true_result)
    assert_equal(10, len(forest))

    # apply() returns one leaf index per (sample, tree) pair.
    leaves = forest.apply(X)
    assert_equal(leaves.shape, (len(X), forest.n_estimators))
def test_classification_toy():
    # Run the toy-problem check for every forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, clf_name
def check_iris_criterion(name, criterion):
    # Check consistency on dataset iris.
    forest_cls = FOREST_CLASSIFIERS[name]

    # Default max_features: training accuracy should be high.
    est = forest_cls(n_estimators=10, criterion=criterion,
                     random_state=1)
    est.fit(iris.data, iris.target)
    train_score = est.score(iris.data, iris.target)
    assert_greater(train_score, 0.9,
                   "Failed with criterion %s and score = %f"
                   % (criterion, train_score))

    # Restricting to 2 features per split still has to beat chance.
    est = forest_cls(n_estimators=10, criterion=criterion,
                     max_features=2, random_state=1)
    est.fit(iris.data, iris.target)
    train_score = est.score(iris.data, iris.target)
    assert_greater(train_score, 0.5,
                   "Failed with criterion %s and score = %f"
                   % (criterion, train_score))
def test_iris():
    # Every classifier crossed with every classification criterion.
    for clf_name, criterion in product(FOREST_CLASSIFIERS,
                                       ("gini", "entropy")):
        yield check_iris_criterion, clf_name, criterion
def check_boston_criterion(name, criterion):
    # Check consistency on dataset boston house prices.
    forest_cls = FOREST_REGRESSORS[name]

    # Default max_features.
    reg = forest_cls(n_estimators=5, criterion=criterion,
                     random_state=1)
    reg.fit(boston.data, boston.target)
    train_score = reg.score(boston.data, boston.target)
    assert_greater(train_score, 0.94,
                   "Failed with max_features=None, criterion %s "
                   "and score = %f" % (criterion, train_score))

    # Restricted max_features.
    reg = forest_cls(n_estimators=5, criterion=criterion,
                     max_features=6, random_state=1)
    reg.fit(boston.data, boston.target)
    train_score = reg.score(boston.data, boston.target)
    assert_greater(train_score, 0.95,
                   "Failed with max_features=6, criterion %s "
                   "and score = %f" % (criterion, train_score))
def test_boston():
    # Every regressor crossed with every regression criterion.
    for reg_name, criterion in product(FOREST_REGRESSORS,
                                       ("mse", "mae", "friedman_mse")):
        yield check_boston_criterion, reg_name, criterion
def check_regressor_attributes(name):
    # Regression models should not have a classes_ attribute.
    reg = FOREST_REGRESSORS[name](random_state=0)
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(reg, attr))

    # Still absent after fitting.
    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(reg, attr))
def test_regressor_attributes():
    # Run the attribute check for every forest regressor.
    for reg_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, reg_name
def check_probability(name):
    # predict_proba rows must sum to one and agree with predict_log_proba.
    forest_cls = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        est = forest_cls(n_estimators=10, random_state=1, max_features=1,
                         max_depth=1)
        est.fit(iris.data, iris.target)
        proba = est.predict_proba(iris.data)
        assert_array_almost_equal(np.sum(proba, axis=1),
                                  np.ones(iris.data.shape[0]))
        assert_array_almost_equal(proba,
                                  np.exp(est.predict_log_proba(iris.data)))
def test_probability():
    # Run the probability checks for every forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_probability, clf_name
def check_importances(name, criterion, X, y):
    # Feature importances should single out the 3 informative features.
    ForestEstimator = FOREST_ESTIMATORS[name]

    est = ForestEstimator(n_estimators=20, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    n_important = np.sum(importances > 0.1)
    assert_equal(importances.shape[0], 10)
    assert_equal(n_important, 3)

    # Importances must not depend on n_jobs.
    importances = est.feature_importances_
    est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)

    # Importances should be (nearly) invariant under a global rescaling of
    # the sample weights.
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=20, random_state=0,
                          criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))

    for scale in [0.5, 10, 100]:
        est = ForestEstimator(n_estimators=20, random_state=0,
                              criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
    # Synthetic problem with exactly 3 informative features out of 10.
    X, y = datasets.make_classification(n_samples=500, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)
    for clf_name, criterion in product(FOREST_CLASSIFIERS,
                                       ("gini", "entropy")):
        yield check_importances, clf_name, criterion, X, y
    for reg_name, criterion in product(FOREST_REGRESSORS,
                                       ("mse", "friedman_mse", "mae")):
        yield check_importances, reg_name, criterion, X, y
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).
    def binomial(k, n):
        # Binomial coefficient that is 0 outside the valid range of k.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)

    def entropy(samples):
        # Shannon entropy (base 2) of a vector of discrete labels.
        n_samples = len(samples)
        entropy = 0.

        for count in np.bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)

        return entropy

    def mdi_importance(X_m, X, y):
        # Theoretical mean-decrease-of-impurity importance of feature X_m,
        # obtained by averaging the conditional information gain of X_m over
        # every subset B of the other features and every value b of B.
        n_samples, n_features = X.shape

        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]

        imp = 0.

        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))

            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    # ``bool`` instead of the deprecated ``np.bool`` alias
                    # (removed in NumPy 1.24); behavior is identical.
                    mask_b = np.ones(n_samples, dtype=bool)

                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]

                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)

                    if n_samples_b > 0:
                        children = []

                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])

                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))

        return imp

    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])

    # First 7 columns are binary features, last column is the target.
    X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
    n_features = X.shape[1]

    # Compute true importances
    true_importances = np.zeros(n_features)

    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)

    # Estimate importances with totally randomized trees
    # (ExtraTrees with max_features=1)
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)

    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators

    # Check correctness: importances sum to the target entropy, and match
    # the theoretical values on average.
    assert_almost_equal(entropy(y), sum(importances))
    assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
    # Reading feature_importances_ before fit must raise ValueError.
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    # Run the unfitted-access check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, est_name
def check_oob_score(name, X, y, n_estimators=20):
    # The OOB score should be a good estimate of the generalization error,
    # measured here against a held-out half of the data.
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    n_samples = X.shape[0]
    half = n_samples // 2
    est.fit(X[:half, :], y[:half])
    test_score = est.score(X[half:, :], y[half:])

    if name in FOREST_CLASSIFIERS:
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)

    # A single bootstrapped estimator cannot cover every sample out-of-bag,
    # so a warning is expected.
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    for clf_name in FOREST_CLASSIFIERS:
        yield check_oob_score, clf_name, iris.data, iris.target
        # sparse (csc) input
        yield check_oob_score, clf_name, csc_matrix(iris.data), iris.target
        # non-contiguous targets in classification
        yield check_oob_score, clf_name, iris.data, iris.target * 2 + 1
    for reg_name in FOREST_REGRESSORS:
        yield check_oob_score, reg_name, boston.data, boston.target, 50
        # sparse (csc) input
        yield (check_oob_score, reg_name, csc_matrix(boston.data),
               boston.target, 50)
def check_oob_score_raise_error(name):
    ForestEstimator = FOREST_ESTIMATORS[name]

    if name in FOREST_TRANSFORMERS:
        # Transformers reject the oob_score constructor argument entirely,
        # and their _set_oob_score hook is unimplemented.
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)
    else:
        # Unfitted / no bootstrap / no oob_score: no oob_score_ attribute.
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            forest = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                     random_state=0)
            assert_false(hasattr(forest, "oob_score_"))

        # oob_score requires bootstrap sampling.
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    # Run the oob_score error-handling check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, est_name
def check_gridsearch(name):
    # Forests must be usable as GridSearchCV base estimators.
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    # Check that base trees can be grid-searched.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, clf_name
def check_parallel(name, X, y):
    """Check parallel computations in classification"""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3,
                                     random_state=0)
    forest.fit(X, y)
    assert_equal(len(forest), 10)

    # Predictions must not depend on the number of jobs.
    forest.set_params(n_jobs=1)
    pred_serial = forest.predict(X)
    forest.set_params(n_jobs=2)
    pred_parallel = forest.predict(X)
    assert_array_almost_equal(pred_serial, pred_parallel, 3)
def test_parallel():
    # Classifiers on iris, regressors on boston.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_parallel, clf_name, iris.data, iris.target
    for reg_name in FOREST_REGRESSORS:
        yield check_parallel, reg_name, boston.data, boston.target
def check_pickle(name, X, y):
    # A pickle round-trip must preserve the model class and its score.
    est = FOREST_ESTIMATORS[name](random_state=0)
    est.fit(X, y)
    score_before = est.score(X, y)

    restored = pickle.loads(pickle.dumps(est))
    assert_equal(type(restored), est.__class__)
    score_after = restored.score(X, y)
    assert_equal(score_before, score_after)
def test_pickle():
    # Use every other sample to keep the fixtures small.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_pickle, clf_name, iris.data[::2], iris.target[::2]
    for reg_name in FOREST_REGRESSORS:
        yield check_pickle, reg_name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
    # Check estimators on multi-output problems.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    predictions = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(predictions, y_test)

    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # One probability array per output, shaped
            # (n_samples, n_classes_of_that_output).
            for probas in (est.predict_proba(X_test),
                           est.predict_log_proba(X_test)):
                assert_equal(len(probas), 2)
                assert_equal(probas[0].shape, (4, 2))
                assert_equal(probas[1].shape, (4, 4))
def test_multioutput():
    # Run the multi-output check for classifiers and regressors alike.
    for est_name in list(FOREST_CLASSIFIERS) + list(FOREST_REGRESSORS):
        yield check_multioutput, est_name
def check_classes_shape(name):
    # Test that n_classes_ and classes_ have proper shape.
    forest_cls = FOREST_CLASSIFIERS[name]

    # Single output: scalar n_classes_, flat classes_.
    est = forest_cls(random_state=0).fit(X, y)
    assert_equal(est.n_classes_, 2)
    assert_array_equal(est.classes_, [-1, 1])

    # Multi-output: one entry per output.
    y_multi = np.vstack((y, np.array(y) * 2)).T
    est = forest_cls(random_state=0).fit(X, y_multi)
    assert_array_equal(est.n_classes_, [2, 2])
    assert_array_equal(est.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    # Run the classes-shape check for every forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, clf_name
def test_random_trees_dense_type():
    # With sparse_output=False, RandomTreesEmbedding must return a dense
    # ndarray rather than a scipy sparse matrix.
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    embedded = hasher.fit_transform(X)
    assert_equal(type(embedded), np.ndarray)
def test_random_trees_dense_equal():
    # Dense and sparse outputs of RandomTreesEmbedding must contain the
    # same values for the same random state.
    dense_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    sparse_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)
    X, y = datasets.make_circles(factor=0.5)
    dense_out = dense_hasher.fit_transform(X)
    sparse_out = sparse_hasher.fit_transform(X)
    assert_array_equal(sparse_out.toarray(), dense_out)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
    # Hashing the circles dataset should make it linearly separable, even
    # after reduction to two SVD dimensions.
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)

    # fit() followed by transform() must match fit_transform().
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())

    # Exactly one active leaf per sample and per tree.
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)

    # Two SVD components of the embedding are linearly separable.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    # Dense and sparse inputs must hash to the same embedding.
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    # Predictions must be identical no matter how many jobs trained the
    # forest.
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)

    forests = [
        RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                               random_state=12345).fit(X_train, y_train)
        for n_jobs in [1, 2, 3, 8, 16, 32]
    ]

    X_test = rng.randn(n_samples, n_features)
    all_probas = [forest.predict_proba(X_test) for forest in forests]
    for probas_a, probas_b in zip(all_probas, all_probas[1:]):
        assert_array_almost_equal(probas_a, probas_b)
def test_distribution():
    # Check that totally randomized trees are drawn from the expected
    # distribution of tree structures.
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # Serialize each tree as "feature,threshold/" tokens ("-" marks a
        # leaf) so structurally identical trees collapse onto one key.
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    # Sort by observed frequency (fraction of the forest).
    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # NOTE(review): these two draws use the unseeded global np.random rather
    # than the local `rng`, so this data is not reproducible across runs —
    # confirm whether rng.randint was intended here.
    X[:, 0] = np.random.randint(0, 2, 1000)
    X[:, 1] = np.random.randint(0, 3, 1000)
    y = rng.rand(1000)
    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                       for f, t in zip(tree.tree_.feature,
                                       tree.tree_.threshold))
        uniques[tree] += 1
    # All 8 possible tree structures should appear in the forest.
    uniques = [(count, tree) for tree, count in uniques.items()]
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
    X, y = hastie_X, hastie_y
    # Test precedence of max_leaf_nodes over max_depth.
    ForestEstimator = FOREST_ESTIMATORS[name]

    # With max_leaf_nodes set, the depth-1 cap is ignored.
    forest = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                             n_estimators=1, random_state=0).fit(X, y)
    assert_greater(forest.estimators_[0].tree_.max_depth, 1)

    # Without max_leaf_nodes, max_depth is honoured.
    forest = ForestEstimator(max_depth=1, n_estimators=1,
                             random_state=0).fit(X, y)
    assert_equal(forest.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    # Run the precedence check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, est_name
def check_min_samples_split(name):
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]

    # Out-of-range values must raise at fit time.
    for bad_value in (-1, 0, 1.1):
        assert_raises(ValueError,
                      ForestEstimator(min_samples_split=bad_value).fit, X, y)

    # Both the integer and the fractional forms must be honoured: every
    # internal (split) node keeps at least the requested sample count.
    for min_split in (10, 0.5):
        forest = ForestEstimator(min_samples_split=min_split, n_estimators=1,
                                 random_state=0)
        forest.fit(X, y)
        tree = forest.estimators_[0].tree_
        internal_mask = tree.children_left != -1
        node_samples = tree.n_node_samples[internal_mask]
        assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
                       "Failed with {0}".format(name))
def test_min_samples_split():
    # Run the min_samples_split check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_split, est_name
def check_min_samples_leaf(name):
    X, y = hastie_X, hastie_y
    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]

    # Out-of-range values must raise at fit time.
    for bad_value in (-1, 0):
        assert_raises(ValueError,
                      ForestEstimator(min_samples_leaf=bad_value).fit, X, y)

    # Integer form: at least 5 samples in every leaf.
    forest = ForestEstimator(min_samples_leaf=5, n_estimators=1,
                             random_state=0)
    forest.fit(X, y)
    leaf_ids = forest.estimators_[0].tree_.apply(X)
    node_counts = np.bincount(leaf_ids)
    leaf_count = node_counts[node_counts != 0]  # drop inner nodes
    assert_greater(np.min(leaf_count), 4,
                   "Failed with {0}".format(name))

    # Fractional form: at least 25% of the samples in every leaf.
    forest = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
                             random_state=0)
    forest.fit(X, y)
    leaf_ids = forest.estimators_[0].tree_.apply(X)
    node_counts = np.bincount(leaf_ids)
    leaf_count = node_counts[node_counts != 0]  # drop inner nodes
    assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
                   "Failed with {0}".format(name))
def test_min_samples_leaf():
    # Run the min_samples_leaf check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, est_name
def check_min_weight_fraction_leaf(name):
    X, y = hastie_X, hastie_y
    # Every leaf must carry at least min_weight_fraction_leaf of the total
    # sample weight.
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    # Sweep the fraction over [0, 0.5]; both DepthFirstTreeBuilder and
    # BestFirstTreeBuilder are exercised via max_leaf_nodes defaults.
    for frac in np.linspace(0, 0.5, 6):
        forest = ForestEstimator(min_weight_fraction_leaf=frac,
                                 n_estimators=1, random_state=0)
        if "RandomForest" in name:
            # Disable bootstrap so every sample's weight reaches the tree.
            forest.bootstrap = False

        forest.fit(X, y, sample_weight=weights)
        leaf_ids = forest.estimators_[0].tree_.apply(X)
        node_weights = np.bincount(leaf_ids, weights=weights)
        leaf_weights = node_weights[node_weights != 0]  # drop inner nodes
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * forest.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, forest.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    # Run the weight-fraction check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, est_name
def check_sparse_input(name, X, X_sparse, y):
    # Fitting on sparse input must give the same model as fitting on dense.
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)

    def assert_same(getter):
        # Compare one output of the sparse-fit model against the dense-fit.
        assert_array_almost_equal(getter(sparse), getter(dense))

    assert_same(lambda est: est.apply(X))

    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_same(lambda est: est.predict(X))
        assert_same(lambda est: est.feature_importances_)

    if name in FOREST_CLASSIFIERS:
        assert_same(lambda est: est.predict_proba(X))
        assert_same(lambda est: est.predict_log_proba(X))

    if name in FOREST_TRANSFORMERS:
        assert_same(lambda est: est.transform(X).toarray())
        assert_same(lambda est: est.fit_transform(X).toarray())
def test_sparse_input():
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)
    # Every estimator crossed with every supported sparse format.
    for est_name, sparse_format in product(FOREST_ESTIMATORS,
                                           (csr_matrix, csc_matrix,
                                            coo_matrix)):
        yield check_sparse_input, est_name, X, sparse_format(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)

    # Dense layouts: default, C-order, F-order, contiguous.
    dense_variants = [
        np.asarray(iris.data, dtype=dtype),
        np.asarray(iris.data, order="C", dtype=dtype),
        np.asarray(iris.data, order="F", dtype=dtype),
        np.ascontiguousarray(iris.data, dtype=dtype),
    ]
    for X in dense_variants:
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)

    # Sparse layouts, when the underlying splitter supports them.
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
            X = sparse_format(iris.data, dtype=dtype)
            y = iris.target
            assert_array_equal(est.fit(X, y).predict(X), y)

    # Strided (non-contiguous) view.
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    # Both float dtypes for classifiers and regressors.
    dtypes = [np.float64, np.float32]
    for clf_name, dtype in product(FOREST_CLASSIFIERS, dtypes):
        yield check_memory_layout, clf_name, dtype
    for reg_name, dtype in product(FOREST_REGRESSORS, dtypes):
        yield check_memory_layout, reg_name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    # Fitting on 1-D input must raise; so must predicting 1-D input after a
    # 2-D fit.
    ForestEstimator = FOREST_ESTIMATORS[name]
    assert_raises(ValueError,
                  ForestEstimator(n_estimators=1, random_state=0).fit, X, y)

    est = ForestEstimator(random_state=0)
    est.fit(X_2d, y)
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
    # A single iris feature, flat and reshaped to a column.
    X_1d = iris.data[:, 0]
    X_col = iris.data[:, 0].reshape((-1, 1))
    for est_name in FOREST_ESTIMATORS:
        yield check_1d_input, est_name, X_1d, X_col, iris.target
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    # (each per-output dict sums to the same total per class overall).
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    # (a 100x sample_weight on class 1 should equal class_weight={1: 100}).
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative:
    # weight^2 alone must match weight combined with an equal class_weight.
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Using a Python 2.x list as the sample_weight parameter used to raise
    # an exception. This test makes sure such code will now run correctly.
    clf = ForestClassifier()
    sample_weight = [1.] * len(iris.data)
    clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
    # Run the class_weight check for every forest classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weights, clf_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # class_weight presets and per-output dicts must work for multi-output.
    forest_cls = FOREST_CLASSIFIERS[name]
    y_multi = np.vstack((y, np.array(y) * 2)).T

    # 'balanced' preset.
    forest_cls(class_weight='balanced', random_state=0).fit(X, y_multi)

    # Explicit per-output weight dicts.
    forest_cls(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
               random_state=0).fit(X, y_multi)

    # smoke test for balanced subsample
    forest_cls(class_weight='balanced_subsample',
               random_state=0).fit(X, y_multi)
def test_class_weight_balanced_and_bootstrap_multi_output():
    # Run the multi-output class_weight check for every classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, clf_name
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    forest_cls = FOREST_CLASSIFIERS[name]
    y_multi = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string.
    est = forest_cls(class_weight='the larch', random_state=0)
    assert_raises(ValueError, est.fit, X, y)
    assert_raises(ValueError, est.fit, X, y_multi)

    # A class_weight preset combined with warm_start warns.
    est = forest_cls(class_weight='balanced', warm_start=True,
                     random_state=0)
    assert_warns(UserWarning, est.fit, X, y)
    assert_warns(UserWarning, est.fit, X, y_multi)

    # Multi-output requires a list of dicts (or a preset).
    est = forest_cls(class_weight=1, random_state=0)
    assert_raises(ValueError, est.fit, X, y_multi)

    # The list must contain one dict per output.
    est = forest_cls(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, est.fit, X, y_multi)
def test_class_weight_errors():
    # Run the class_weight error check for every classifier.
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, clf_name
def check_warm_start(name, random_state=42):
    # Growing a forest incrementally with warm_start must give a forest of
    # the right size that matches a single fit of the final size.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    est_ws = None
    for n_estimators in [5, 10]:
        if est_ws is None:
            est_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            est_ws.set_params(n_estimators=n_estimators)
        est_ws.fit(X, y)
        assert_equal(len(est_ws), n_estimators)

    est_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    est_no_ws.fit(X, y)

    # Same per-tree seeds and same leaf assignments either way.
    assert_equal(set(tree.random_state for tree in est_ws),
                 set(tree.random_state for tree in est_no_ws))
    assert_array_equal(est_ws.apply(X), est_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    # Run the warm-start growth check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start, est_name
def check_warm_start_clear(name):
    # Test if fit clears state and grows a new forest when warm_start==False.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    reference = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                                random_state=1)
    reference.fit(X, y)

    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                          random_state=2)
    est.fit(X, y)  # inits state
    est.set_params(warm_start=False, random_state=1)
    est.fit(X, y)  # clears old state and equals the reference fit
    assert_array_almost_equal(est.apply(X), reference.apply(X))
def test_warm_start_clear():
    # Run the warm-start reset check for every estimator.
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, est_name
def check_warm_start_smaller_n_estimators(name):
    """A warm-started refit with a smaller ``n_estimators`` must raise."""
    X, y = hastie_X, hastie_y
    estimator_cls = FOREST_ESTIMATORS[name]
    estimator = estimator_cls(n_estimators=5, max_depth=1, warm_start=True)
    estimator.fit(X, y)
    # Shrinking an already-grown ensemble is not supported.
    estimator.set_params(n_estimators=4)
    assert_raises(ValueError, estimator.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Run the shrinking-ensemble error check on every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, estimator_name
def check_warm_start_equal_n_estimators(name):
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    clf.fit(X, y)
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    clf_2.fit(X, y)
    # Now clf_2 equals clf.
    clf_2.set_params(random_state=2)
    # Refitting without growing must warn and leave the ensemble untouched,
    # so the changed random_state has no effect.
    assert_warns(UserWarning, clf_2.fit, X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
    """Run the no-op warm-start check on every forest estimator."""
    for estimator_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, estimator_name
def check_warm_start_oob(name):
    # Test that the warm start computes oob score when asked.
    X, y = hastie_X, hastie_y
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    clf.fit(X, y)
    # Grow the same forest in two steps, turning oob_score on only for the
    # warm-started second fit; the final score must match clf's.
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_2.fit(X, y)
    clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    clf_2.fit(X, y)
    assert_true(hasattr(clf_2, 'oob_score_'))
    assert_equal(clf.oob_score_, clf_2.oob_score_)
    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_3.fit(X, y)
    assert_true(not(hasattr(clf_3, 'oob_score_')))
    clf_3.set_params(oob_score=True)
    # The unchanged-size refit emits a UserWarning; suppress it here.
    ignore_warnings(clf_3.fit)(X, y)
    assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
    """Run the warm-start OOB check on classifiers and regressors."""
    for estimator_name in FOREST_CLASSIFIERS:
        yield check_warm_start_oob, estimator_name
    for estimator_name in FOREST_REGRESSORS:
        yield check_warm_start_oob, estimator_name
def test_dtype_convert(n_classes=15):
    """String class labels must survive a fit/predict round-trip intact."""
    classifier = RandomForestClassifier(random_state=0, bootstrap=False)
    X = np.eye(n_classes)
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])
    result = classifier.fit(X, y).predict(X)
    assert_array_equal(classifier.classes_, y)
    assert_array_equal(result, y)
def check_decision_path(name):
    X, y = hastie_X, hastie_y
    n_samples = X.shape[0]
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                          random_state=1)
    est.fit(X, y)
    # indicator's columns are the concatenated nodes of all trees;
    # n_nodes_ptr[k] is the column offset where tree k's nodes start.
    indicator, n_nodes_ptr = est.decision_path(X)
    assert_equal(indicator.shape[1], n_nodes_ptr[-1])
    assert_equal(indicator.shape[0], n_samples)
    assert_array_equal(np.diff(n_nodes_ptr),
                       [e.tree_.node_count for e in est.estimators_])
    # Assert that leaves index are correct
    leaves = est.apply(X)
    for est_id in range(leaves.shape[1]):
        # Every sample must pass through the leaf apply() reports for it.
        leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
                           for i, j in enumerate(leaves[:, est_id])]
        assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
    """Run the decision-path check on classifiers and regressors."""
    for estimator_name in FOREST_CLASSIFIERS:
        yield check_decision_path, estimator_name
    for estimator_name in FOREST_REGRESSORS:
        yield check_decision_path, estimator_name
def test_min_impurity_split():
    """min_impurity_split must be forwarded to the base trees.

    Regression test for #8006; the parameter is deprecated, so fitting
    must also warn about its successor.
    """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    forest_classes = [RandomForestClassifier, RandomForestRegressor,
                      ExtraTreesClassifier, ExtraTreesRegressor]
    for forest_cls in forest_classes:
        forest = forest_cls(min_impurity_split=0.1)
        forest = assert_warns_message(DeprecationWarning,
                                      "min_impurity_decrease",
                                      forest.fit, X, y)
        for tree in forest.estimators_:
            assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
    """min_impurity_decrease must be forwarded verbatim to every base tree."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    forest_classes = [RandomForestClassifier, RandomForestRegressor,
                      ExtraTreesClassifier, ExtraTreesRegressor]
    for forest_cls in forest_classes:
        forest = forest_cls(min_impurity_decrease=0.1)
        forest.fit(X, y)
        # Simply check if the parameter is passed on correctly. Tree tests
        # will suffice for the actual working of this param
        for tree in forest.estimators_:
            assert_equal(tree.min_impurity_decrease, 0.1)
| bsd-3-clause |
ahhda/sympy | sympy/core/tests/test_trace.py | 99 | 2825 | from sympy import symbols, Matrix, Tuple
from sympy.core.trace import Tr
from sympy.utilities.pytest import raises
def test_trace_new():
    """Exercise Tr construction: linearity, scalar factoring, powers,
    matrices, and the accepted index forms (none, int, list, tuple)."""
    a, b, c, d, Y = symbols('a b c d Y')
    A, B, C, D = symbols('A B C D', commutative=False)
    assert Tr(a + b) == a + b
    assert Tr(A + B) == Tr(A) + Tr(B)
    #check trace args not implicitly permuted
    assert Tr(C*D*A*B).args[0].args == (C, D, A, B)
    # check for mul and adds
    assert Tr((a*b) + ( c*d)) == (a*b) + (c*d)
    # Tr(scalar*A) = scalar*Tr(A)
    assert Tr(a*A) == a*Tr(A)
    assert Tr(a*A*B*b) == a*b*Tr(A*B)
    # since A is symbol and not commutative
    assert isinstance(Tr(A), Tr)
    #POW
    assert Tr(pow(a, b)) == a**b
    assert isinstance(Tr(pow(A, a)), Tr)
    #Matrix
    M = Matrix([[1, 1], [2, 2]])
    assert Tr(M) == 3
    ##test indices in different forms
    #no index
    t = Tr(A)
    assert t.args[1] == Tuple()
    #single index
    t = Tr(A, 0)
    assert t.args[1] == Tuple(0)
    #index in a list
    t = Tr(A, [0])
    assert t.args[1] == Tuple(0)
    t = Tr(A, [0, 1, 2])
    assert t.args[1] == Tuple(0, 1, 2)
    #index is tuple
    t = Tr(A, (0))
    assert t.args[1] == Tuple(0)
    t = Tr(A, (1, 2))
    assert t.args[1] == Tuple(1, 2)
    #trace indices test
    t = Tr((A + B), [2])
    assert t.args[0].args[1] == Tuple(2) and t.args[1].args[1] == Tuple(2)
    t = Tr(a*A, [2, 3])
    assert t.args[1].args[1] == Tuple(2, 3)
    #class with trace method defined
    #to simulate numpy objects
    class Foo:
        def trace(self):
            return 1
    assert Tr(Foo()) == 1
    #argument test
    # check for value error, when either/both arguments are not provided
    raises(ValueError, lambda: Tr())
    raises(ValueError, lambda: Tr(A, 1, 2))
def test_trace_doit():
    """Placeholder for Tr.doit() tests (see TODO below)."""
    a, b, c, d = symbols('a b c d')
    A, B, C, D = symbols('A B C D', commutative=False)
    # TODO: needed while testing reduced density operations, etc.
def test_permute():
    """Tr.permute(n) must cyclically rotate the product's factors; positive
    and negative rotations are checked, plus wrap-around equivalence."""
    A, B, C, D, E, F, G = symbols('A B C D E F G', commutative=False)
    t = Tr(A*B*C*D*E*F*G)
    assert t.permute(0).args[0].args == (A, B, C, D, E, F, G)
    assert t.permute(2).args[0].args == (F, G, A, B, C, D, E)
    assert t.permute(4).args[0].args == (D, E, F, G, A, B, C)
    assert t.permute(6).args[0].args == (B, C, D, E, F, G, A)
    # Rotation by the factor count wraps around (period 7 here).
    assert t.permute(8).args[0].args == t.permute(1).args[0].args
    assert t.permute(-1).args[0].args == (B, C, D, E, F, G, A)
    assert t.permute(-3).args[0].args == (D, E, F, G, A, B, C)
    assert t.permute(-5).args[0].args == (F, G, A, B, C, D, E)
    assert t.permute(-8).args[0].args == t.permute(-1).args[0].args
    # Composite factors (sums, powers) rotate as single units.
    t = Tr((A + B)*(B*B)*C*D)
    assert t.permute(2).args[0].args == (C, D, (A + B), (B**2))
    # permute returns a new, equal object rather than mutating in place.
    t1 = Tr(A*B)
    t2 = t1.permute(1)
    assert id(t1) != id(t2) and t1 == t2
| bsd-3-clause |
3dfxmadscientist/CBSS | addons/resource/__openerp__.py | 57 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP module manifest for the 'resource' addon.
# Fix: 'author' and 'website' were each listed twice; in a dict literal the
# later occurrence silently wins, so the duplicates (identical values) are
# removed here without changing the resulting manifest.
{
    'name': 'Resource',
    'version': '1.1',
    'author': 'OpenERP SA',
    'category': 'Hidden/Dependency',
    'website': 'http://www.openerp.com',
    'description': """
Module for resource management.
===============================
A resource represent something that can be scheduled (a developer on a task or a
work center on manufacturing orders). This module manages a resource calendar
associated to every resource. It also manages the leaves of every resource.
    """,
    'depends': ['process'],
    'data': [
        'security/ir.model.access.csv',
        'resource_view.xml',
    ],
    'demo': ['resource_demo.xml'],
    'test': [
        'test/resource.yml',
        'test/duplicate_resource.yml',
    ],
    'installable': True,
    'auto_install': False,
    'images': ['images/resource_leaves_calendar.jpeg','images/resource_leaves_form.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
markeTIC/l10n-spain | l10n_es_aeat_mod115/__openerp__.py | 2 | 1446 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest for the Spanish AEAT model 115 report.
{
    # Human-readable module name shown in the apps list.
    'name': 'AEAT modelo 115',
    # Odoo 8.0 series versioning.
    'version': '8.0.1.2.0',
    'category': "Localisation/Accounting",
    'author': "Serv. Tecnol. Avanzados - Pedro M. Baeza,"
              "AvanzOSC,"
              "Antiun Ingenierรญa S.L.,"
              "Odoo Community Association (OCA)",
    'website': "https://github.com/OCA/l10n-spain",
    'license': 'AGPL-3',
    # Builds on the shared Spanish AEAT reporting framework.
    'depends': ['l10n_es_aeat'],
    # Views, BOE export wizard, and access rules loaded on install.
    'data': [
        'wizard/export_mod115_to_boe.xml',
        'views/mod115_view.xml',
        'security/ir.model.access.csv'],
    'installable': True,
}
| agpl-3.0 |
davidwilson-85/easymap | graphic_output/Pillow-4.2.1/Tests/test_imagefont_bitmap.py | 1 | 1533 | from helper import unittest, PillowTestCase
from PIL import Image, ImageFont, ImageDraw
# Probe for compiled FreeType support: accessing ImageFont.core.getfont
# raises ImportError when the C extension is unavailable (ImageFont.core is
# then a deferred-error stub).
image_font_installed = True
try:
    ImageFont.core.getfont
except ImportError:
    image_font_installed = False
@unittest.skipIf(not image_font_installed, "image font not installed")
class TestImageFontBitmap(PillowTestCase):
    def test_similar(self):
        # Render the same text with an outline font and an embedded-bitmap
        # font, then require the two rasterizations to be visually close.
        text = 'EmbeddedBitmap'
        font_outline = ImageFont.truetype(
            font='Tests/fonts/DejaVuSans.ttf', size=24)
        font_bitmap = ImageFont.truetype(
            font='Tests/fonts/DejaVuSans-bitmap.ttf', size=24)
        size_outline = font_outline.getsize(text)
        size_bitmap = font_bitmap.getsize(text)
        # Canvas large enough for whichever rendering is bigger.
        size_final = max(size_outline[0], size_bitmap[0]), max(size_outline[1], size_bitmap[1])
        im_bitmap = Image.new('RGB', size_final, (255, 255, 255))
        im_outline = im_bitmap.copy()
        draw_bitmap = ImageDraw.Draw(im_bitmap)
        draw_outline = ImageDraw.Draw(im_outline)
        # Metrics are different on the bitmap and ttf fonts,
        # more so on some platforms and versions of freetype than others.
        # Mac has a 1px difference, linux doesn't.
        draw_bitmap.text((0, size_final[1] - size_bitmap[1]),
                         text, fill=(0, 0, 0), font=font_bitmap)
        draw_outline.text((0, size_final[1] - size_outline[1]),
                          text, fill=(0, 0, 0), font=font_outline)
        # Loose tolerance (20) to absorb platform/freetype metric drift.
        self.assert_image_similar(im_bitmap, im_outline, 20)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
amjames/psi4 | psi4/driver/qcdb/orca.py | 1 | 13098 | #
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
from collections import defaultdict
from .pdict import PreservingDict
from .molecule import Molecule
from .physconst import *
def harvest(p4Mol, orca_out, **largs):
    """Harvest variables, gradient, and the molecule from the output and other
    files

    Returns a (psivar, grad, mol) tuple.
    """
    # Split into lines as it is much easier to find what is needed
    out_lines = orca_out.split('\n')
    mol = harvest_molecule_from_outfile(out_lines)
    # NOTE(review): the engrad filename is hard-coded to "NONE", so
    # harvest_engrad always hits its IOError path and returns [] -- confirm
    # whether a real .engrad path should be threaded through here.
    file_name = "NONE"
    grad = harvest_engrad(file_name)
    # Harvest energies and properties from the output file
    psivar = PreservingDict()
    harvest_scf_from_outfile(out_lines, psivar)
    harvest_dipole(out_lines, psivar)
    harvest_mp2(out_lines, psivar)
    harvest_coupled_cluster(out_lines, psivar)
    return psivar, grad, mol
def muster_memory(mem):
    """Transform input *mem* in MB into psi4-type options for orca.

    Returns (text, options) where *text* is the ``%MaxCore`` input line and
    *options* is a nested defaultdict of c-side keywords.
    """
    opts = defaultdict(lambda: defaultdict(dict))
    opts['ORCA']['ORCA_MAXCORE']['value'] = int(mem)
    text = "%MaxCore {}\n".format(opts['ORCA']['ORCA_MAXCORE']['value'])
    # Memory settings must not be contradicted by keywords in the input file.
    for keyword in opts['ORCA']:
        opts['ORCA'][keyword]['clobber'] = True
    return text, opts
def muster_modelchem(name, dertype):
    """Transform calculation method *name* and derivative level *dertype*
    into options for orca.

    Deliberately requested pieces (orca__orca_runtyp, orca__orca_functional)
    are marked clobber/superclobber so they complain if contradicted by, or
    override, keywords in the input file.
    """
    text = ''
    lowername = name.lower()
    options = defaultdict(lambda: defaultdict(dict))
    # The derivative level selects the orca run type.
    if dertype == 0:
        options['ORCA']['ORCA_RUNTYP']['value'] = 'ENERGY'
    elif dertype == 1:
        options['ORCA']['ORCA_RUNTYP']['value'] = 'ENGRAD'
    else:
        raise ValidationError("Requested Orca dertype {} is not available."
                              .format(dertype))
    # Correlated methods share one keyword; map method name -> calc level.
    calc_levels = {'orca-mp2': 'MP2',
                   'orca-ccsd': 'CCSD',
                   'orca-ccsd(t)': 'CCSD(T)'}
    if lowername == 'orca':
        pass
    elif lowername == 'orca-b3lyp':
        options['ORCA']['ORCA_FUNCTIONAL']['value'] = 'B3LYP_G'
    elif lowername in calc_levels:
        options['ORCA']['ORCA_CALC_LEVEL']['value'] = calc_levels[lowername]
    else:
        raise ValidationError("Requested Orca computational methods {} is not "
                              "available." .format(lowername))
    # Set clobbering
    for keyword in ('ORCA_RUNTYP', 'ORCA_FUNCTIONAL'):
        if keyword in options['ORCA']:
            options['ORCA'][keyword]['clobber'] = True
            options['ORCA'][keyword]['superclobber'] = True
    return text, options
def orca_list():
    """Return an array of Orca methods with energies. Appended
    to procedures['energy'].
    """
    return ['orca', 'orca-b3lyp']
def orca_gradient_list():
    """Return an array of Orca methods with analytical gradients.
    Appended to procedures['gradient'].
    """
    # Bugfix: the entry was previously the typo 'oc-b3lyp', a name that
    # muster_modelchem does not recognize (it handles 'orca-b3lyp' and
    # supports dertype 1 / ENGRAD for it).
    return ['orca-b3lyp']
def harvest_molecule_from_outfile(lines):
    """Return a Molecule built from the last geometry block in *lines*.

    Sample block::

        ----------------------------
        CARTESIAN COORDINATES (A.U.)
        ----------------------------
          NO LB      ZA    FRAG     MASS         X           Y           Z
           0 O     8.0000    0    15.999   -0.0434...   -0.0555...    0.0000...
    """
    geom_start = find_start(lines, 'CARTESIAN COORDINATES (A.U.)')
    if geom_start == -1:
        return Molecule()
    # Geometry starts 3 lines after header and ends with a blank line
    geom_start += 3
    mol_str = ''
    for line in lines[geom_start:]:
        if line == '':
            break
        # Bugfix: the atomic number and the z coordinate previously shared
        # the variable name 'z' (the coordinate silently overwrote the
        # atomic number); unpack into distinct names.
        num, atom, za, frag, mass, x, y, z = line.split()
        mol_str += '{} {} {} {}\n'.format(atom, x, y, z)
    return Molecule(mol_str)
def harvest_scf_from_outfile(lines, psivar):
    """Harvest SCF results from the SCF section of the output file"""
    """Sample SCF results block"""
    #----------------
    #TOTAL SCF ENERGY
    #----------------
    #
    #Total Energy : -76.02602169 Eh -2068.77322 eV
    #
    #Components:
    #Nuclear Repulsion : 9.12509697 Eh 248.30651 eV
    #Electronic Energy : -85.15111867 Eh -2317.07974 eV
    #
    #One Electron Energy: -123.01434123 Eh -3347.39040 eV
    #Two Electron Energy: 37.86322256 Eh 1030.31067 eV
    #
    #Virial components:
    #Potential Energy : -151.99262033 Eh -4135.92947 eV
    #Kinetic Energy : 75.96659864 Eh 2067.15624 eV
    #Virial Ratio : 2.00078223
    #
    #
    scf_start = find_start(lines, 'TOTAL SCF ENERGY')
    if scf_start == -1:
        # NOTE(review): returns '' here but None on the success path; the
        # caller (harvest) ignores the return value either way.
        return ''
    # Energies in SCF block
    # Total energy is 3 lines below the header, nuclear repulsion 6 below;
    # the value is the 4th whitespace-separated token on each line.
    psivar['SCF TOTAL ENERGY'] = float(lines[scf_start + 3].split()[3])
    psivar['NUCLEAR REPULSION ENERGY'] = float(lines[scf_start + 6].split()[3])
def harvest_dipole(lines, psivar):
    """Harvest the dipole (converted to Debye) into *psivar*.

    Multiple different dipole moments can be output if post-HF calculations
    are run and their dipoles are requested, resulting in highly similar
    blocks. The last block is collected, which appears to always be the one
    requested.
    TODO: collect all the different types of dipole moments
    """
    """Sample dipole moment results block"""
    #-------------
    #DIPOLE MOMENT
    #-------------
    # X Y Z
    #Electronic contribution: -0.11359 -0.14669 -0.00000
    #Nuclear contribution : 0.61892 0.79867 0.00000
    # -----------------------------------------
    #Total Dipole Moment : 0.50533 0.65198 -0.00000
    # -----------------------------------------
    #Magnitude (a.u.) : 0.82489
    #Magnitude (Debye) : 2.09670
    #
    #
    dipole_start = find_start(lines, 'DIPOLE MOMENT')
    if dipole_start != -1:
        # Dipole x, y, z are the last items 6 lines down in the dipole block
        dipole_str_list = lines[dipole_start + 6].split()[-3:]
        # Convert the dipole to debye
        dipole = [float(i)*psi_dipmom_au2debye for i in dipole_str_list]
        psivar['CURRENT DIPOLE X'] = dipole[0]
        psivar['CURRENT DIPOLE Y'] = dipole[1]
        psivar['CURRENT DIPOLE Z'] = dipole[2]
        # Bugfix: previously parsed the last *character* of the line
        # (lines[...][-1]) instead of the last whitespace-separated token.
        # The value is currently unused but kept for future collection.
        magnitude = float(lines[dipole_start + 8].split()[-1])
def harvest_mp2(lines, psivar):
    """Harvest the MP2 (or RI-MP2) energies into *psivar*.

    Sample lines handled::

        MP2 TOTAL ENERGY:       -76.226803665 Eh
         MP2 CORRELATION ENERGY   :     -0.125436532 Eh
         RI-MP2 CORRELATION ENERGY:     -0.125496692 Eh

    (note the leading space on the correlation-energy lines)
    """
    # Scan backwards so the last occurrence in the output wins.
    for line in reversed(lines):
        if line[:16] == 'MP2 TOTAL ENERGY':
            # Store as float for consistency with the SCF harvester.
            psivar['MP2 TOTAL ENERGY'] = float(line.split()[-2])
            break
    for line in reversed(lines):
        if line[:23] == ' MP2 CORRELATION ENERGY' or\
                line[:26] == ' RI-MP2 CORRELATION ENERGY':
            psivar['MP2 CORRELATION ENERGY'] = float(line.split()[-2])
            break
def harvest_coupled_cluster(lines, psivar):
    """Harvest coupled cluster results
    WARNING: Canonical and DLPNO print the coupled cluster results differently
    """
    """Sample (canonical) CCSD results block"""
    #----------------------
    #COUPLED CLUSTER ENERGY
    #----------------------
    #
    #E(0) ... -76.063720080
    #E(CORR) ... -0.288938791
    #E(TOT) ... -76.352658871
    #Singles Norm <S|S>**1/2 ... 0.021106262
    #T1 diagnostic ... 0.007462191
    #
    """Sample DLPNO coupled cluster block (CCSD)"""
    #----------------------
    #COUPLED CLUSTER ENERGY
    #----------------------
    #
    #E(0) ... -76.026019996
    #E(CORR)(strong-pairs) ... -0.211953159
    #E(CORR)(weak-pairs) ... -0.000007244
    #E(CORR)(corrected) ... -0.211960403
    #E(TOT) ... -76.237980399
    #Singles Norm <S|S>**1/2 ... 0.014443573
    #T1 diagnostic ... 0.005106574
    #
    """Sample CCSD(T) block (same for DLPNO and canonical)"""
    #
    #Triples Correction (T) ... -0.001544381
    #Final correlation energy ... -0.134770265
    #E(CCSD) ... -75.709548429
    #E(CCSD(T)) ... -75.711092810
    #
    cc_start = find_start(lines, 'COUPLED CLUSTER ENERGY')
    if cc_start == -1:
        return
    #psivar["CC REFERENCE"] = float(lines[cc_start + 3].split()[-1])
    # CCSD energy block is less than 20 lines
    # Anchor on E(TOT): the line above it holds the correlation energy in
    # both canonical and DLPNO layouts.
    for i, line in enumerate(lines[cc_start:cc_start + 20], start=cc_start):
        if line[:6] == "E(TOT)":
            psivar["CCSD TOTAL ENERGY"] = line.split()[-1]
            psivar["CCSD CORRELATION ENERGY"] = lines[i-1].split()[-1]
            #psivar["SINGLES NORM"] = lines[i+1].split()[-1]
            #psivar["T1 DIAGNOSTIC"] = lines[i+2].split()[-1]
            break
    # CCSD(T) energy block
    # When triples are present, E(CCSD) here intentionally overwrites the
    # CCSD TOTAL ENERGY captured above.
    for i, line in enumerate(lines[cc_start:], start=cc_start):
        if line[:22] == "Triples Correction (T)":
            #psivar["TRIPLES CORRELATION ENERGY"] = line.split()[-1]
            psivar["CCSD(T) CORRELATION ENERGY"] = lines[i+1].split()[-1]
            psivar["CCSD TOTAL ENERGY"] = lines[i+2].split()[-1]
            psivar["CCSD(T) TOTAL ENERGY"] = lines[i+3].split()[-1]
            break
def harvest_engrad(engrad):
    """Parse the engrad file *engrad* for the gradient.

    Returns a list of ``[x, y, z]`` rows, one per atom, or ``[]`` when the
    file cannot be opened. Fixed layout: atom count on line 3, total energy
    on line 7, one gradient component per line starting at line 12.
    """
    try:
        lines = open(engrad).readlines()
    except IOError:
        return []
    num_atoms = int(lines[3].strip())
    grad = []
    # Bugfix: the stop bound was 13 + num_atoms*3, which produced one extra
    # iteration reading past the gradient section (into the comment line
    # that follows it).
    for i in range(12, 12 + num_atoms * 3, 3):
        grad.append(list(map(float, lines[i:i + 3])))
    return grad
def find_start(lines, start_str, reverse=True):
    """Return the index of the line exactly equal to *start_str*, or -1.

    By default the search runs backwards, because the last occurrence of a
    block header is usually the one wanted.
    """
    if reverse:
        candidates = reversed(list(enumerate(lines)))
    else:
        candidates = enumerate(lines)
    for index, line in candidates:
        if line == start_str:
            return index
    return -1
| lgpl-3.0 |
petermalcolm/osf.io | website/files/models/figshare.py | 2 | 1360 | from website.util.sanitize import escape_html
from website.files.models.base import File, Folder, FileNode, FileVersion
__all__ = ('FigshareFile', 'FigshareFolder', 'FigshareFileNode')
class FigshareFileNode(FileNode):
    # All figshare-backed file nodes report the 'figshare' storage provider.
    provider = 'figshare'
class FigshareFolder(FigshareFileNode, Folder):
    # Folders need no figshare-specific behavior beyond the provider tag.
    pass
class FigshareFile(FigshareFileNode, File):
    def touch(self, bearer, revision=None, **kwargs):
        # Figshare is unversioned: any requested revision is deliberately
        # dropped and None is forwarded to the base implementation.
        return super(FigshareFile, self).touch(bearer, revision=None, **kwargs)
    def update(self, revision, data, user=None):
        """Figshare does not support versioning.
        Always pass revision as None to avoid conflict.
        """
        self.name = data['name']
        self.materialized_path = data['materialized']
        self.save()
        # identifier=None since figshare has no revision identifiers.
        version = FileVersion(identifier=None)
        version.update_metadata(data, save=False)
        # Draft files are not renderable
        if data['extra']['status'] == 'drafts':
            return (version, u'''
            <style>.file-download{{display: none;}}</style>
            <div class="alert alert-info" role="alert">
            The file "{name}" is still a draft on figshare. <br>
            To view it on the OSF <a href="http://figshare.com/faqs">publish</a> it on figshare.
            </div>
            '''.format(name=escape_html(self.name)))
        return version
| apache-2.0 |
anushreejangid/csm-ut | csmpe/core_plugins/csm_install_operations/ios_xe/utils.py | 1 | 18972 | # =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
import re
import string
# Matches lines of the form "Error: <message>" anywhere in command output.
install_error_pattern = re.compile("Error: (.*)$", re.MULTILINE)
def log_install_errors(ctx, output):
    """
    Print warning for Error:
    :param ctx: plugin context used for logging
    :param output: raw command output to scan
    :return: nothing
    """
    for error_text in install_error_pattern.findall(output):
        ctx.warning(error_text)
def number_of_rsp(ctx):
    """
    Determine the number of RSP's in the chassis
    :param ctx:
    :return: the number of RSP's
    """
    # Fixed-RSP platforms always have exactly one; skip the device query.
    platforms = ['ASR-902', 'ASR-920']
    count = 0
    # Note: the regex group below is a string, so valid values are strings.
    valid_count = ['1', '2']
    if ctx._connection.platform in platforms:
        count = 1
        return count
    output = ctx.send("show platform | count RSP")
    if output:
        m = re.search('Number.*= (\d+)', output)
        if m:
            count = m.group(1)
            if count not in valid_count:
                ctx.error("Invalid RSP count: {}".format(count))
            else:
                count = int(count)
    # NOTE(review): when the regex does not match, 0 is returned -- confirm
    # callers treat 0 as "unknown / none found".
    return count
def install_folder(ctx):
    """
    Determine the image folder
    'File: bootflash:/Image/packages.conf, on: RP0'
    'File: consolidated:packages.conf, on: RP0'
    :param ctx
    :return: the image folder
    """
    # Default used when the running image gives no usable folder.
    folder = 'bootflash:/Image'
    output = ctx.send("show version running | include packages.conf")
    if output:
        m = re.search('File: (.*)/?packages.conf', output)
        if m:
            folder = m.group(1)
            # The greedy group keeps a trailing '/'; strip it.
            folder = re.sub("/$", "", folder)
            # A consolidated boot has no real folder; fall back to default.
            if folder == 'consolidated:':
                folder = 'bootflash:/Image'
    return folder
def create_folder(ctx, folder):
    """
    Create *folder* on the device if it does not already exist.
    :param ctx
    :param folder to be created
    :return: True: Success, False: Failed
    """
    # 'dir' on a missing folder prints '%Error opening ...'.
    output = ctx.send('dir ' + folder)
    m = re.search('%Error opening', output)
    if m:
        cmd = 'mkdir ' + folder
        # mkdir prompts 'Create directory filename ...?'; the newline
        # sent next accepts the default name.
        ctx.send(cmd, wait_for_string="Create directory filename")
        ctx.send('\r\n')
    else:
        return True
    # Re-check: success means the folder now lists without error.
    output = ctx.send('dir ' + folder)
    m = re.search('%Error opening', output)
    if m:
        return False
    else:
        return True
def available_space(ctx, device):
    """
    Determine the available space on device such as bootflash or stby-bootflash:
    :param ctx:
    :param device: bootflash / stby-bootflash:
    :return: the available space in bytes, or -1 if it cannot be parsed
    """
    listing = ctx.send('dir ' + device)
    match = re.search('(\d+) bytes free', listing)
    return int(match.group(1)) if match else -1
def installed_package_name(ctx, pkg_conf):
    """
    :param: ctx
    :param: pkg_conf such as bootflash:/Image/packages.conf
    :return: the installed package name, or None if not determinable
    """
    output = ctx.send('dir ' + pkg_conf)
    if not output:
        ctx.error("dir {} failed".format(pkg_conf))
        return None
    # Missing packages.conf means a consolidated-mode boot; nothing to read.
    m = re.search('No such file', output)
    if m:
        ctx.info('{} does not exist'.format(pkg_conf))
        return None
    # Pull the 'pkginfo: PackageName: <name>' line out of the conf file.
    cmd = "more " + pkg_conf + " | include PackageName"
    output = ctx.send(cmd)
    m = re.search('pkginfo: PackageName: (.*)$', output)
    if m:
        img_name = m.group(1)
        ctx.info("installed_package_name: installed "
                 "name = {}".format(img_name))
        return img_name
    else:
        ctx.info("PackageName is not found in {}".format(pkg_conf))
        return None
def installed_package_version(ctx):
    """
    :param: ctx
    :return: the running build version string, or None

    Parsed from ``show version``; e.g.
    'Cisco IOS XE Software, Version 03.13.03.S - Extended Support Release'
    yields '03.13.03.S'.
    """
    cmd = 'show version | include Cisco IOS XE Software'
    output = ctx.send(cmd)
    match = re.search('Version (.*) -', output)
    if not match:
        ctx.info("Build version is not found in show version: {}".format(output))
        return None
    bld_version = match.group(1)
    ctx.info("installed_package_version: installed "
             "version = {}".format(bld_version))
    return bld_version
def installed_package_device(ctx):
    """
    :param: ctx
    :return: device_type with rsp version ie asr900rsp2

    Parsed from ``show version running``; e.g.
    'File: bootflash:/Image/asr900rsp2-rpbase.03.13.03.S.154-3.S3-ext.pkg, on: RP0'
    yields 'asr900rsp2'.
    """
    cmd = 'show version running | include File:'
    img_dev = None
    output = ctx.send(cmd)
    if output:
        # Portability fix: string.split(output, '\n') is Python-2 only;
        # use the str method instead (drop empty lines as before).
        lines = [x for x in output.split('\n') if x]
        for line in lines:
            m = re.search('File: .*(asr.*)-\w+.\d+', line)
            if m:
                img_dev = m.group(1)
                break
    ctx.info("installed_package_device: device type = {}".format(img_dev))
    return img_dev
def install_package_family(pkg):
    """
    :param: pkg ie asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin
    :return: device_type of the installed image ie asr900, or None
    """
    match = re.search('(asr\d+)\w*', pkg)
    return match.group(1) if match else None
def install_add_remove(ctx, cmd):
    """
    Execute the copy command
    :param ctx
    :param cmd
    :return: nothing (calls ctx.error on failure)
    """
    message = "Waiting the operation to continue"
    ctx.info(message)
    ctx.post_status(message)
    # The copy command prompts 'Destination filename ...?'; the newlines
    # sent below accept the defaults. The copy may take up to an hour.
    ctx.send(cmd, wait_for_string="Destination filename")
    output = ctx.send("\r\n\r\n\r\n", timeout=3600)
    # Success is indicated by the 'N bytes copied in X secs' summary line.
    result = re.search("\d+ bytes copied in .* secs", output)
    if result:
        ctx.info("Command {} finished successfully".format(cmd))
        return
    else:
        log_install_errors(ctx, output)
        ctx.error("Command {} failed".format(cmd))
def check_pkg_conf(ctx, folder):
    """
    Verify every .pkg entry in packages.conf matches the running OS version.
    :param ctx
    :param folder: i.e. bootflash:/Image
    :return: True when all .pkg lines contain ctx._connection.os_version;
             False when the file is empty/unreadable or any line mismatches
    """
    pkg_conf = folder + '/packages.conf'
    output = ctx.send('more ' + pkg_conf + ' | include pkg$')
    if not output:
        return False
    # Portability fix: string.split(output, '\n') is Python-2 only;
    # use the str method instead (drop empty lines as before).
    lines = [x for x in output.split('\n') if x]
    for line in lines:
        if ctx._connection.os_version not in line:
            return False
    return True
def remove_exist_image(ctx, package):
    """
    Remove the existing packages
    :param ctx
    :param package
    :return: True or False
    """
    # Nothing to do if the file is already absent.
    output = ctx.send('dir ' + package)
    m = re.search('No such file', output)
    if m:
        return True
    else:
        cmd = "del /force {}".format(package)
        # NOTE(review): the delete is sent twice; the second send's
        # 'No such file' error is what confirms the removal succeeded.
        # Confirm the first send is not meant to answer a prompt instead.
        ctx.send(cmd)
        ctx.info("Removing files : {}".format(package))
        output = ctx.send(cmd)
        m = re.search('No such file', output)
        if m:
            return True
        else:
            return False
def remove_exist_subpkgs(ctx, folder, pkg):
    """
    Remove residual packages from the earlier installations
    :param ctx
    :param folder: i.e. bootflash:/Image
    :param pkg: the .bin package for the current installation (kept)
    :return: None (errors are reported via ctx.error)
    """
    pkg_conf = folder + '/packages.conf'
    # Skip if no packages.conf
    output = ctx.send('dir ' + pkg_conf)
    if not output:
        ctx.error("dir {} failed".format(pkg_conf))
        return
    m = re.search('No such file', output)
    if m:
        # No packages.conf => consolidated boot mode, nothing to clean up.
        ctx.info('Booted from consolidated mode: '
                 '{} does not exist'.format(pkg_conf))
        return
    # Discover package name, version, and image device
    img_name = installed_package_name(ctx, pkg_conf)
    bld_version = installed_package_version(ctx)
    img_device = installed_package_device(ctx)
    if not bld_version or not img_device or not img_name:
        ctx.error("Not able to determine the residual files")
        return
    # Remove all the bin files except the current install pkg
    if folder != 'bootflash:':
        # Dedicated image folder: safe to wildcard-delete all asr*.bin.
        package_name = folder + '/asr*.bin'
        remove_exist_image(ctx, package_name)
    else:
        # Installing directly on bootflash: delete old .bin files one by
        # one, skipping the package being installed.
        package_name = folder + '*.bin'
        output = ctx.send('dir ' + package_name + ' | include bin')
        if not output:
            ctx.error("dir {} failed".format(package_name))
            return
        lines = string.split(output, '\n')
        lines = [x for x in lines if x]
        for line in lines:
            m = re.search('(asr.*\.bin)', line)
            if m:
                previous_pkg = m.group(0)
                if previous_pkg != pkg:
                    previous_package = folder + '/' + previous_pkg
                    remove_exist_image(ctx, previous_package)
    # Remove the packages.conf*- file
    package_name = folder + '/packages.conf*-'
    remove_exist_image(ctx, package_name)
    # Remove residual asr900*.conf
    package_name = folder + '/asr9*.conf'
    remove_exist_image(ctx, package_name)
    # Remove .pkg files
    cmd = 'dir ' + folder + '/*.pkg | include pkg'
    # Directory of bootflash:/Image/*.pkg
    # 15107 -rw- 41534024 Sep 8 2016 03:55:47 +00:00 asr900rsp2-espbase.03.14.03.S.155-1.S3-std.pkg
    output = ctx.send(cmd)
    if not output:
        return
    lines = string.split(output, '\n')
    lines = [x for x in lines if x]
    for line in lines:
        m = re.search('(asr9.*pkg)', line)
        if m:
            exfile = m.group(1)
            package = folder + '/' + exfile
            # Keep only sub-packages matching the installed build/device.
            if bld_version not in package or img_device not in package:
                remove_exist_image(ctx, package)
    return
def check_issu_readiness(ctx, pkg, image_size):
    """
    Verify that the router satisfies all ISSU preconditions:
    sub-package boot mode, software compatibility with the target package,
    matching image types on both RSPs, sufficient free disk space, and an
    SSO redundancy configuration in the ACTIVE / STANDBY HOT state.
    :param: ctx
    :param: pkg
    :param: image_size
    :return: True or False
    """
    # check the current package mode
    cmd = 'show version | count packages.conf'
    output = ctx.send(cmd)
    if output:
        m = re.search('Number.*= (\d+)', output)
        if m:
            count = m.group(1)
            # Zero matches means the box booted a consolidated image,
            # which rules out ISSU.
            if count == '0':
                ctx.info("The current boot mode is consolidated package.")
                return False
        else:
            ctx.warning("Invalid show version output: {}".format(output))
            return False
    else:
        ctx.warning("Show version command error!")
        return False
    # check software compatibility
    cmd = 'show version | include System image file'
    output = ctx.send(cmd)
    if output:
        m = re.search('System image file is \"(.*)\"', output)
        if m:
            pkg_conf = m.group(1)
            img_name = installed_package_name(ctx, pkg_conf)
            if not img_name:
                ctx.warning("Installed package name {} is not found.".format(pkg_conf))
                return False
        else:
            ctx.warning("Show version command error!")
            return False
    else:
        ctx.warning("Show version command error!")
        return False
    # Compare the installed image family against the target package name.
    m = re.search('asr.*-(.*)\.\d+\.\d+\.\d+.*', pkg)
    if m:
        pkg_name = m.group(1)
        if img_name != pkg_name:
            ctx.info("Incompatible packages: {} vs. {}".format(img_name, pkg_name))
            return False
    else:
        ctx.warning("Package name is not found in {}".format(pkg))
        return False
    # check image types between RSP's
    cmd = 'show version rp active running | include Package'
    output = ctx.send(cmd)
    cmd = 'show version rp standby running | include Package'
    stby_output = ctx.send(cmd)
    if output and stby_output:
        lines = string.split(output, '\n')
        lines = [x for x in lines if x]
        # Package: rpbase, version: 03.16.00.S.155-3.S-ext, status: active
        for line in lines:
            m = re.search('Package: (.*) status', line)
            if m:
                img_type = m.group(1)
                # Every package type on the active RP must also be
                # present on the standby RP.
                if img_type not in stby_output:
                    ctx.warning("Mismatched image types:")
                    ctx.warning("Active rp version: {}".format(output))
                    ctx.warning("Standby rp version: {}".format(stby_output))
                    return False
            else:
                ctx.warning("Invalid package version format: {}".format(line))
                return False
    else:
        ctx.warning("Show version command error!")
        return False
    # check the required disk space for ISSU
    # bootflash: requires additional 250 MB
    # stby-bootflash: requires additional 450 MB
    total_size = 250000000 + image_size
    flash_free = available_space(ctx, 'bootflash:')
    if flash_free < total_size:
        ctx.info("Total required / bootflash "
                 "available: {} / {} bytes".format(total_size, flash_free))
        ctx.info("Not enough space in bootflash: to perform ISSU. "
                 "Setting the Router to boot in sub-package mode.")
        return False
    total_size = 450000000 + image_size
    flash_free = available_space(ctx, 'stby-bootflash:')
    if flash_free < total_size:
        ctx.info("Total required / stby-bootflash "
                 "available: {} / {} bytes".format(total_size, flash_free))
        ctx.info("Not enough space in stby-bootflash: to perform ISSU. "
                 "Setting the Router to boot in sub-package mode.")
        return False
    else:
        ctx.info("There is enough space on bootflash and stby-bootflash to perform ISSU")
    # check show redundancy
    cmd = 'show redundancy | include Configured Redundancy Mode'
    output = ctx.send(cmd)
    if output:
        m = re.search('Configured Redundancy Mode = (.*)', output)
        if m:
            configed_mode = m.group(1)
            # ISSU requires stateful switchover (SSO) to be configured.
            if configed_mode != 'sso':
                ctx.warning("Configured Redundancy Mode = {}".format(configed_mode))
                return False
        else:
            ctx.warning("Show redundancy command error!")
            return False
    else:
        ctx.warning("Show redundancy command error!")
        return False
    cmd = 'show redundancy | include Operating Redundancy Mode'
    output = ctx.send(cmd)
    if output:
        m = re.search('Operating Redundancy Mode = (.*)', output)
        if m:
            operating_mode = m.group(1)
            if operating_mode != 'sso':
                ctx.warning("Operating Redundancy Mode = {}".format(operating_mode))
                return False
        else:
            ctx.warning("Show redundancy command error!")
            return False
    else:
        ctx.warning("Show redundancy command error!")
        return False
    cmd = 'show redundancy | include Current Software state'
    output = ctx.send(cmd)
    if output:
        lines = string.split(output, '\n')
        lines = [x for x in lines if x]
        num_of_line = len(lines)
        # Expect exactly two lines: one for the active RP, one for standby.
        if num_of_line != 2:
            ctx.warning("num_of_line = {}".format(num_of_line))
            ctx.warning("Current Software state = {}".format(output))
            return False
        m = re.search('Current Software state = (.*)', lines[0])
        if m:
            active_state = m.group(1)
            if 'ACTIVE' not in active_state:
                ctx.warning("show redundancy Active state check has failed")
                ctx.warning("active_state = {}".format(active_state))
                ctx.warning("Current Software state = {}".format(lines[0]))
                return False
        else:
            ctx.warning("Show redundancy command error!")
            return False
        m = re.search('Current Software state = (.*)', lines[1])
        if m:
            stby_state = m.group(1)
            if 'STANDBY HOT' not in stby_state:
                ctx.warning("show redundancy STANDBY HOT state check has failed")
                ctx.warning("stby_state = {}".format(stby_state))
                ctx.warning("Current Software state = {}".format(lines[1]))
                return False
        else:
            ctx.warning("Show redundancy command error!")
            return False
    else:
        ctx.warning("Show redundancy command error!")
        return False
    return True
def xe_show_platform(ctx):
    """
    Parse show platform output to extract the RP and SIP status
    :param: ctx
    :return: dictionary mapping slot name -> [type, state]
    0 1 2 3 4 5 6
    012345678901234567890123456789012345678901234567890123456789012345678
    Slot Type State Insert time (ago)
    --------- ------------------- --------------------- -----------------
    0/0 12xGE-2x10GE-FIXED ok 15:09:04
    R1 A900-RSP2A-128 ok, active 14:09:30
    """
    platform_info = {}
    cmd = 'show platform'
    output = ctx.send(cmd)
    if output:
        lines = string.split(output, '\n')
        lines = [x for x in lines if x]
        sip0 = False
        for line in lines:
            # Skip the header; data rows start after the dashed separator.
            if not sip0:
                m = re.search('--------- ------------------- '
                              '--------------------- -----------------', line)
                if m:
                    sip0 = True
                continue
            # A second header marks the firmware table; stop there.
            m = re.search('Slot CPLD Version Firmware Version', line)
            if m:
                break
            # Fixed-width columns (see the column ruler in the docstring).
            Slot = line[:8].strip()
            Type = line[10:28].strip()
            State = line[30:50].strip()
            # Keep only line-card (0/N) and route-processor (RN) rows.
            m1 = re.search('^0\/\d+', Slot)
            m2 = re.search('^R\d+', Slot)
            if m1 or m2:
                platform_info[Slot] = [Type, State]
    return platform_info
| bsd-2-clause |
smalls257/VRvisu | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/encodings/utf_16.py | 404 | 3984 | """ Python 'utf-16' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs, sys
### Codec APIs
encode = codecs.utf_16_encode
def decode(input, errors='strict'):
    """Decode *input* as UTF-16, consuming the whole buffer (final=True)."""
    output, consumed = codecs.utf_16_decode(input, errors, True)
    return (output, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """UTF-16 incremental encoder.

    The first chunk is encoded with a BOM in the platform's native byte
    order; subsequent chunks reuse the endian-specific encoder so no
    further BOM is emitted.
    """

    def __init__(self, errors='strict'):
        codecs.IncrementalEncoder.__init__(self, errors)
        self.encoder = None  # chosen after the first encode() call

    def encode(self, input, final=False):
        if self.encoder is not None:
            return self.encoder(input, self.errors)[0]
        # First call: emit the BOM, then lock in the native-order encoder.
        result = codecs.utf_16_encode(input, self.errors)[0]
        self._select_native_encoder()
        return result

    def reset(self):
        codecs.IncrementalEncoder.reset(self)
        self.encoder = None

    def getstate(self):
        # state info we return to the caller:
        # 0: stream is in natural order for this platform
        # 2: endianness hasn't been determined yet
        # (we're never writing in unnatural order)
        return 2 if self.encoder is None else 0

    def setstate(self, state):
        if state:
            self.encoder = None
        else:
            self._select_native_encoder()

    def _select_native_encoder(self):
        # Pick the BOM-less encoder matching the platform byte order.
        if sys.byteorder == 'little':
            self.encoder = codecs.utf_16_le_encode
        else:
            self.encoder = codecs.utf_16_be_encode
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """UTF-16 incremental decoder that sniffs the BOM on the first chunk."""

    def __init__(self, errors='strict'):
        codecs.BufferedIncrementalDecoder.__init__(self, errors)
        self.decoder = None  # set once the byte order is known

    def _buffer_decode(self, input, errors, final):
        if self.decoder is not None:
            return self.decoder(input, self.errors, final)
        # Byte order still unknown: let the ex-decoder inspect the BOM.
        output, consumed, byteorder = codecs.utf_16_ex_decode(
            input, errors, 0, final)
        if byteorder == -1:
            self.decoder = codecs.utf_16_le_decode
        elif byteorder == 1:
            self.decoder = codecs.utf_16_be_decode
        elif consumed >= 2:
            raise UnicodeError("UTF-16 stream does not start with BOM")
        return (output, consumed)

    def reset(self):
        codecs.BufferedIncrementalDecoder.reset(self)
        self.decoder = None
class StreamWriter(codecs.StreamWriter):
    """UTF-16 stream writer: the BOM is written on the first write only."""

    def __init__(self, stream, errors='strict'):
        codecs.StreamWriter.__init__(self, stream, errors)
        self.encoder = None

    def reset(self):
        codecs.StreamWriter.reset(self)
        self.encoder = None

    def encode(self, input, errors='strict'):
        if self.encoder is not None:
            return self.encoder(input, errors)
        # First write: include the BOM, then remember the native-order
        # encoder for all later writes.
        result = codecs.utf_16_encode(input, errors)
        if sys.byteorder == 'little':
            self.encoder = codecs.utf_16_le_encode
        else:
            self.encoder = codecs.utf_16_be_encode
        return result
class StreamReader(codecs.StreamReader):
    """UTF-16 stream reader.

    The first decode() sniffs the BOM and then replaces itself on the
    instance with the endian-specific decoder, so the BOM check runs
    only once per stream.
    """

    def reset(self):
        codecs.StreamReader.reset(self)
        try:
            # Drop the per-instance decoder so the BOM is re-detected.
            del self.decode
        except AttributeError:
            pass

    def decode(self, input, errors='strict'):
        (object, consumed, byteorder) = \
            codecs.utf_16_ex_decode(input, errors, 0, False)
        if byteorder == -1:
            self.decode = codecs.utf_16_le_decode
        elif byteorder == 1:
            self.decode = codecs.utf_16_be_decode
        elif consumed >= 2:
            # Fix: use the call form instead of the Python-2-only
            # "raise E, msg" statement; identical behavior on Python 2
            # and also valid on Python 3.
            raise UnicodeError("UTF-16 stream does not start with BOM")
        return (object, consumed)
### encodings module API
def getregentry():
    """Build the CodecInfo record used to register the 'utf-16' codec."""
    entry = {
        'name': 'utf-16',
        'encode': encode,
        'decode': decode,
        'incrementalencoder': IncrementalEncoder,
        'incrementaldecoder': IncrementalDecoder,
        'streamreader': StreamReader,
        'streamwriter': StreamWriter,
    }
    return codecs.CodecInfo(**entry)
| gpl-3.0 |
adnanh/zulip | zerver/templatetags/minified_js.py | 118 | 1402 | from __future__ import absolute_import
from django.template import Node, Library, TemplateSyntaxError
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
register = Library()
class MinifiedJSNode(Node):
    """Template node rendering <script> tags for a named JS bundle.

    In DEBUG mode every source file from the spec is emitted as its own
    tag; otherwise one tag pointing at the minified bundle is produced.
    """

    def __init__(self, sourcefile):
        self.sourcefile = sourcefile

    def render(self, context):
        spec = settings.JS_SPECS[self.sourcefile]
        if settings.DEBUG:
            scripts = spec['source_filenames']
        else:
            scripts = [spec['output_filename']]
        tag_template = ('<script type="text/javascript" src="%s"'
                        ' charset="utf-8"></script>')
        tags = [tag_template % staticfiles_storage.url(script)
                for script in scripts]
        return '\n'.join(tags)
@register.tag
def minified_js(parser, token):
    """Template tag: {% minified_js "name" %} -> MinifiedJSNode.

    Validates that the argument is quoted and names a configured JS spec.
    Raises TemplateSyntaxError on any malformed usage.
    """
    try:
        tag_name, sourcefile = token.split_contents()
    except ValueError:
        # Fix: the original referenced tag_name before assignment here
        # (split_contents() failed, so the unpack never ran), raising a
        # NameError instead of the intended error message. Recover the
        # tag name from the raw token contents instead.
        raise TemplateSyntaxError("%s tag requires an argument"
                                  % token.contents.split()[0])
    if not (sourcefile[0] == sourcefile[-1] and sourcefile[0] in ('"', "'")):
        raise TemplateSyntaxError("%s tag should be quoted" % tag_name)
    sourcefile = sourcefile[1:-1]
    if sourcefile not in settings.JS_SPECS:
        raise TemplateSyntaxError("%s tag invalid argument: no JS file %s"
                                  % (tag_name, sourcefile))
    return MinifiedJSNode(sourcefile)
| apache-2.0 |
tnemisteam/cdf-steps | teacher/views/teacher_professional_views.py | 1 | 4299 | from django.views.generic import ListView, DetailView, CreateView, \
DeleteView, UpdateView, \
ArchiveIndexView, DateDetailView, \
DayArchiveView, MonthArchiveView, \
TodayArchiveView, WeekArchiveView, \
YearArchiveView
from teacher.models import Teacher_professional
class Teacher_professionalView(object):
    """Base mixin: binds the Teacher_professional model and nests all
    templates under a teacher_professional/ subdirectory."""
    model = Teacher_professional
    def get_template_names(self):
        """Nest templates within teacher_professional directory."""
        tpl = super(Teacher_professionalView, self).get_template_names()[0]
        app = self.model._meta.app_label
        mdl = 'teacher_professional'
        #self.template_name = tpl.replace(app, '{0}/{1}'.format(app, mdl))
        # Splice the model directory in after the app prefix, e.g.
        # "teacher/x.html" -> "teacher/teacher_professional/x.html".
        # NOTE(review): the slice offset 8 is hard-coded (len('teacher/'));
        # confirm it still holds if the app label ever changes.
        self.template_name = tpl[:8]+'teacher_professional/'+tpl[8:]
        return [self.template_name]
class Teacher_professionalDateView(Teacher_professionalView):
    """Mixin for date-based views: archive by the 'timestamp' field."""
    # Model field used for date navigation.
    date_field = 'timestamp'
    # Numeric month format in URLs (e.g. /2014/03/).
    month_format = '%m'
class Teacher_professionalBaseListView(Teacher_professionalView):
    """Mixin adding pagination to list-style views."""
    # Objects shown per page.
    paginate_by = 10
class Teacher_professionalArchiveIndexView(
        Teacher_professionalDateView, Teacher_professionalBaseListView, ArchiveIndexView):
    """Paginated top-level date archive of Teacher_professional objects."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalCreateView(Teacher_professionalView, CreateView):
    """Create form for Teacher_professional; returns to the list on success."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalDateDetailView(Teacher_professionalDateView, DateDetailView):
    """Detail view addressed by date plus primary key."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalDayArchiveView(
        Teacher_professionalDateView, Teacher_professionalBaseListView, DayArchiveView):
    """Paginated archive of objects for a single day."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalDeleteView(Teacher_professionalView, DeleteView):
    """Confirm-and-delete view; redirects back to the list afterwards."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalDetailView(Teacher_professionalView, DetailView):
    """Single-object detail view for Teacher_professional."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalListView(Teacher_professionalBaseListView, ListView):
    """Paginated flat list of all Teacher_professional objects."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalMonthArchiveView(
        Teacher_professionalDateView, Teacher_professionalBaseListView, MonthArchiveView):
    """Paginated archive of objects for a given month."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalTodayArchiveView(
        Teacher_professionalDateView, Teacher_professionalBaseListView, TodayArchiveView):
    """Paginated archive of objects created today."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalUpdateView(Teacher_professionalView, UpdateView):
    """Edit form for an existing object; returns to the list on success."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalWeekArchiveView(
        Teacher_professionalDateView, Teacher_professionalBaseListView, WeekArchiveView):
    """Paginated archive of objects for a given week."""

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
class Teacher_professionalYearArchiveView(
        Teacher_professionalDateView, Teacher_professionalBaseListView, YearArchiveView):
    """Paginated archive of objects for a whole year."""
    # Populate object_list as well as the date list.
    make_object_list = True

    def get_success_url(self):
        from django.core.urlresolvers import reverse
        return reverse('teacher_teacher_professional_list')
| mit |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/xml/etree/__init__.py | 183 | 1604 | # $Id: __init__.py 1821 2004-06-03 16:57:49Z fredrik $
# elementtree package
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
| gpl-2.0 |
kumar-abhishek/algorithms-1 | cake/rotated.py | 3 | 3678 | #! opt/local/bin/python3
import sys
'''
Given huge array of words, find rotation point.
Array is sorted, but has been shifted so start of sorted section is
not at the front.
'''
def find_rotation_point_iterative(arr, lower, upper):
    """Binary-search for the index where the sorted order restarts.

    Returns the index of the smallest element (the rotation point);
    0 if the array is not rotated at all.
    """
    while lower <= upper:
        mid = (lower + upper + 1) // 2
        # The rotation point is strictly smaller than its left neighbour.
        if mid >= 1 and arr[mid] < arr[mid - 1]:
            return mid
        if arr[lower] < arr[mid]:
            lower = mid + 1  # left half sorted -> answer lies to the right
        elif arr[mid] <= arr[upper]:
            upper = mid - 1  # right half sorted -> answer lies to the left
    if arr[0] < arr[-1]:
        return 0
    return mid
def find_rotation_point_helper(arr, lower, upper):
    """Recursive binary search for the rotation index; -1 when exhausted."""
    if lower > upper:
        return -1
    mid = (lower + upper + 1) // 2
    # The rotation point is strictly smaller than its left neighbour.
    if mid >= 1 and arr[mid] < arr[mid - 1]:
        return mid
    if arr[lower] < arr[mid]:
        # Left half is sorted, so recurse into the right half.
        return find_rotation_point_helper(arr, mid + 1, upper)
    elif arr[mid] <= arr[upper]:
        # Right half is sorted, so recurse into the left half.
        return find_rotation_point_helper(arr, lower, mid - 1)
def find_rotation_point_runner(arr, expected):
    """Run both implementations and print each result beside *expected*."""
    n = len(arr)
    # The recursive helper is only consulted when the array is rotated.
    recursive = 0 if arr[0] <= arr[n - 1] else \
        find_rotation_point_helper(arr, 0, n - 1)
    print('Recursive: ', recursive, ' == ', expected, '?')
    iterative = find_rotation_point_iterative(arr, 0, n - 1)
    print('Iterative: ', iterative, ' == ', expected, '?')
def within_rotated_segment(arr, lower, upper, target):
    """Return True if *target* may lie in arr[lower..upper].

    Either the segment is sorted and target falls within its range, or the
    segment contains the rotation point and target may still be inside it.
    """
    if arr[lower] <= arr[upper]:
        # Segment is sorted: a plain range test suffices.
        return arr[lower] <= target <= arr[upper]
    # Segment wraps past the rotation point.
    return target >= arr[lower] or target <= arr[upper]
def search_rotated_arr(arr, target):
    """Binary search in a rotated sorted array.

    :param arr: rotated, sorted sequence of comparable values
    :param target: value to locate
    :return: index of *target*, or None if it is not present
    """
    lower = 0
    upper = len(arr) - 1
    while lower < upper:
        partition = (lower + upper + 1) // 2
        # Fix: compare with ==, not "is". Identity comparison only worked
        # here by accident because small interned string literals happened
        # to share objects; it breaks for runtime-built strings.
        if target == arr[partition]:
            return partition
        if within_rotated_segment(arr, lower, partition - 1, target):
            upper = partition - 1
        else:
            lower = partition + 1
    if arr[upper] == target:
        return upper
    # Implicitly returns None when the target is absent.
def main(argv):
    """Exercise the rotation-point finders and the rotated binary search
    against three fixtures: rotation at the front, middle, and end."""
    words = [
        'asymptote', # <-- rotates here!
        'babka',
        'banoffee',
        'engender',
        'karpatka',
        'othellolagkage',
        'ptolemaic',
        'retrograde',
        'supplant',
        'undulate',
        'xenoepist',
    ]
    find_rotation_point_runner(words, 0)
    print(search_rotated_arr(words, 'asymptote'), '\t== 0?')
    print(search_rotated_arr(words, 'retrograde'), '\t== 7?')
    print(search_rotated_arr(words, 'xenoepist'), '\t== 10?')
    # Rotation point in the middle of the array.
    words = [
        'ptolemaic',
        'retrograde',
        'supplant',
        'undulate',
        'xenoepist',
        'asymptote', # <-- rotates here!
        'babka',
        'banoffee',
        'engender',
        'karpatka',
        'othellolagkage',
    ]
    find_rotation_point_runner(words, 5)
    print(search_rotated_arr(words, 'asymptote'), '\t== 5?')
    print(search_rotated_arr(words, 'retrograde'), '\t== 1?')
    print(search_rotated_arr(words, 'xenoepist'), '\t== 4?')
    print(search_rotated_arr(words, 'karpatka'), '\t== 9?')
    # Rotation point at the very end of the array.
    words = [
        'babka',
        'banoffee',
        'engender',
        'karpatka',
        'othellolagkage',
        'ptolemaic',
        'retrograde',
        'supplant',
        'undulate',
        'xenoepist',
        'asymptote' # <-- rotates here!
    ]
    find_rotation_point_runner(words, 10)
    print(search_rotated_arr(words, 'asymptote'), '\t== 10?')
    print(search_rotated_arr(words, 'retrograde'), '\t== 6?')
    print(search_rotated_arr(words, 'xenoepist'), '\t== 9?')
    print(search_rotated_arr(words, 'karpatka'), '\t== 3?')
if __name__ == '__main__':
main(sys.argv[1:]) | mit |
pedroernesto/morphounit | morphounit/tests/morph_cells/test_NeuroM_MorphStats.py | 1 | 20138 | import sciunit
import sciunit.scores as sci_scores
import morphounit.scores as mph_scores
# import morphounit.capabilities as mph_cap
import morphounit.plots as mph_plots
import os
import copy
import json
import neurom as nm
import numpy as np
import quantities
class NeuroM_MorphStats_Test(sciunit.Test):
"""Tests a set of cell's morphological features"""
score_type = mph_scores.CombineZScores
    def __init__(self, observation=None, name="NeuroM_MorphStats_Test", base_directory=None):
        """Set up output paths, validate the raw observation, and register
        the formatted observation with sciunit.Test.

        :param observation: nested dict of expected morpho-feature values
        :param name: test instance name
        :param base_directory: where configuration/output files are written
        """
        self.description = "Tests a set of cell's morpho-features in a digitally reconstructed neuron"
        # require_capabilities = (mph_cap.ProvidesMorphFeatureInfo,)
        if not base_directory:
            base_directory = "."
        self.path_test_output = base_directory
        # create output directory
        if not os.path.exists(self.path_test_output):
            os.makedirs(self.path_test_output)
        # Checks raw observation data compliance with NeuroM's nomenclature
        self.check_observation(observation)
        # Keep the unformatted observation: later steps (config-file
        # generation, prediction pruning) key off its raw structure.
        self.raw_observation = observation
        # NOTE(review): the json.dumps result is discarded -- presumably a
        # leftover serializability check; confirm intent or remove.
        json.dumps(observation, sort_keys=True, indent=3)
        self.figures = []
        observation = self.format_data(observation)
        sciunit.Test.__init__(self, observation, name)
# ----------------------------------------------------------------------
    def check_observation(self, observation):
        """Checks raw observation file compliance with NeuroM's ('fst' module) nomenclature.

        Raises AssertionError naming the offending key when a cell part,
        feature name, or statistical-mode prefix is not recognised.
        """
        # Cell parts available
        neuron_parts_avail = [neurite_type.name for neurite_type in nm.NEURITE_TYPES[1:]]
        neuron_parts_avail.append('neuron')
        # Cell features available
        cell_feats_avail = nm.fst.NEURONFEATURES.keys()
        # Neurite features available
        neurite_feats_avail = list(nm.fst.NEURITEFEATURES.keys())
        neurite_feats_extra = ['neurite_field_diameter', 'neurite_largest_extent', 'neurite_shortest_extent',
                               'neurite_X_extent', 'neurite_Y_extent', 'neurite_Z_extent']
        neurite_feats_avail.extend(neurite_feats_extra)
        # Statistical modes available
        stat_modes = ['min', 'max', 'median', 'mean', 'total', 'std']
        # morph_stats's nomenclature constraints to specify observation files
        # (kept disabled: would print the full nomenclature help to the user)
        """
        self.neuroM_morph_stats_doc(neuron_parts_avail,
                                    cell_feats_avail, neurite_feats_avail, neurite_feats_extra,
                                    stat_modes)
        """
        # print "Checking observation file compliance with NeuroM's ('fst' module) nomenclature..."
        for dict1 in observation.values():  # Dict. with cell's part-features dictionary pairs for each cell
            for key2, dict2 in list(dict1.items()):  # Dict. with feature name-value pairs for each cell part:
                # neuron, apical_dendrite, basal_dendrite or axon
                assert (key2 in neuron_parts_avail), \
                    f"{key2} is not permitted for neuron parts. Please, use one in the following \
                    list:\n {neuron_parts_avail}"
                for key3 in dict2.keys():
                    # Observation keys look like "<stat-mode>_<feature-name>".
                    feat_name, stat_mode = key3.split('_', 1)[1], key3.split('_', 1)[0]
                    if key2 == 'neuron':
                        # Checking the NeuroM features for the cell
                        assert (feat_name in cell_feats_avail), \
                            f"{feat_name} is not permitted for cells. Please, use one in the following \
                            list:\n {sorted(cell_feats_avail)}"
                        # Checking the statistical mode for the cell features
                        assert (stat_mode in stat_modes), \
                            f"{stat_mode} is not permitted for statistical modes. Please, use one in \
                            the following list:\n {stat_modes}"
                    elif feat_name in nm.fst.NEURITEFEATURES.keys():
                        # NOTE(review): the message below prints a stray
                        # backslash ("\list") -- candidate message-text fix.
                        assert (stat_mode in stat_modes), \
                            f"{stat_mode} is not permitted for statistical modes. Please, use one in \
                            the following \list:\n {stat_modes}"
                    else:
                        # Checking the extra-NeuroM features for Neurites, if any
                        assert (key3 in neurite_feats_extra), \
                            f"{key3} is not permitted for neurites. Please, use one in the following \
                            list:\n {sorted(neurite_feats_avail)}"
# ----------------------------------------------------------------------
def neuroM_morph_stats_doc(self, neuron_parts_avail, cell_feats_avail,
neurite_feats_avail, neurite_feats_extra, stat_modes):
"""Prints NeuroM ('fst' module) nomenclature constraints to be followed
by the user when specifying observation files"""
print ('Cell parts available:\n', sorted(neuron_parts_avail), '\n')
print ('Cell features available:\n', sorted(cell_feats_avail), '\n')
print ('Neurite features available:\n', sorted(neurite_feats_avail), '\n')
print ('A summary statistics must be indicated for each feature, with the ' \
'exception of those contained in the set ', neurite_feats_extra, \
'. Statistics modes available: ', stat_modes, '\n')
# How to specify feature_name = mode + feature
print ("To that end, a prefix formed with the stats. mode intended, followed by '_', " \
"should be added to the feature name. For instance: 'total_number_of_neurites' \n")
print ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n\n")
# ----------------------------------------------------------------------
def format_data(self, data):
"""
This accepts data input in the form:
***** (observation) *****
{"cell_kind": { "cell_part_1": {'morph_feature_name_11': {'mean value': 'X11_mean units_str', 'std': 'X11_std units_str'},
'morph_feature_name_12': {'mean value': 'X12_mean units_str', 'std': 'X12_std units_str'},
... },
"cell_part_2": {'morph_feature_name_21': {'mean value': 'X21_mean units_str', 'std': 'X21_std units_str'},
'morph_feature_name_22': {'mean value': 'X22_mean units_str', 'std': 'X22_std units_str'},
... },
... }
}
***** (prediction) *****
{"cell1_ID": { 'cell_part_1': {'morph_feature_name_11': {'value': 'X11 units_str'},
'morph_feature_name_12': {'value': 'X12 units_str'},
... },
'cell_part_2': {'morph_feature_name_21': {'value': 'X21 units_str'},
'morph_feature_name_22': {'value': 'X22 units_str'},
... },
... }
"cell2_ID": { 'cell_part_1': {'morph_feature_name_11': {'value': 'Y11 units_str'},
'morph_feature_name_12': {'value': 'Y12 units_str'},
... },
'cell_part_2': {'morph_feature_name_21': {'value': 'Y21 units_str'},
'morph_feature_name_22': {'value': 'Y22 units_str'},
... },
... }
... }
It splits the values of mean, std and value to numeric quantities
and their units (via quantities package)
"""
dim_non = ['order', 'number', 'asymmetry', 'rate']
dim_um = ['radii', 'length', 'distance', 'extent']
dim_umSq = ['area']
dim_umCb = ['volume']
dim_deg = ['angle']
for dict1 in data.values(): # Dict. with cell's part-features dictionary pairs for each cell
for dict2 in dict1.values(): # Dict. with feature name-value pairs for each cell part:
# neuron, apical_dendrite, basal_dendrite or axon
for dict3 in dict2.values(): # Dict. with 'value', 'mean' and 'std' values
for key, val in dict3.items():
quantity_parts = val.split()
number, units_str = float(quantity_parts[0]), " ".join(quantity_parts[1:])
try:
if any(sub_str in key for sub_str in dim_um):
assert (units_str == quantities.um | units_str == quantities.mm), \
sciunit.Error("Values not in appropriate format. Required units: mm or um")
elif any(sub_str in key for sub_str in dim_non):
assert (units_str == quantities.dimensionless), \
sciunit.Error("Values not in appropriate format. Required units: ",
quantities.dimensionless)
finally:
dict3[key] = quantities.Quantity(number, units_str)
return data
# ----------------------------------------------------------------------
def validate_observation(self, observation):
# Checking format of the observation data
for dict1 in observation.values(): # Dict. with cell's part-features dictionary pairs for each cell
for dict2 in dict1.values(): # Dict. with feature name-value pairs for each cell part:
# neuron, apical_dendrite, basal_dendrite or axon
for dict3 in dict2.values(): # Dict. with 'value' or 'mean' and 'std' values
for val in dict3.values():
assert type(val) is quantities.Quantity, \
sciunit.Error(("Observation must be of the form "
"{'mean': 'XX units_str','std': 'YY units_str'}"))
# ----------------------------------------------------------------------
def set_morph_stats_config_file(self):
""" Creates two configuration files, following the structure of a
raw observation JSON file (previously to SciUnit formatting):
- One for morph_stats features to be computed, and
a second one for non-morph_stats features found in the observation file."""
observation = self.raw_observation
neurite_type_list = list()
feat_name_stat_mode_neurite_dict = dict()
feat_name_stat_mode_cell_dict = dict()
neurite_feats_extra_dict = dict() # For non-morph_stats features
for dict1 in observation.values(): # Dict. with cell's part-features dictionary pairs for each cell
for key2, dict2 in dict1.items(): # Dict. with feature name-value pairs for each cell part:
# neuron, apical_dendrite, basal_dendrite or axon
if key2 == 'neuron':
feat_name_stat_mode_cell_dict = dict()
else:
neurite_type_list.append(key2.upper())
neurite_feats_extra_dict.update({key2: []})
for key3 in dict2.keys():
feat_name, stat_mode = key3.split('_', 1)[1], key3.split('_', 1)[0]
if key2 == 'neuron':
if feat_name in feat_name_stat_mode_cell_dict and \
stat_mode not in feat_name_stat_mode_cell_dict[feat_name]:
feat_name_stat_mode_cell_dict[feat_name].append(stat_mode)
else:
feat_name_stat_mode_cell_dict.update({feat_name: [stat_mode]})
elif feat_name in nm.fst.NEURITEFEATURES.keys():
if feat_name in feat_name_stat_mode_neurite_dict and \
stat_mode not in feat_name_stat_mode_neurite_dict[feat_name]:
feat_name_stat_mode_neurite_dict[feat_name].append(stat_mode)
else:
feat_name_stat_mode_neurite_dict.update({feat_name: [stat_mode]})
else:
neurite_feats_extra_dict[key2].append(key3)
# Morphometrics of morph_stats features to be computed
morph_stats_config_dict = dict()
morph_stats_config_dict.update({'neurite_type': neurite_type_list,
'neurite': feat_name_stat_mode_neurite_dict,
'neuron': feat_name_stat_mode_cell_dict})
# print('Configuration file for morph_stats was completed. \n', \
# json.dumps(morph_stats_config_dict, sort_keys=True, indent=3))
obs_dir = self.path_test_output
# obs_dir = os.path.dirname(observation_path)
# obs_file_name = os.path.basename(observation_path)
# Saving NeuroM's morph_stats configuration file in JSON format
# morph_stats_conf_file = os.path.splitext(obs_file_name)[0] + '_config.json'
morph_stats_config_path = os.path.join(obs_dir, 'morph_stats_config.json')
with open(morph_stats_config_path, 'w') as fp:
json.dump(morph_stats_config_dict, fp, sort_keys=True, indent=3)
# Morphometrics of non-morph_stats features to be computed
for key, value in neurite_feats_extra_dict.items():
if not value:
del neurite_feats_extra_dict[key]
# neuroM_extra_config_file = os.path.splitext(obs_file_name)[0] + '_extra.json'
neuroM_extra_config_path = os.path.join(obs_dir, 'neuroM_extra_config.json')
# Remove existing file, if any
extra_file_exists = os.path.isfile(neuroM_extra_config_path)
if extra_file_exists:
os.remove(neuroM_extra_config_path)
if neurite_feats_extra_dict:
# print('The following morphometrics will be extracted separately and added to the model prediction: \n', \
# json.dumps(neurite_feats_extra_dict, sort_keys=True, indent=3))
# Saving NeuroM's configuration extra-file in JSON format
with open(neuroM_extra_config_path, 'w') as fp:
json.dump(neurite_feats_extra_dict, fp, sort_keys=True, indent=3)
return morph_stats_config_path, neuroM_extra_config_path
# ----------------------------------------------------------------------
    def raw_model_prediction(self, model):
        """Create and return the raw model prediction (a nested dict of
        morphometrics) by driving NeuroM's `morph_stats` through the
        configuration files built from the observation structure.

        Side effects: writes and then deletes the two JSON config files and
        the model's intermediate prediction file.
        """
        # Creates a configuration file for morph_stats, following the structure of a raw observation data
        morph_stats_config_path, neuroM_extra_config_path = self.set_morph_stats_config_file()
        # Creating the prediction file with morph_stats
        # NOTE(review): "morp_path" looks like a typo for "morph_path" -- confirm
        # nothing else reads this attribute before renaming it.
        self.morp_path = model.morph_path
        mod_prediction_temp = model.set_morph_feature_info(morph_stats_config_path=morph_stats_config_path)
        os.remove(morph_stats_config_path)
        # Deleting some neurite's morphometrics added by morph_stats, but not present in the observation file.
        # Iterate over the untouched copy while deleting from the deep copy,
        # so the dictionaries being walked are never mutated mid-iteration.
        mod_prediction = copy.deepcopy(mod_prediction_temp)
        cell_t = list(self.raw_observation.keys())[0]  # Cell type
        for cell_ID, cell_dict in list(mod_prediction_temp.items()):
            for cell_part, cell_part_dict in list(cell_dict.items()):
                for feat_name_stat_mode in cell_part_dict:
                    if cell_part != 'neuron' and feat_name_stat_mode not in self.raw_observation[cell_t][cell_part]:
                        del mod_prediction[cell_ID][cell_part][feat_name_stat_mode]
        with open(model.output_pred_file, 'w') as fp:
            json.dump(mod_prediction, fp, sort_keys=True, indent=3)
        # Add the morphometrics that morph_stats cannot compute, then clean up.
        mod_prediction_all = model.complete_morph_feature_info(neuroM_extra_config_path=neuroM_extra_config_path)
        os.remove(neuroM_extra_config_path)
        os.remove(model.output_pred_file)
        return mod_prediction_all
# ----------------------------------------------------------------------
def generate_prediction(self, model, verbose=False):
"""Implementation of sciunit.Test.generate_prediction"""
# Creates a model prediction file following some NeuroM configuration
# files for NeuroM, but additional formatting is needed
mod_prediction_all = self.raw_model_prediction(model)
mod_prediction = model.pre_formatting(mod_data=mod_prediction_all)
self.prediction_txt = copy.deepcopy(mod_prediction)
prediction = self.format_data(mod_prediction)
return prediction
# ----------------------------------------------------------------------
def compute_score(self, observation, prediction, verbose=True):
"""Implementation of sciunit.Test.score_prediction"""
self.observation = observation
self.prediction = prediction
# Computing the scores
cell_t = list(observation.keys())[0] # Cell type
score_cell_dict = dict.fromkeys([key0 for key0 in prediction.keys()], [])
obs_features = copy.deepcopy(list(observation.values()))[0]
score_feat_dict = dict()
for key0 in prediction: # cell_ID keys
score_feat_dict.update({key0: obs_features})
scores_cell_list = list()
for key1 in score_feat_dict[key0]: # cell's part: neuron, axon, apical_dendrite or basal_dendrite
for key2 in score_feat_dict[key0][key1]: # features names
score_feat_value = sci_scores.ZScore.compute(observation[cell_t][key1][key2],
prediction[key0][key1][key2]).score
scores_cell_list.extend([score_feat_value])
score_feat_dict[key0][key1][key2] = {"score": score_feat_value}
Mean_Zscore_dict = {"A mean |Z-score|": mph_scores.CombineZScores.compute(scores_cell_list).score}
score_feat_dict[key0].update(Mean_Zscore_dict)
score_cell_dict[key0] = Mean_Zscore_dict
self.score_cell_dict = score_cell_dict
self.score_feat_dict = score_feat_dict
# Taking the average of the cell's scores as the overall score for the Test
mean_score = np.mean([dict1["A mean |Z-score|"] for dict1 in score_cell_dict.values()])
self.score = mph_scores.CombineZScores(mean_score)
self.score.description = "A mean |Z-score|"
# ---------------------- Saving relevant results ----------------------
# Saving json file with model predictions
json_pred_file = mph_plots.jsonFile_MorphStats(testObj=self, dictData=self.prediction_txt,
prefix_name="prediction_summary_")
json_pred_files = json_pred_file.create()
self.figures.extend(json_pred_files)
# Saving json file with scores
json_scores_file = mph_plots.jsonFile_MorphStats(testObj=self, dictData=self.score_feat_dict,
prefix_name="scores_summary_")
json_scores_files = json_scores_file.create()
self.figures.extend(json_scores_files)
# Saving table with results
txt_table = mph_plots.TxtTable_MorphStats(testObj=self)
table_files = txt_table.create()
self.figures.extend(table_files)
# Saving figure with scores bar-plot
barplot_figure = mph_plots.ScoresBars_MorphStats(testObj=self)
barplot_files = barplot_figure.create()
self.figures.extend(barplot_files)
return self.score
def bind_score(self, score, model, observation, prediction):
score.related_data["figures"] = self.figures
return score
| bsd-3-clause |
jonathanunderwood/numpy | numpy/core/tests/test_function_base.py | 68 | 4911 | from __future__ import division, absolute_import, print_function
from numpy import (logspace, linspace, dtype, array, finfo, typecodes, arange,
isnan)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal
)
class TestLogspace(TestCase):
    """Behaviour of numpy.logspace: counts, endpoint handling, dtypes."""

    def test_basic(self):
        # Default sample count is 50.
        samples = logspace(0, 6)
        assert_(len(samples) == 50)
        # With endpoint included the last value hits the stop exponent exactly.
        samples = logspace(0, 6, num=100)
        assert_(samples[-1] == 10 ** 6)
        # endpoint=0 excludes the stop value.
        samples = logspace(0, 6, endpoint=0)
        assert_(samples[-1] < 10 ** 6)
        # Integer exponents give exact powers of ten.
        samples = logspace(0, 6, num=7)
        assert_array_equal(samples, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])

    def test_dtype(self):
        # The dtype argument must be honoured for float and integer outputs.
        for code in ('float32', 'float64', 'int32'):
            assert_equal(logspace(0, 6, dtype=code).dtype, dtype(code))
class TestLinspace(TestCase):
    """Tests for numpy.linspace from a historical numpy test-suite file."""
    def test_basic(self):
        # Default count, explicit count, endpoint handling and invalid num.
        y = linspace(0, 10)
        assert_(len(y) == 50)
        y = linspace(2, 10, num=100)
        assert_(y[-1] == 10)
        y = linspace(2, 10, endpoint=0)
        assert_(y[-1] < 10)
        assert_raises(ValueError, linspace, 0, 10, num=-1)
    def test_corner(self):
        # num=1 returns only the start value.
        y = list(linspace(0, 1, 1))
        assert_(y == [0.0], y)
        # NOTE(review): a float num (2.5) was accepted and truncated by the
        # numpy version this file targets; modern numpy raises TypeError here
        # -- confirm the intended numpy version before reusing this test.
        y = list(linspace(0, 1, 2.5))
        assert_(y == [0.0, 1.0])
    def test_type(self):
        # The result dtype must not depend on the number of samples.
        t1 = linspace(0, 1, 0).dtype
        t2 = linspace(0, 1, 1).dtype
        t3 = linspace(0, 1, 2).dtype
        assert_equal(t1, t2)
        assert_equal(t2, t3)
    def test_dtype(self):
        # An explicit dtype argument is honoured.
        y = linspace(0, 6, dtype='float32')
        assert_equal(y.dtype, dtype('float32'))
        y = linspace(0, 6, dtype='float64')
        assert_equal(y.dtype, dtype('float64'))
        y = linspace(0, 6, dtype='int32')
        assert_equal(y.dtype, dtype('int32'))
    def test_array_scalar(self):
        # 0-d array endpoints with small integer dtypes must give the same
        # result as the equivalent Python floats (no overflow in start/stop).
        lim1 = array([-120, 100], dtype="int8")
        lim2 = array([120, -100], dtype="int8")
        lim3 = array([1200, 1000], dtype="uint16")
        t1 = linspace(lim1[0], lim1[1], 5)
        t2 = linspace(lim2[0], lim2[1], 5)
        t3 = linspace(lim3[0], lim3[1], 5)
        t4 = linspace(-120.0, 100.0, 5)
        t5 = linspace(120.0, -100.0, 5)
        t6 = linspace(1200.0, 1000.0, 5)
        assert_equal(t1, t4)
        assert_equal(t2, t5)
        assert_equal(t3, t6)
    def test_complex(self):
        # Complex endpoints are interpolated component-wise.
        lim1 = linspace(1 + 2j, 3 + 4j, 5)
        t1 = array([ 1.0+2.j, 1.5+2.5j, 2.0+3.j, 2.5+3.5j, 3.0+4.j])
        lim2 = linspace(1j, 10, 5)
        t2 = array([ 0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0.j])
        assert_equal(lim1, t1)
        assert_equal(lim2, t2)
    def test_physical_quantities(self):
        # linspace must work for float subclasses that only define plain
        # arithmetic; the isinstance asserts in __add__/__sub__ check that
        # the endpoints are only ever combined with values of the same class.
        class PhysicalQuantity(float):
            def __new__(cls, value):
                return float.__new__(cls, value)
            def __add__(self, x):
                assert_(isinstance(x, PhysicalQuantity))
                return PhysicalQuantity(float(x) + float(self))
            __radd__ = __add__
            def __sub__(self, x):
                assert_(isinstance(x, PhysicalQuantity))
                return PhysicalQuantity(float(self) - float(x))
            def __rsub__(self, x):
                assert_(isinstance(x, PhysicalQuantity))
                return PhysicalQuantity(float(x) - float(self))
            def __mul__(self, x):
                return PhysicalQuantity(float(x) * float(self))
            __rmul__ = __mul__
            def __div__(self, x):
                return PhysicalQuantity(float(self) / float(x))
            def __rdiv__(self, x):
                return PhysicalQuantity(float(x) / float(self))
        a = PhysicalQuantity(0.0)
        b = PhysicalQuantity(1.0)
        assert_equal(linspace(a, b), linspace(0.0, 1.0))
    def test_denormal_numbers(self):
        # Regression test for gh-5437. Will probably fail when compiled
        # with ICC, which flushes denormals to zero
        for dt in (dtype(f) for f in typecodes['Float']):
            stop = finfo(dt).tiny * finfo(dt).resolution
            assert_(any(linspace(0, stop, 10, endpoint=False, dtype=dt)))
    def test_equivalent_to_arange(self):
        # With integer dtype and num = stop+1, linspace matches arange.
        for j in range(1000):
            assert_equal(linspace(0, j, j+1, dtype=int),
                         arange(j+1, dtype=int))
    def test_retstep(self):
        # retstep=True returns a (samples, step) tuple; with fewer than two
        # samples the step is undefined and reported as nan.
        y = linspace(0, 1, 2, retstep=True)
        assert_(isinstance(y, tuple) and len(y) == 2)
        for num in (0, 1):
            for ept in (False, True):
                y = linspace(0, 1, num, endpoint=ept, retstep=True)
                assert_(isinstance(y, tuple) and len(y) == 2 and
                        len(y[0]) == num and isnan(y[1]),
                        'num={0}, endpoint={1}'.format(num, ept))
if __name__ == "__main__":
    # Allow running this test module directly (legacy numpy.testing runner).
    run_module_suite()
| bsd-3-clause |
x303597316/hue | desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf3/anim.py | 56 | 1872 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import ANIMNS
from .element import Element
# Autogenerated
def Animate(**args):
    """Create an <anim:animate> element with the given attributes."""
    return Element(qname=(ANIMNS, 'animate'), **args)
def Animatecolor(**args):
    """Create an <anim:animateColor> element with the given attributes."""
    return Element(qname=(ANIMNS, 'animateColor'), **args)
def Animatemotion(**args):
    """Create an <anim:animateMotion> element with the given attributes."""
    return Element(qname=(ANIMNS, 'animateMotion'), **args)
def Animatetransform(**args):
    """Create an <anim:animateTransform> element with the given attributes."""
    return Element(qname=(ANIMNS, 'animateTransform'), **args)
def Audio(**args):
    """Create an <anim:audio> element with the given attributes."""
    return Element(qname=(ANIMNS, 'audio'), **args)
def Command(**args):
    """Create an <anim:command> element with the given attributes."""
    return Element(qname=(ANIMNS, 'command'), **args)
def Iterate(**args):
    """Create an <anim:iterate> element with the given attributes."""
    return Element(qname=(ANIMNS, 'iterate'), **args)
def Par(**args):
    """Create an <anim:par> (parallel container) element."""
    return Element(qname=(ANIMNS, 'par'), **args)
def Param(**args):
    """Create an <anim:param> element with the given attributes."""
    return Element(qname=(ANIMNS, 'param'), **args)
def Seq(**args):
    """Create an <anim:seq> (sequential container) element."""
    return Element(qname=(ANIMNS, 'seq'), **args)
def Set(**args):
    """Create an <anim:set> element with the given attributes."""
    return Element(qname=(ANIMNS, 'set'), **args)
def Transitionfilter(**args):
    """Create an <anim:transitionFilter> element with the given attributes."""
    return Element(qname=(ANIMNS, 'transitionFilter'), **args)
| apache-2.0 |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/chardet/jpcntx.py | 1777 | 19348 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import wrap_ord
# Number of frequency categories used by the jp2CharContext table below.
NUM_OF_CATEGORY = 6
# Confidence value returned before enough data has been collected.
DONT_KNOW = -1
# Sequences needed before got_enough_data() reports True.
ENOUGH_REL_THRESHOLD = 100
# Hard cap: once this many sequences are counted, feed() sets _mDone.
MAX_REL_THRESHOLD = 1000
# Fewer sequences than this makes get_confidence() return DONT_KNOW.
MINIMUM_DATA_THRESHOLD = 4
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis:
    """Accumulate statistics on 2-character hiragana sequences.

    Subclasses (Shift_JIS / EUC-JP flavours) provide get_order(); this base
    class feeds byte buffers through it and tallies how frequent each
    observed character pair is according to the jp2CharContext table.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated state so the analyser can be reused."""
        self._mTotalRel = 0  # total sequence received
        # category counters; each integer counts sequences in its category
        self._mRelSample = [0] * NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._mNeedToSkipCharNum = 0
        self._mLastCharOrder = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
    def feed(self, aBuf, aLen):
        """Consume aLen bytes of aBuf, updating the sequence statistics."""
        if self._mDone:
            return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. In case the last one or two bytes in the last
        # buffer were not complete, we record how many bytes are needed to
        # complete that character and skip those bytes here. We could record
        # those bytes and analyse the character once complete, but since one
        # character makes little difference, simply skipping it keeps the
        # logic simple and improves performance.
        i = self._mNeedToSkipCharNum
        while i < aLen:
            order, charLen = self.get_order(aBuf[i:i + 2])
            i += charLen
            if i > aLen:
                # Character straddles the buffer boundary; finish it next feed.
                self._mNeedToSkipCharNum = i - aLen
                self._mLastCharOrder = -1
            else:
                if (order != -1) and (self._mLastCharOrder != -1):
                    self._mTotalRel += 1
                    if self._mTotalRel > MAX_REL_THRESHOLD:
                        # Enough data seen; freeze the statistics.
                        self._mDone = True
                        break
                    self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1
                self._mLastCharOrder = order
    def got_enough_data(self):
        # True once enough sequences were seen for a meaningful confidence.
        return self._mTotalRel > ENOUGH_REL_THRESHOLD
    def get_confidence(self):
        # This is just one way to calculate confidence. It works well for me.
        # Fraction of sequences that fall outside the "never seen" category.
        if self._mTotalRel > MINIMUM_DATA_THRESHOLD:
            return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel
        else:
            return DONT_KNOW
    def get_order(self, aBuf):
        # Subclass hook: return (hiragana order or -1, char byte length).
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Shift_JIS flavour of the Japanese context analyser."""
    def __init__(self):
        # Initialize the shared analysis state (_mDone, counters, ...) via the
        # parent constructor. The previous override skipped this call, so
        # feed() raised AttributeError unless reset() happened to be invoked
        # externally first.
        JapaneseContextAnalysis.__init__(self)
        self.charset_name = "SHIFT_JIS"
    def get_charset_name(self):
        """Return the charset seen so far ("SHIFT_JIS" or "CP932")."""
        return self.charset_name
    def get_order(self, aBuf):
        """Return (hiragana order or -1, byte length of the first char)."""
        if not aBuf:
            return -1, 1
        # find out current char's byte length
        first_char = wrap_ord(aBuf[0])
        if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)):
            charLen = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # These lead bytes only occur in the CP932 extension.
                self.charset_name = "CP932"
        else:
            charLen = 1
        # return its order if it is hiragana
        if len(aBuf) > 1:
            second_char = wrap_ord(aBuf[1])
            if (first_char == 202) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, charLen
        return -1, charLen
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """EUC-JP flavour of the Japanese context analyser."""
    def get_order(self, aBuf):
        """Return (hiragana order or -1, byte length of the first char)."""
        if not aBuf:
            return -1, 1
        lead = wrap_ord(aBuf[0])
        # Determine the byte length of the current character from its lead byte.
        if lead == 0x8E or 0xA1 <= lead <= 0xFE:
            charLen = 2
        elif lead == 0x8F:
            charLen = 3
        else:
            charLen = 1
        # Hiragana in EUC-JP: lead byte 0xA4, trail byte in 0xA1-0xF3.
        if len(aBuf) > 1:
            trail = wrap_ord(aBuf[1])
            if lead == 0xA4 and 0xA1 <= trail <= 0xF3:
                return trail - 0xA1, charLen
        return -1, charLen
# flake8: noqa
| gpl-3.0 |
nschloe/pynosh | pynosh/modelevaluator_nls.py | 1 | 15288 | # -*- coding: utf-8 -*-
#
"""
Provide information around the nonlinear Schrödinger equations.
"""
import numpy
from scipy import sparse
import warnings
import krypy
class NlsModelEvaluator(object):
    """Nonlinear Schrödinger model evaluator class.

    Incorporates

    * Nonlinear Schrödinger: :math:`g=1.0, V=0.0, A=0.0`.
    * Gross--Pitaevskii: :math:`g=1.0`, :math:`V` given, :math:`A=0.0`.
    * Ginzburg--Landau: :math:`g=1.0, V=-1.0`,
      and some magnetic potential :math:`A`.
    """
def __init__(
self, mesh, V=None, A=None, preconditioner_type="none", num_amg_cycles=numpy.inf
):
"""Initialization. Set mesh.
"""
self.dtype = complex
self.mesh = mesh
n = len(mesh.node_coords)
if V is None:
self._V = numpy.zeros(n)
else:
self._V = V
if A is None:
self._raw_magnetic_vector_potential = numpy.zeros((n, 3))
else:
self._raw_magnetic_vector_potential = A
self._keo_cache = None
self._keo_cache_mu = 0.0
self._edgecoeff_cache = None
self.tot_amg_cycles = []
self.cv_variant = "voronoi"
self._preconditioner_type = preconditioner_type
self._num_amg_cycles = num_amg_cycles
return
def compute_f(self, x, mu, g):
"""Computes the nonlinear Schrรถdinger residual
.. math::
GP(\\psi) = K\\psi + (V + g |\\psi|^2) \\psi
"""
keo = self._get_keo(mu)
if self.mesh.control_volumes is None:
self.mesh.compute_control_volumes(variant=self.cv_variant)
res = (keo * x) / self.mesh.control_volumes.reshape(x.shape) + (
self._V.reshape(x.shape) + g * abs(x) ** 2
) * x
return res
    def get_jacobian(self, x, mu, g):
        """Returns a LinearOperator object that defines the matrix-vector
        multiplication scheme for the Jacobian operator as in

        .. math::
            A \\phi + B \\phi^*

        with

        .. math::
            A &= K + I (V + g \\cdot 2|\\psi|^2),\\\\
            B &= g \\cdot diag( \\psi^2 ).
        """
        def _apply_jacobian(phi):
            # Normalize the shape used for broadcasting: column vectors and
            # (n, k) blocks are scaled column-wise by (n, 1) factors.
            if len(phi.shape) == 1:
                shape = phi.shape
            elif len(phi.shape) == 2:
                # phi may be a vector of shape (n, k).
                shape = (phi.shape[0], 1)
            else:
                raise ValueError("Illegal phi.")
            # Closure over keo, alpha, gPsi0Squared computed below.
            y = (
                (keo * phi) / self.mesh.control_volumes.reshape(shape)
                + alpha.reshape(shape) * phi
                + gPsi0Squared.reshape(shape) * phi.conj()
            )
            return y
        assert x is not None
        keo = self._get_keo(mu)
        if self.mesh.control_volumes is None:
            self.mesh.compute_control_volumes(variant=self.cv_variant)
        # Diagonal of A (without K): V + 2 g |psi|^2.
        alpha = self._V.reshape(x.shape) + g * 2.0 * (x.real ** 2 + x.imag ** 2)
        gPsi0Squared = g * x ** 2
        num_unknowns = len(self.mesh.node_coords)
        # NOTE(review): the same callback is used for dot and dot_adj --
        # presumably the operator is self-adjoint w.r.t. the real inner
        # product used by krypy; confirm before changing.
        return krypy.utils.LinearOperator(
            (num_unknowns, num_unknowns),
            self.dtype,
            dot=_apply_jacobian,
            dot_adj=_apply_jacobian,
        )
def get_jacobian_blocks(self, x, mu, g):
"""Returns
.. math::
A &= K + I (V + g \\cdot 2|\\psi|^2),\\\\
B &= g \\cdot diag( \\psi^2 ).
"""
assert x is not None
if self.mesh.control_volumes is None:
self.mesh.compute_control_volumes(variant=self.cv_variant)
A = self._get_keo(mu).copy()
diag = A.diagonal()
alpha = self._V.reshape(x.shape) + g * 2.0 * (x.real ** 2 + x.imag ** 2)
diag += alpha.reshape(diag.shape) * self.mesh.control_volumes.reshape(x.shape)
A.setdiag(diag)
num_nodes = len(self.mesh.node_coords)
from scipy.sparse import spdiags
B = spdiags(
g * x ** 2 * self.mesh.control_volumes.reshape(x.shape),
[0],
num_nodes,
num_nodes,
)
return A, B
def get_preconditioner(self, x, mu, g):
"""Return the preconditioner.
"""
if self._preconditioner_type == "none":
return None
if self._preconditioner_type == "cycles":
warnings.warn(
"Preconditioner inverted approximately with "
"%d AMG cycles, so get_preconditioner() isn't exact."
% self._num_amg_cycles
)
def _apply_precon(phi):
return (keo * phi) / self.mesh.control_volumes.reshape(
phi.shape
) + alpha.reshape(phi.shape) * phi
# + beta.reshape(phi.shape) * phi.conj()
assert x is not None
keo = self._get_keo(mu)
if self.mesh.control_volumes is None:
self.mesh.compute_control_volumes(variant=self.cv_variant)
if g > 0.0:
alpha = g * 2.0 * (x.real ** 2 + x.imag ** 2)
# beta = g * x**2
else:
alpha = numpy.zeros(len(x))
num_unknowns = len(self.mesh.node_coords)
return krypy.utils.LinearOperator(
(num_unknowns, num_unknowns), self.dtype, dot=_apply_precon
)
def get_preconditioner_inverse(self, x, mu, g):
"""Use AMG to invert M approximately.
"""
if self._preconditioner_type == "none":
return None
import pyamg
num_unknowns = len(x)
def _apply_inverse_prec_exact(phi):
assert len(phi.shape) == 2
assert len(self.mesh.control_volumes.shape) == 1
rhs = numpy.empty(phi.shape, dtype=phi.dtype)
sol = numpy.empty(phi.shape, dtype=phi.dtype)
for i in range(phi.shape[1]):
rhs = self.mesh.control_volumes * phi[:, i]
linear_system = krypy.linsys.LinearSystem(
prec, rhs, M=amg_prec, self_adjoint=True, positive_definite=True
)
x_init = numpy.zeros((num_unknowns, 1), dtype=complex)
out = krypy.linsys.Cg(
linear_system,
x0=x_init,
tol=1.0e-13,
# explicit_residual = False
)
sol[:, i] = out.xk[:, 0]
# Forget about the cycle used to gauge the residual norm.
self.tot_amg_cycles += [len(out.resnorms) - 1]
return sol
def _apply_inverse_prec_cycles(phi):
rhs = self.mesh.control_volumes.reshape((phi.shape[0], 1)) * phi
x_init = numpy.zeros((num_unknowns, 1), dtype=complex)
x = numpy.empty(phi.shape, dtype=complex)
residuals = []
for i in range(rhs.shape[1]):
x[:, i] = prec_amg_solver.solve(
rhs[:, i],
x0=x_init,
maxiter=self._num_amg_cycles,
tol=0.0,
accel=None,
residuals=residuals,
)
# Alternative for one cycle:
# amg_prec = prec_amg_solver.aspreconditioner( cycle='V' )
# x = amg_prec * rhs
self.tot_amg_cycles += [self._num_amg_cycles]
return x
keo = self._get_keo(mu)
if g > 0.0:
if self.mesh.control_volumes is None:
self.mesh.compute_control_volumes(variant=self.cv_variant)
# don't use .setdiag,
# cf. https://github.com/scipy/scipy/issues/3501
alpha = (
g
* 2.0
* (x.real ** 2 + x.imag ** 2)
* self.mesh.control_volumes.reshape(x.shape)
)
prec = keo + sparse.spdiags(alpha[:, 0], [0], num_unknowns, num_unknowns)
else:
prec = keo
# The preconditioner assumes the eigenvalue 0 iff mu=0 and psi=0.
# This may lead to problems if mu=0 and the Newton iteration
# converges to psi=0 for psi0 != 0.
# import scipy.sparse.linalg
# lambd, v = scipy.sparse.linalg.eigs(prec, which='SM')
# assert all(abs(lambd.imag) < 1.0e-15)
# print '||psi||^2 = %g' % numpy.linalg.norm(absPsi0Squared)
# print 'lambda =', lambd.real
prec_amg_solver = pyamg.smoothed_aggregation_solver(
prec,
strength=("evolution", {"epsilon": 4.0, "k": 2, "proj_type": "l2"}),
smooth=(
"energy",
{"weighting": "local", "krylov": "cg", "degree": 2, "maxiter": 3},
),
improve_candidates=None,
aggregate="standard",
presmoother=("block_gauss_seidel", {"sweep": "symmetric", "iterations": 1}),
postsmoother=(
"block_gauss_seidel",
{"sweep": "symmetric", "iterations": 1},
),
max_levels=25,
coarse_solver="splu",
)
# print 'operator complexity', prec_amg_solver.operator_complexity()
# print 'cycle complexity', prec_amg_solver.cycle_complexity('V')
if self._preconditioner_type == "cycles":
if self._num_amg_cycles == numpy.inf:
raise ValueError("Invalid number of cycles.")
return krypy.utils.LinearOperator(
(num_unknowns, num_unknowns), self.dtype, dot=_apply_inverse_prec_cycles
)
elif self._preconditioner_type == "exact":
amg_prec = prec_amg_solver.aspreconditioner(cycle="V")
return krypy.utils.LinearOperator(
(num_unknowns, num_unknowns),
dtype=self.dtype,
dot=_apply_inverse_prec_exact,
)
else:
raise ValueError(
"Unknown preconditioner type " "%s" "." % self._preconditioner_type
)
def _get_preconditioner_inverse_directsolve(self, x, mu, g):
"""Use a direct solver for M^{-1}.
"""
from scipy.sparse.linalg import spsolve
def _apply_inverse_prec(phi):
return spsolve(prec, phi)
prec = self.get_preconditioner(x, mu, g)
num_unknowns = len(x)
return krypy.Utils.LinearOperator(
(num_unknowns, num_unknowns), _apply_inverse_prec, dtype=self.dtype
)
def inner_product(self, phi0, phi1):
"""The natural inner product of the problem.
"""
assert phi0.shape[0] == phi1.shape[0], (
"Input vectors not matching.",
phi0.shape,
phi1.shape,
)
if self.mesh.control_volumes is None:
self.mesh.compute_control_volumes(variant=self.cv_variant)
if len(phi0.shape) == 1:
scaledPhi0 = self.mesh.control_volumes * phi0
elif len(phi0.shape) == 2:
scaledPhi0 = self.mesh.control_volumes.reshape((phi0.shape[0], 1)) * phi0
# numpy.vdot only works for vectors, so use numpy.dot(....T.conj()) here.
return numpy.dot(scaledPhi0.T.conj(), phi1).real
def energy(self, psi):
"""Compute the Gibbs free energy.
Not really a norm, but a good measure for our purposes here.
"""
if self.mesh.control_volumes is None:
self.mesh.compute_control_volumes(variant=self.cv_variant)
alpha = -self.inner_product(psi ** 2, psi ** 2)
return alpha.real / self.mesh.control_volumes.sum()
def _get_keo(self, mu):
"""Assemble the kinetic energy operator."""
if self._keo_cache is None or self._keo_cache_mu != mu:
# Create the matrix structure.
num_nodes = len(self.mesh.node_coords)
mvp_edge_cache = self._build_mvp_edge_cache(mu)
edge = self.mesh.idx_hierarchy.reshape(2, -1)
alpha = self.mesh.ce_ratios.reshape(-1)
alphaExp0 = alpha * numpy.exp(1j * mvp_edge_cache.reshape(-1))
row = numpy.concatenate([edge[0], edge[0], edge[1], edge[1]])
col = numpy.concatenate([edge[0], edge[1], edge[0], edge[1]])
data = numpy.concatenate([alpha, -alphaExp0.conj(), -alphaExp0, alpha])
self._keo_cache = sparse.csr_matrix(
(data, (row, col)), (num_nodes, num_nodes)
)
# transform the matrix into the more efficient CSR format
self._keo_cache_mu = mu
return self._keo_cache
def _build_mvp_edge_cache(self, mu):
"""Builds the cache for the magnetic vector potential."""
# Approximate the integral
#
# I = \int_{x0}^{xj} (xj-x0)/|xj-x0| . A(x) dx
#
# numerically by the midpoint rule, i.e.,
#
# I ~ (xj-x0) . A( 0.5*(xj+x0) )
# ~ (xj-x0) . 0.5*( A(xj) + A(x0) )
#
# The following computes the dot-products of all those
# edges[i], mvp[i], and put the result in the cache.
half_edges = (
self.mesh.node_coords[self.mesh.idx_hierarchy[1]]
- self.mesh.node_coords[self.mesh.idx_hierarchy[0]]
)
mvp = 0.5 * (
self._get_mvp(mu, self.mesh.idx_hierarchy[1])
+ self._get_mvp(mu, self.mesh.idx_hierarchy[0])
)
return numpy.sum(half_edges * mvp, -1)
def _get_mvp(self, mu, index):
return mu * self._raw_magnetic_vector_potential[index]
# def keo_smallest_eigenvalue_approximation(self):
# '''Returns
# <v,Av> / <v,v>
# with v = ones and A = KEO - Laplace.
# This is linear approximation for the smallest magnitude eigenvalue
# of KEO.
# '''
# num_nodes = len(self.mesh.nodes)
# # compute the FVM entities for the mesh
# if self._edge_lengths is None or self._coedge_edge_ratios is None:
# self._create_fvm_entities()
# k = 0
# sum = 0.0
# for element in self.mesh.cells:
# # loop over the edges
# l = 0
# for edge in element.edges:
# # -----------------------------------------------------------
# # Compute the integral
# #
# # I = \int_{x0}^{xj} (xj-x0).A(x) dx
# #
# # numerically by the midpoint rule, i.e.,
# #
# # I ~ |xj-x0| * (xj-x0) . A( 0.5*(xj+x0) ).
# #
# node0 = self.mesh.nodes[edge[0]]
# node1 = self.mesh.nodes[edge[1]]
# midpoint = 0.5 * (node0 + node1)
# # Instead of projecting onto the normalized edge and then
# # multiplying with the edge length for the approximation of
# # the integral, just project on the not normalized edge.
# a_integral = numpy.dot(
# node1 - node0,
# self._magnetic_vector_potential(midpoint)
# )
# # sum it in
# sum += 2.0 * self._coedge_edge_ratios[k][l] * \
# (1.0 - math.cos(a_integral))
# l += 1
# k += 1
# return sum / len(self.mesh.nodes)
| mit |
SmartInfrastructures/fuel-web-dev | nailgun/nailgun/test/integration/test_node_handler.py | 1 | 12873 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nailgun import consts
from nailgun import objects
from nailgun.db.sqlalchemy.models import Node
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.utils import reverse
class TestHandlers(BaseIntegrationTest):
def test_node_get(self):
node = self.env.create_node(api=False)
resp = self.app.get(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.assertEqual(node.id, resp.json_body['id'])
self.assertEqual(node.name, resp.json_body['name'])
self.assertEqual(node.mac, resp.json_body['mac'])
self.assertEqual(
node.pending_addition, resp.json_body['pending_addition'])
self.assertEqual(
node.pending_deletion, resp.json_body['pending_deletion'])
self.assertEqual(node.status, resp.json_body['status'])
self.assertEqual(
node.meta['cpu']['total'],
resp.json_body['meta']['cpu']['total']
)
self.assertEqual(node.meta['disks'], resp.json_body['meta']['disks'])
self.assertEqual(node.meta['memory'], resp.json_body['meta']['memory'])
def test_node_creation_fails_with_wrong_id(self):
node_id = '080000000003'
resp = self.app.post(
reverse('NodeCollectionHandler'),
jsonutils.dumps({'id': node_id,
'mac': self.env.generate_random_mac(),
'status': 'discover'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)
def test_node_deletion(self):
node = self.env.create_node(api=False)
resp = self.app.delete(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
"",
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 200)
def test_node_valid_metadata_gets_updated(self):
new_metadata = self.env.default_metadata()
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'meta': new_metadata}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
self.db.refresh(node)
nodes = self.db.query(Node).filter(
Node.id == node.id
).all()
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].meta, new_metadata)
def test_node_hostname_gets_updated(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.db.refresh(node)
# lets put the same hostname again
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.db.refresh(node)
nodes = self.db.query(Node).filter(
Node.id == node.id
).all()
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].hostname, 'new-name')
def test_node_hostname_gets_updated_invalid(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': '!#invalid_%&name'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)
def test_node_hostname_gets_updated_ssl_conflict(self):
cluster = self.env.create_cluster(api=False)
node = self.env.create_node(cluster_id=cluster.id)
cluster_attrs = objects.Cluster.get_attributes(cluster).editable
test_hostname = 'test-hostname'
cluster_attrs['public_ssl']['hostname']['value'] = test_hostname
objects.Cluster.update_attributes(
cluster, {'editable': cluster_attrs})
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': test_hostname}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(400, resp.status_code)
self.assertEqual(
"New hostname '{0}' conflicts with public TLS endpoint"
.format(test_hostname), resp.json_body['message'])
def test_node_hostname_gets_updated_after_provisioning_starts(self):
node = self.env.create_node(api=False,
status=consts.NODE_STATUSES.provisioning)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(403, resp.status_code)
self.assertEqual(
'Node hostname may be changed only before provisioning.',
resp.json_body['message'])
def test_node_hostname_gets_updated_duplicate(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers)
self.assertEqual(200, resp.status_code)
self.db.refresh(node)
node_2 = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node_2.id}),
jsonutils.dumps({'hostname': 'new-name'}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(409, resp.status_code)
def test_node_valid_status_gets_updated(self):
node = self.env.create_node(api=False)
params = {'status': 'error'}
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps(params),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
def test_node_action_flags_are_set(self):
flags = ['pending_addition', 'pending_deletion']
node = self.env.create_node(api=False)
for flag in flags:
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps({flag: True}),
headers=self.default_headers
)
self.assertEqual(resp.status_code, 200)
self.db.refresh(node)
node_from_db = self.db.query(Node).filter(
Node.id == node.id
).first()
for flag in flags:
self.assertEqual(getattr(node_from_db, flag), True)
def test_put_returns_400_if_no_body(self):
node = self.env.create_node(api=False)
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
"",
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_put_returns_400_if_wrong_status(self):
node = self.env.create_node(api=False)
params = {'status': 'invalid_status'}
resp = self.app.put(
reverse('NodeHandler', kwargs={'obj_id': node.id}),
jsonutils.dumps(params),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
def test_do_not_create_notification_if_disks_meta_is_empty(self):
def get_notifications_count(**kwargs):
return objects.NotificationCollection.count(
objects.NotificationCollection.filter_by(None, **kwargs)
)
self.env.create(
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True},
]
)
node = self.env.nodes[0]
node.meta['disks'] = []
node = {
'id': node.id,
'meta': node.meta,
'mac': node.mac,
'status': node.status
}
before_count = get_notifications_count(node_id=node['id'])
for i in range(5):
response = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node),
headers=self.default_headers
)
self.assertEqual(response.status_code, 200)
# check there's no notification created
after_count = get_notifications_count(node_id=node['id'])
self.assertEqual(before_count, after_count)
def test_no_volumes_changes_if_node_is_locked(self):
self.env.create(
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True,
'status': consts.NODE_STATUSES.ready},
]
)
node = self.env.nodes[0]
node_data = {
'id': node.id,
'meta': node.meta,
'mac': node.mac,
'status': node.status
}
node_data['meta']['disks'] = []
response = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps(node_data),
headers=self.default_headers
)
self.assertEqual(response.status_code, 200)
# check volumes data wasn't reset
self.assertGreater(len(node.meta['disks']), 0)
@fake_tasks()
def test_interface_changes_for_new_node(self):
# Creating cluster with node
self.env.create(
cluster_kwargs={
'name': 'test_name'
},
nodes_kwargs=[
{'roles': ['controller'], 'pending_addition': True}
]
)
cluster = self.env.clusters[0]
def filter_changes(chg_type, chg_list):
return filter(lambda x: x.get('name') == chg_type, chg_list)
changes = filter_changes(
consts.CLUSTER_CHANGES.interfaces,
cluster['changes']
)
# Checking interfaces change added after node creation
self.assertEquals(1, len(changes))
deployment_task = self.env.launch_deployment()
self.env.wait_ready(deployment_task)
changes = filter_changes(
consts.CLUSTER_CHANGES.interfaces,
cluster['changes']
)
# Checking no interfaces change after deployment
self.assertEquals(0, len(changes))
def test_update_node_with_wrong_ip(self):
node = self.env.create_node(
api=False, ip='10.20.0.2',
status=consts.NODE_STATUSES.deploying)
ipaddress = '192.168.0.10'
self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers)
self.assertNotEqual(node.ip, ipaddress)
ipaddress = '10.20.0.25'
self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers)
self.assertEqual(node.ip, ipaddress)
def test_update_node_with_none_ip(self):
node = self.env.create_node(api=False, ip='10.20.0.2')
ipaddress = None
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers,
expect_errors=True)
self.assertEqual(resp.status_code, 400)
ipaddress = '10.20.0.4'
resp = self.app.put(
reverse('NodeAgentHandler'),
jsonutils.dumps({'id': node.id,
'ip': ipaddress}),
headers=self.default_headers)
self.assertEqual(resp.status_code, 200)
| apache-2.0 |
junwucs/h2o-3 | py2/testdir_multi_jvm/test_GLM_basic_2.py | 21 | 3529 | import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm
from h2o_test import verboseprint, dump_json, OutputObj
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(3)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM_basic_2(self):
importFolderPath = "logreg"
csvFilename = "prostate.csv"
hex_key = "prostate.hex"
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=hex_key,
check_header=1, timeoutSecs=180, doSummary=False)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
expected = []
allowedDelta = 0
labelListUsed = list(labelList)
labelListUsed.remove('ID')
labelListUsed.remove('CAPSULE')
numColsUsed = numCols - 2
for trial in range(1):
# family [u'gaussian', u'binomial', u'poisson', u'gamma', u'tweedie']
# link [u'family_default', u'identity', u'logit', u'log', u'inverse', u'tweedie']
# can we do classification with probabilities?
# are only lambda and alpha grid searchable?
parameters = {
'validation_frame': parse_key,
'ignored_columns': '[ID]',
'response_column': 'CAPSULE',
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'tweedie_variance_power': None,
'tweedie_link_power': None,
'alpha': '[1e-4]',
'lambda': '[0.5]',
'prior1': None,
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
'use_all_factor_levels': False,
# NPE with n_folds 2?
'n_folds': 1,
}
model_key = 'prostate_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
destination_key=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=10)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult, 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
h2o_cmd.runStoreView()
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
40423241/2016fallcadp_hw | plugin/liquid_tags/include_code.py | 246 | 4490 | """
Include Code Tag
----------------
This implements a Liquid-style video tag for Pelican,
based on the octopress video tag [1]_
Syntax
------
{% include_code path/to/code [lang:python] [Title text] [codec:utf8] %}
The "path to code" is specified relative to the ``code`` subdirectory of
the content directory Optionally, this subdirectory can be specified in the
config file:
CODE_DIR = 'code'
If your input file is not ASCII/UTF-8 encoded, you need to specify the
appropriate input codec by using the ``codec`` option.
Example ``codec:iso-8859-1``
Using this option does not affect the output encoding.
For a list of valid codec identifiers, see
https://docs.python.org/2/library/codecs.html#standard-encodings
Example
-------
{% include_code myscript.py %}
This will import myscript.py from content/code/myscript.py
and output the contents in a syntax highlighted code block inside a figure,
with a figcaption listing the file name and download link.
The file link will be valid only if the 'code' directory is listed
in the STATIC_PATHS setting, e.g.:
STATIC_PATHS = ['images', 'code']
[1] https://github.com/imathis/octopress/blob/master/plugins/include_code.rb
"""
import re
import os
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% include_code /path/to/code.py [lang:python] [lines:X-Y] [:hidefilename:] [title] %}"
FORMAT = re.compile(r"""
^(?:\s+)? # Allow whitespace at beginning
(?P<src>\S+) # Find the path
(?:\s+)? # Whitespace
(?:(?:lang:)(?P<lang>\S+))? # Optional language
(?:\s+)? # Whitespace
(?:(?:lines:)(?P<lines>\d+-\d+))? # Optional lines
(?:\s+)? # Whitespace
(?P<hidefilename>:hidefilename:)? # Hidefilename flag
(?:\s+)? # Whitespace
(?:(?:codec:)(?P<codec>\S+))? # Optional language
(?:\s+)? # Whitespace
(?P<title>.+)?$ # Optional title
""", re.VERBOSE)
@LiquidTags.register('include_code')
def include_code(preprocessor, tag, markup):
title = None
lang = None
src = None
match = FORMAT.search(markup)
if match:
argdict = match.groupdict()
title = argdict['title'] or ""
lang = argdict['lang']
codec = argdict['codec'] or "utf8"
lines = argdict['lines']
hide_filename = bool(argdict['hidefilename'])
if lines:
first_line, last_line = map(int, lines.split("-"))
src = argdict['src']
if not src:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
code_dir = preprocessor.configs.getConfig('CODE_DIR')
code_path = os.path.join('content', code_dir, src)
if not os.path.exists(code_path):
raise ValueError("File {0} could not be found".format(code_path))
with open(code_path) as fh:
if lines:
code = fh.readlines()[first_line - 1: last_line]
code[-1] = code[-1].rstrip()
code = "".join(code)
else:
code = fh.read()
if not title and hide_filename:
raise ValueError("Either title must be specified or filename must "
"be available")
if not hide_filename:
title += " %s" % os.path.basename(src)
if lines:
title += " [Lines %s]" % lines
title = title.strip()
url = '/{0}/{1}'.format(code_dir, src)
url = re.sub('/+', '/', url)
open_tag = ("<figure class='code'>\n<figcaption><span>{title}</span> "
"<a href='{url}'>download</a></figcaption>".format(title=title,
url=url))
close_tag = "</figure>"
# store HTML tags in the stash. This prevents them from being
# modified by markdown.
open_tag = preprocessor.configs.htmlStash.store(open_tag, safe=True)
close_tag = preprocessor.configs.htmlStash.store(close_tag, safe=True)
if lang:
lang_include = ':::' + lang + '\n '
else:
lang_include = ''
source = (open_tag
+ '\n\n '
+ lang_include
+ '\n '.join(code.decode(codec).split('\n')) + '\n\n'
+ close_tag + '\n')
return source
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
| agpl-3.0 |
thnee/ansible | test/units/modules/network/exos/test_exos_command.py | 38 | 4585 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.exos import exos_command
from .exos_module import TestExosModule, load_fixture
class TestExosCommandModule(TestExosModule):
module = exos_command
def setUp(self):
super(TestExosCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.exos.exos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestExosCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_exos_command_simple(self):
set_module_args(dict(commands=['show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Switch :'))
def test_exos_command_multiple(self):
set_module_args(dict(commands=['show version', 'show version']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Switch :'))
def test_exos_command_wait_for(self):
wait_for = 'result[0] contains "Switch :"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module()
def test_exos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_exos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_exos_command_match_any(self):
wait_for = ['result[0] contains "Switch"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
self.execute_module()
def test_exos_command_match_all(self):
wait_for = ['result[0] contains "Switch"',
'result[0] contains "Switch :"']
set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
self.execute_module()
def test_exos_command_match_all_failure(self):
wait_for = ['result[0] contains "Switch :"',
'result[0] contains "test string"']
commands = ['show version', 'show version']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
def test_exos_command_configure_error(self):
commands = ['disable ospf']
set_module_args({
'commands': commands,
'_ansible_check_mode': True,
})
result = self.execute_module()
self.assertEqual(
result['warnings'],
['only show commands are supported when using check mode, not executing `disable ospf`']
)
| gpl-3.0 |
jackbrucesimpson/beeunique | src/Processor/PathAssembly/processpaths.py | 1 | 2618 | from beedata import BeeData
from Processor.Utils.imageutils import gen_gap_coords
from Processor.Utils import constants
class ProcessPaths(object):
def __init__(self):
pass
def process_paths(self, bee_df):
x_list = bee_df['x'].tolist()
y_list = bee_df['y'].tolist()
frame_nums_list = bee_df['frame_nums'].tolist()
classifications_list = bee_df['classifications'].tolist()
bee_data = BeeData(classifications_list[0])
path_frame_num_start = frame_nums_list[0]
path_frame_num_end = frame_nums_list[0]
x_path = [x_list[0]]
y_path = [y_list[0]]
for i in range(1, len(x_list)):
difference_prev_frame = frame_nums_list[i] - path_frame_num_end
if difference_prev_frame == 1:
path_frame_num_end = frame_nums_list[i]
x_path.append(x_list[i])
y_path.append(y_list[i])
bee_data.add_classification(classifications_list[i])
elif difference_prev_frame < constants.MAX_FRAME_GAP_BETWEEN_PATHS:
path_frame_num_end = frame_nums_list[i]
generated_coord_gaps = gen_gap_coords(x_list[i], y_list[i], x_list[i-1], y_list[i-1], difference_prev_frame)
fill_path_classifications_gap = [constants.GAP_CLASS] * len(generated_coord_gaps['x'])
fill_path_classifications_gap[-1] = classifications_list[i]
x_path.extend(generated_coord_gaps['x'])
y_path.extend(generated_coord_gaps['y'])
for gap_classification in fill_path_classifications_gap:
bee_data.add_classification(gap_classification)
else:
bee_data.list_of_all_x_paths.append(x_path)
bee_data.list_of_all_y_paths.append(y_path)
bee_data.start_frame_num_all_paths.append(path_frame_num_start)
x_path = [x_list[i]]
y_path = [y_list[i]]
path_frame_num_start = frame_nums_list[i]
path_frame_num_end = frame_nums_list[i]
bee_data.add_classification(classifications_list[i])
if len(x_path) > 0:
bee_data.list_of_all_x_paths.append(x_path)
bee_data.list_of_all_y_paths.append(y_path)
bee_data.start_frame_num_all_paths.append(path_frame_num_start)
bee_data.identify_freq_class_path_group()
bee_data.merge_group_classifications_into_sections()
bees_identified_by_tag = bee_data.gen_separate_tag_class_bees()
return bees_identified_by_tag
| mit |
team-xue/xue | xue/accounts/migrations/0015_delete_major_attr_in_favor_of_klass.py | 1 | 7144 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the accounts app.

    Drops the redundant ``DMUserProfile.major`` CharField; the major is
    reachable through the ``klass`` foreign key (LogicalClass -> Major)
    in the frozen ORM below.
    """

    def forwards(self, orm):
        """Apply the migration: remove the 'major' column."""
        # Deleting field 'DMUserProfile.major'
        db.delete_column('accounts_dmuserprofile', 'major')

    def backwards(self, orm):
        """Revert the migration: restore the 'major' column with its old definition."""
        # Adding field 'DMUserProfile.major'
        db.add_column('accounts_dmuserprofile', 'major', self.gf('django.db.models.fields.CharField')(default='', max_length=16, blank=True), keep_default=False)

    # Frozen ORM snapshot (auto-generated by South); used to build the
    # ``orm`` argument passed to forwards()/backwards().
    models = {
        'accounts.dmuserprofile': {
            'Meta': {'object_name': 'DMUserProfile'},
            'awards': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'english_band_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'english_band_type': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'ethnic': ('django.db.models.fields.IntegerField', [], {}),
            'gender': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'health': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64', 'blank': 'True'}),
            'high_school': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'hobby': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'id_number': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'join_date': ('django.db.models.fields.DateField', [], {}),
            'klass': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['classes.LogicalClass']", 'null': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'default': "'zh'", 'max_length': '5'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'nickname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
            'political': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
            'realname': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'role': ('django.db.models.fields.IntegerField', [], {}),
            'sign_line': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '128', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'classes.logicalclass': {
            'Meta': {'object_name': 'LogicalClass'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'major': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['classes.Major']"}),
            'seq': ('django.db.models.fields.IntegerField', [], {})
        },
        'classes.major': {
            'Meta': {'object_name': 'Major'},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'shortname': ('django.db.models.fields.CharField', [], {'max_length': '4'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['accounts']
| bsd-3-clause |
paul-herrmann/server-examples | python/django-fine-uploader/fine_uploader/utils.py | 4 | 1385 | import os, os.path, shutil
def combine_chunks(total_parts, total_size, source_folder, dest):
    """Combine a chunked file into a whole file again.

    Goes through each part, in order, and appends that part's bytes to
    another destination file.

    Chunks are stored in media/chunks; uploads are saved in media/uploads.

    :param total_parts: number of chunk files, named "0" .. str(total_parts - 1)
    :param total_size: expected total size in bytes (unused here; kept for
        API compatibility with callers)
    :param source_folder: directory that holds the chunk files
    :param dest: path of the combined output file
    """
    if not os.path.exists(os.path.dirname(dest)):
        os.makedirs(os.path.dirname(dest))
    with open(dest, 'wb+') as destination:
        # range() works on both Python 2 and 3 (xrange is Python 2 only).
        for i in range(total_parts):
            part = os.path.join(source_folder, str(i))
            with open(part, 'rb') as source:
                # Stream each part instead of loading it fully into memory;
                # shutil is already imported at module level.
                shutil.copyfileobj(source, destination)
def save_upload(f, path):
    """Save an upload.

    Django will automatically "chunk" incoming files (even when previously
    chunked by fine-uploader) to prevent large files from taking up your
    server's memory. If Django has chunked the file, the chunks are written
    one by one; otherwise the file is saved the normal Python way.

    Uploads are stored in media/uploads.

    :param f: file-like object (possibly a Django chunked upload)
    :param path: destination path for the saved file
    """
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    is_chunked = hasattr(f, 'multiple_chunks') and f.multiple_chunks()
    with open(path, 'wb+') as destination:
        if is_chunked:
            # Django split the upload: append the pieces in order.
            for piece in f.chunks():
                destination.write(piece)
        else:
            destination.write(f.read())
| mit |
reyha/zulip | zerver/lib/ccache.py | 9 | 7580 | from __future__ import absolute_import
from typing import Any, Dict, Optional
#!/usr/bin/env python
# This file is adapted from samples/shellinabox/ssh-krb-wrapper in
# https://github.com/davidben/webathena, which has the following
# license:
#
# Copyright (c) 2013 David Benjamin and Alan Huang
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import struct
import six
# Some DER encoding stuff. Bleh. This is because the ccache contains a
# DER-encoded krb5 Ticket structure, whereas Webathena deserializes
# into the various fields. Re-encoding in the client would be easy as
# there is already an ASN.1 implementation, but in the interest of
# limiting MIT Kerberos's exposure to malformed ccaches, encode it
# ourselves. To that end, here's the laziest DER encoder ever.
def der_encode_length(l):
    # type: (int) -> str
    """DER-encode a length: short form for values <= 127, long form above."""
    if l <= 127:
        # Short form: a single octet holds the length itself.
        return chr(l)
    # Long form: big-endian base-256 digits, preceded by 0x80 | digit count.
    digits = []
    remaining = l
    while remaining > 0:
        digits.append(chr(remaining & 0xff))
        remaining >>= 8
    digits.reverse()
    return chr(len(digits) | 0x80) + "".join(digits)
def der_encode_tlv(tag, value):
    # type: (int, str) -> str
    """Assemble a DER tag-length-value triple for *value* under *tag*."""
    header = chr(tag) + der_encode_length(len(value))
    return header + value
def der_encode_integer_value(val):
    # type: (int) -> str
    """Encode *val* as the content octets of a DER INTEGER (no tag/length).

    Raises TypeError if *val* is not an integer.
    """
    if not isinstance(val, six.integer_types):
        raise TypeError("int")
    # base 256, MSB first, two's complement, minimum number of octets
    # necessary. This has a number of annoying edge cases:
    # * 0 and -1 are 0x00 and 0xFF, not the empty string.
    # * 255 is 0x00 0xFF, not 0xFF
    # * -256 is 0xFF 0x00, not 0x00
    # Special-case to avoid an empty encoding.
    if val == 0:
        return "\x00"
    sign = 0  # What you would get if you sign-extended the current high bit.
    out = ""
    # We can stop once sign-extension matches the remaining value.
    while val != sign:
        byte = val & 0xff
        out = chr(byte) + out
        # -1 once the octet just emitted has its high bit set, else 0.
        sign = -1 if byte & 0x80 == 0x80 else 0
        val >>= 8
    return out
def der_encode_integer(val):
    # type: (int) -> str
    """DER-encode *val* as a complete INTEGER (tag 0x02)."""
    body = der_encode_integer_value(val)
    return der_encode_tlv(0x02, body)
def der_encode_int32(val):
    # type: (int) -> str
    """DER-encode a signed 32-bit integer; reject out-of-range values."""
    if not (-2147483648 <= val <= 2147483647):
        raise ValueError("Bad value")
    return der_encode_integer(val)
def der_encode_uint32(val):
    # type: (int) -> str
    """DER-encode an unsigned 32-bit integer; reject out-of-range values."""
    if not (0 <= val <= 4294967295):
        raise ValueError("Bad value")
    return der_encode_integer(val)
def der_encode_string(val):
    # type: (six.text_type) -> str
    """DER-encode a unicode string as a GeneralString (tag 0x1b), UTF-8.

    Raises TypeError if *val* is not a unicode/text string.
    """
    if not isinstance(val, six.text_type):
        raise TypeError("unicode")
    return der_encode_tlv(0x1b, val.encode("utf-8"))
def der_encode_octet_string(val):
    # type: (str) -> str
    """DER-encode a raw byte string as an OCTET STRING (tag 0x04)."""
    if not isinstance(val, str):
        raise TypeError("str")
    return der_encode_tlv(0x04, val)
def der_encode_sequence(tlvs, tagged=True):
    # type: (List[str], Optional[bool]) -> str
    """DER-encode a SEQUENCE (tag 0x30) of pre-encoded components.

    Falsy components (None/empty) are treated as absent OPTIONAL fields.
    When *tagged* is true each component is wrapped in a context-specific
    explicit [i] tag, Kerberos style.
    """
    parts = []
    for index, component in enumerate(tlvs):
        # Missing optional elements are represented as None.
        if not component:
            continue
        if tagged:
            # Kerberos-style explicit tagging of components.
            component = der_encode_tlv(0xa0 | index, component)
        parts.append(component)
    return der_encode_tlv(0x30, "".join(parts))
def der_encode_ticket(tkt):
    # type: (Dict[str, Any]) -> str
    """DER-encode a krb5 Ticket (APPLICATION 1, tag 0x61).

    *tkt* is the deserialized JSON form produced by Webathena: a dict with
    "realm", "sname" (principal name) and "encPart" (EncryptedData, with
    optional "kvno" and base64-encoded "cipher") entries.
    """
    return der_encode_tlv(
        0x61,  # Ticket
        der_encode_sequence(
            [der_encode_integer(5),  # tktVno
             der_encode_string(tkt["realm"]),
             der_encode_sequence(  # PrincipalName
                 [der_encode_int32(tkt["sname"]["nameType"]),
                  der_encode_sequence([der_encode_string(c)
                                       for c in tkt["sname"]["nameString"]],
                                      tagged=False)]),
             der_encode_sequence(  # EncryptedData
                 [der_encode_int32(tkt["encPart"]["etype"]),
                  (der_encode_uint32(tkt["encPart"]["kvno"])
                   if "kvno" in tkt["encPart"]
                   else None),
                  der_encode_octet_string(
                      base64.b64decode(tkt["encPart"]["cipher"]))])]))
# Kerberos ccache writing code. Using format documentation from here:
# http://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
def ccache_counted_octet_string(data):
    # type: (str) -> bytes
    """Encode *data* as a ccache counted octet string:
    a 4-byte big-endian length followed by the raw bytes.

    Raises TypeError if *data* is not a (byte) string.
    """
    if not isinstance(data, str):
        raise TypeError("str")
    # NOTE(review): Python 2 semantics -- struct.pack returns a byte str
    # that is concatenated directly with *data*.
    return struct.pack("!I", len(data)) + data
def ccache_principal(name, realm):
    # type: (Dict[str, str], str) -> str
    """Encode a principal in ccache format: name type, component count,
    counted realm string, then each counted name component (UTF-8)."""
    header = struct.pack("!II", name["nameType"], len(name["nameString"]))
    return (header + ccache_counted_octet_string(realm.encode("utf-8")) +
            "".join(ccache_counted_octet_string(c.encode("utf-8"))
                    for c in name["nameString"]))
def ccache_key(key):
    # type: (Dict[str, str]) -> bytes
    """Encode a session key: 2-byte key type followed by the counted key
    material (base64-decoded from the JSON form)."""
    return (struct.pack("!H", key["keytype"]) +
            ccache_counted_octet_string(base64.b64decode(key["keyvalue"])))
def flags_to_uint32(flags):
    # type: (List[str]) -> int
    """Pack a list of booleans into a 32-bit word, flags[0] -> bit 31."""
    return sum(1 << (31 - i) for i, v in enumerate(flags) if v)
def ccache_credential(cred):
    # type: (Dict[str, Any]) -> str
    """Serialize one credential entry in ccache format.

    *cred* is the JSON-style credential dict (Webathena format) holding
    client/server principals, the session key, times in milliseconds,
    flags, and the ticket.
    """
    out = ccache_principal(cred["cname"], cred["crealm"])
    out += ccache_principal(cred["sname"], cred["srealm"])
    out += ccache_key(cred["key"])
    # Times are milliseconds in the JSON form; the ccache stores seconds.
    out += struct.pack("!IIII",
                       cred["authtime"] // 1000,
                       cred.get("starttime", cred["authtime"]) // 1000,
                       cred["endtime"] // 1000,
                       cred.get("renewTill", 0) // 1000)
    # Single zero byte (see the ccache format reference linked above).
    out += struct.pack("!B", 0)
    out += struct.pack("!I", flags_to_uint32(cred["flags"]))
    # TODO: Care about addrs or authdata? Former is "caddr" key.
    out += struct.pack("!II", 0, 0)
    out += ccache_counted_octet_string(der_encode_ticket(cred["ticket"]))
    # No second_ticket.
    out += ccache_counted_octet_string("")
    return out
def make_ccache(cred):
    # type: (Dict[str, Any]) -> str
    """Build a complete version-4 ccache body holding one credential."""
    # Do we need a DeltaTime header? The ccache I get just puts zero
    # in there, so do the same.
    out = struct.pack("!HHHHII",
                      0x0504,  # file_format_version
                      12,  # headerlen
                      1,  # tag (DeltaTime)
                      8,  # taglen (two uint32_ts)
                      0, 0,  # time_offset / usec_offset
                      )
    out += ccache_principal(cred["cname"], cred["crealm"])
    out += ccache_credential(cred)
    return out
| apache-2.0 |
kartikm/hy | tests/test_models.py | 2 | 4870 | # Copyright 2018 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
import copy
import hy
from clint.textui.colored import clean
from hy._compat import long_type, str_type
from hy.models import (wrap_value, replace_hy_obj, HyString, HyInteger, HyList,
HyDict, HySet, HyExpression, HyComplex, HyFloat, pretty)
def test_wrap_long_type():
    """Wrapping a plain integer should yield a HyInteger."""
    result = wrap_value(long_type(0))
    assert type(result) == HyInteger
def test_wrap_tuple():
    """Wrapping a tuple should yield a HyList of the same elements."""
    result = wrap_value((HyInteger(0),))
    assert type(result) == HyList
    assert type(result[0]) == HyInteger
    assert result == HyList([HyInteger(0)])
def test_wrap_nested_expr():
    """Wrapping a HyExpression should also wrap its plain elements."""
    result = wrap_value(HyExpression([long_type(0)]))
    assert type(result) == HyExpression
    assert type(result[0]) == HyInteger
    assert result == HyExpression([HyInteger(0)])
def test_replace_long_type():
    """Replacing a plain integer should produce an equal HyInteger."""
    assert replace_hy_obj(long_type(0), HyInteger(13)) == HyInteger(0)
def test_replace_string_type():
    """Replacing a native string should produce an equal HyString."""
    assert replace_hy_obj(str_type("foo"), HyString("bar")) == HyString("foo")
def test_replace_tuple():
    """Replacing a tuple should produce a HyList of HyIntegers."""
    result = replace_hy_obj((long_type(0), ), HyInteger(13))
    assert type(result) == HyList
    assert type(result[0]) == HyInteger
    assert result == HyList([HyInteger(0)])
def test_list_add():
    """Adding two HyLists should produce another HyList."""
    total = HyList([1, 2, 3]) + HyList([3, 4, 5])
    assert total == [1, 2, 3, 3, 4, 5]
    assert total.__class__ == HyList
def test_list_slice():
    """Slicing a HyList should produce HyLists, even when empty."""
    source = HyList([1, 2, 3, 4])
    tail = source[1:]
    beyond = source[5:]
    assert type(tail) == HyList
    assert tail == HyList([2, 3, 4])
    assert type(beyond) == HyList
    assert beyond == HyList([])
# Shared fixture: a HyDict is a flat [key, value, key, value, ...] sequence.
hydict = HyDict(["a", 1, "b", 2, "c", 3])
def test_dict_items():
    """HyDict.items should pair up consecutive key/value entries."""
    expected = [("a", 1), ("b", 2), ("c", 3)]
    assert hydict.items() == expected
def test_dict_keys():
    """HyDict.keys should return the even-indexed entries."""
    expected = ["a", "b", "c"]
    assert hydict.keys() == expected
def test_dict_values():
    """HyDict.values should return the odd-indexed entries."""
    expected = [1, 2, 3]
    assert hydict.values() == expected
# Shared fixture; HySet preserves order and duplicates (see test_set).
hyset = HySet([3, 1, 2, 2])
def test_set():
    """HySet should preserve element order and duplicates."""
    assert hyset == [3, 1, 2, 2]
def test_number_model_copy():
    """Shallow and deep copies of numeric models should compare equal."""
    for value in (HyInteger(42), HyFloat(42.), HyComplex(42j)):
        assert value == copy.copy(value)
        assert value == copy.deepcopy(value)
# Expected pretty-printer output keyed by the hy source that produces it.
# NOTE(review): these literals are whitespace-sensitive pretty-printer
# output; the leading indentation inside the triple-quoted strings must
# match exactly what hy.models.pretty emits -- verify before editing.
PRETTY_STRINGS = {
    k % ('[1.0] {1.0} (1.0) #{1.0}',):
    v.format("""
HyList([
HyFloat(1.0)]),
HyDict([
HyFloat(1.0) # odd
]),
HyExpression([
HyFloat(1.0)]),
HySet([
HyFloat(1.0)])""")
    for k, v in {'[%s]': 'HyList([{}])',
                 '#{%s}': 'HySet([{}])'}.items()}
PRETTY_STRINGS.update({
    '{[1.0] {1.0} (1.0) #{1.0}}':
    """HyDict([
HyList([
HyFloat(1.0)]),
HyDict([
HyFloat(1.0) # odd
])
,
HyExpression([
HyFloat(1.0)]),
HySet([
HyFloat(1.0)])
])"""
    ,
    '[1.0 1j [] {} () #{}]':
    """HyList([
HyFloat(1.0),
HyComplex(1j),
HyList(),
HyDict(),
HyExpression(),
HySet()])"""
    ,
    '{{1j 2j} {1j 2j [][1j]} {[1j][] 1j 2j} {[1j][1j]}}':
    """HyDict([
HyDict([
HyComplex(1j), HyComplex(2j)]),
HyDict([
HyComplex(1j), HyComplex(2j),
HyList(),
HyList([
HyComplex(1j)])
])
,
HyDict([
HyList([
HyComplex(1j)]),
HyList()
,
HyComplex(1j), HyComplex(2j)]),
HyDict([
HyList([
HyComplex(1j)]),
HyList([
HyComplex(1j)])
])
])"""})
def test_compound_model_repr():
    """repr/str of compound models must round-trip through eval.

    Under pretty(False) the repr is plain and eval-able directly; under
    pretty(True) it is passed through clint's clean() (which strips the
    colored styling) before eval. str() is expected to be pretty in both
    modes.
    """
    HY_LIST_MODELS = (HyExpression, HyDict, HySet, HyList)
    with pretty(False):
        for model in HY_LIST_MODELS:
            assert eval(repr(model())).__class__ is model
            assert eval(repr(model([1, 2]))) == model([1, 2])
            assert eval(repr(model([1, 2, 3]))) == model([1, 2, 3])
        for k, v in PRETTY_STRINGS.items():
            # `str` should be pretty, even under `pretty(False)`.
            assert clean(str(hy.read_str(k))) == v
        for k in PRETTY_STRINGS.keys():
            assert eval(repr(hy.read_str(k))) == hy.read_str(k)
    with pretty(True):
        for model in HY_LIST_MODELS:
            assert eval(clean(repr(model()))).__class__ is model
            assert eval(clean(repr(model([1, 2])))) == model([1, 2])
            assert eval(clean(repr(model([1, 2, 3])))) == model([1, 2, 3])
        for k, v in PRETTY_STRINGS.items():
            assert clean(repr(hy.read_str(k))) == v
| mit |
ioanpocol/superdesk-core | superdesk/storage/desk_media_storage.py | 2 | 5576 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
import json
import mimetypes
import bson
import gridfs
import os.path
from eve.io.mongo.media import GridFSMediaStorage
logger = logging.getLogger(__name__)
def format_id(_id):
    """Return *_id* as a bson.ObjectId when possible, otherwise unchanged."""
    try:
        return bson.ObjectId(_id)
    except bson.errors.InvalidId:
        return _id
class SuperdeskGridFSMediaStorage(GridFSMediaStorage):
    """GridFS-backed media storage with Superdesk-specific helpers.

    Extends Eve's GridFSMediaStorage with URL generation, folder-aware
    uploads, and maintenance utilities (orphan cleanup, searching).
    """

    def get(self, _id, resource=None):
        """Fetch a media file by id, decoding JSON-encoded metadata values.

        Returns None when the file cannot be retrieved.
        """
        logger.debug('Getting media file with id= %s' % _id)
        _id = format_id(_id)
        try:
            media_file = self.fs(resource).get(_id)
        except Exception:
            media_file = None
        if media_file and media_file.metadata:
            for k, v in media_file.metadata.items():
                if isinstance(v, str):
                    try:
                        # Metadata values are stored JSON-encoded; decode in place.
                        media_file.metadata[k] = json.loads(v)
                    except ValueError:
                        logger.info('Non JSON metadata for file: %s with key: %s and value: %s', _id, k, v)
        return media_file

    def url_for_media(self, media_id, content_type=None):
        """Return url for given media id.

        :param media_id: media id from media_id method
        """
        # Normalize the guessed extension so URLs always use ".jpg".
        ext = mimetypes.guess_extension(content_type or '') or ''
        if ext in ('.jpe', '.jpeg'):
            ext = '.jpg'
        return self.app.upload_url(str(media_id) + ext)

    def url_for_download(self, media_id, content_type=None):
        """Return url for download.

        :param media_id: media id from media_id method
        """
        return self.app.download_url(str(media_id))

    def fetch_rendition(self, rendition):
        """Return the stored media file referenced by a rendition dict."""
        return self.get(rendition.get('media'), 'upload')

    def put(self, content, filename=None, content_type=None, metadata=None, resource=None, folder=None, **kwargs):
        """Store content in gridfs.

        :param content: binary stream
        :param filename: unique filename
        :param content_type: mime type
        :param metadata: file metadata
        :param resource: type of resource
        :param str folder: Folder that the file will be stored in
        :return str: The ID that was generated for this object
        """
        if '_id' in kwargs:
            kwargs['_id'] = format_id(kwargs['_id'])
        if folder:
            # Prefix the filename with "folder/", stripping a trailing slash.
            if folder[-1] == '/':
                folder = folder[:-1]
            if filename:
                filename = '{}/{}'.format(folder, filename)
        try:
            logger.info('Adding file {} to the GridFS'.format(filename))
            return self.fs(resource).put(content, content_type=content_type,
                                         filename=filename, metadata=metadata, **kwargs)
        except gridfs.errors.FileExists:
            # NOTE(review): assumes '_id' was passed when FileExists is
            # raised; otherwise this logging line itself raises KeyError.
            logger.info('File exists filename=%s id=%s' % (filename, kwargs['_id']))

    def fs(self, resource=None):
        """Return (and cache per mongo prefix) a GridFS for *resource*."""
        resource = resource or 'upload'
        driver = self.app.data.mongo
        px = driver.current_mongo_prefix(resource)
        if px not in self._fs:
            self._fs[px] = gridfs.GridFS(driver.pymongo(prefix=px).db)
        return self._fs[px]

    def remove_unreferenced_files(self, existing_files, resource=None):
        """Get the files from Grid FS and compare against existing files and delete the orphans."""
        current_files = self.fs(resource).find({'_id': {'$nin': list(existing_files)}})
        for file_id in (file._id for file in current_files if str(file._id) not in existing_files):
            print('Removing unused file: ', file_id)
            self.delete(file_id)
        print('Image cleaning completed successfully.')

    def find(self, folder=None, upload_date=None, resource=None):
        """Search for files in the GridFS

        Searches for files in the GridFS using a combination of folder name and/or upload date
        comparisons. The upload date comparisons uses the same mongodb BSON comparison operators,
        i.e. `$eq`, `$gt`, `$gte`, `$lt`, `$lte` and `$ne`, and can be combined together.

        :param str folder: Folder name
        :param dict upload_date: Upload date with comparison operator (i.e. $lt, $lte, $gt or $gte)
        :param resource: The resource type to use
        :return list: List of files that matched the provided parameters
        """
        folder_query = {'filename': {'$regex': '^{}/'.format(folder)}} if folder else None
        date_query = {'uploadDate': upload_date} if upload_date else None
        if folder and upload_date:
            query = {'$and': [folder_query, date_query]}
        elif folder:
            query = folder_query
        elif date_query:
            query = date_query
        else:
            query = {}
        files = []
        for file in self.fs(resource).find(query):
            try:
                files.append({
                    '_id': file._id,
                    'filename': file.filename,
                    'upload_date': file.upload_date,
                    'size': file.length,
                    '_etag': file.md5
                })
            except AttributeError as e:
                # Skip entries missing expected GridFS attributes.
                logging.warning('Failed to get file attributes. {}'.format(e))
        return files

    def getFilename(self, media_id):
        """Return *media_id* with its file extension stripped."""
        media, _ = os.path.splitext(media_id)
        return media
| agpl-3.0 |
freephys/python_ase | ase/utils/memory.py | 12 | 16716 | import os
import numpy as np
from UserDict import DictMixin
# -------------------------------------------------------------------
class MemoryBase(object, DictMixin):
    """Virtual memory (VM) statistics of the current process
    obtained from the relevant entries in /proc/<pid>/status:

    VmPeak   Peak virtual memory size in bytes.
    VmLck    ???
    VmHWM    Peak resident set size ("high water mark") in bytes.
    VmRSS    Resident memory usage in bytes.
    VmSize   VM usage of the entire process in bytes.
    VmData   VM usage of heap in bytes.
    VmStk    VM usage of stack in bytes.
    VmExe    VM usage of exe's and statically linked libraries in bytes.
    VmLib    VM usage of dynamically linked libraries in bytes.
    VmPTE    ???

    Note that VmSize > VmData + VmStk + VmExe + VmLib due to overhead.
    """
    # NOTE(review): Python 2 module (print statements, UserDict.DictMixin,
    # iteritems); values are stored as a parallel numpy array over _keys,
    # with NaN marking an "outdated" (not yet refreshed) entry.
    _scale = {'KB':1024.0, 'MB':1024.0**2}  # unit multipliers for /proc values
    _keys = ('VmPeak', 'VmLck', 'VmHWM', 'VmRSS', 'VmSize', 'VmData', \
        'VmStk', 'VmExe', 'VmLib', 'VmPTE')

    def __init__(self, verbose=0):
        """Allocate the (uninitialized) value array; verbose>=1 traces calls."""
        self.verbose = verbose
        if self.verbose>=2: print 'MemoryBase.__init__'
        object.__init__(self)
        # NOTE(review): np.float was removed in NumPy 1.24+; the modern
        # spelling is np.float64. Values are garbage until first refresh.
        self._values = np.empty(len(self._keys), dtype=np.float)

    def __repr__(self):
        """Return a representation of recorded VM statistics.
        x.__repr__() <==> repr(x)"""
        if self.verbose>=2: print 'MemoryBase.__repr__'
        s = object.__repr__(self)
        w = max(map(len, self._keys))
        unit = 'MB'
        for k,v in self.iteritems():
            res = '<N/A>'
            if not np.isnan(v):
                res = '%8.3f %s' % (v/self._scale[unit], unit)
            s += '\n\t' + k.ljust(w) + ': ' + res.rjust(8)
        return s

    def __len__(self):
        """Number of VM keys which have not been outdated.
        x.__len__() <==> len(x)"""
        if self.verbose>=3: print 'MemoryBase.__len__'
        return np.sum(~np.isnan(self._values))

    def __getitem__(self, key):
        """Return floating point number associated with a VM key.
        x.__getitem__(y) <==> x[y]"""
        if self.verbose>=2: print 'MemoryBase.__getitem__'
        if key not in self:
            raise KeyError(key)
        i = self.keys().index(key)
        return self._values[i]

    def __setitem__(self, key, value):
        """x.__setitem__(i, y) <==> x[i]=y"""
        if self.verbose>=2: print 'MemoryBase.__setitem__'
        # Subclasses decide mutability (see MemoryStatistics/MemorySingleton).
        raise Exception('Virtual member function.')

    def __delitem__(self, key):
        """x.__delitem__(y) <==> del x[y]"""
        if self.verbose>=2: print 'MemoryBase.__delitem__'
        raise Exception('Virtual member function.')

    def clear(self):
        """D.clear() -> None.  Remove all items from D."""
        if self.verbose>=1: print 'MemoryBase.clear'
        raise Exception('Virtual member function.')

    def update(self, other=None):
        """D.update(E) -> None.  Update D from E: for k in E.keys(): D[k] = E[k]"""
        if self.verbose>=1: print 'MemoryBase.update'
        DictMixin.update(self, other)

    def copy(self):
        """Return a shallow copy of a VM statistics instance.
        D.copy() -> a shallow copy of D"""
        if self.verbose>=1: print 'MemoryBase.copy'
        # Bypass subclass __init__ so the copy is always a mutable-to-fill
        # instance of the concrete class, then copy values over.
        res = object.__new__(self.__class__)
        MemoryBase.__init__(res, self.verbose)
        DictMixin.update(res, self)
        return res

    def has_key(self, key): #necessary to avoid infinite recursion
        """Return boolean to indicate whether key is a supported VM key.
        D.has_key(k) -> True if D has a key k, else False"""
        if self.verbose>=3: print 'MemoryBase.has_key'
        return key in self._keys

    def keys(self):
        """Return list of supported VM keys.
        D.keys() -> list of D's keys"""
        if self.verbose>=3: print 'MemoryBase.keys'
        return list(self._keys)

    def values(self):
        """Return list of recorded VM statistics.
        D.values() -> list of D's values"""
        if self.verbose>=3: print 'MemoryBase.values'
        return list(self._values)

    def get(self, key, default=None):
        """Return floating point number associated with a VM key.
        D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None."""
        if self.verbose>=1: print 'MemoryBase.get'
        v = self[key]
        # Coerce int/float defaults to numpy floats before validating.
        if type(default) in [int,float]:
            default = np.float_(default)
        if default is not None and not isinstance(default, np.floating):
            raise ValueError('Default value must be a floating point number.')
        if default is not None and np.isnan(v):
            return default
        else:
            return v

    def setdefault(self, key, default=None):
        """Return floating point number associated with a VM key.
        D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D"""
        if self.verbose>=1: print 'MemoryBase.setdefault'
        v = self[key]
        if type(default) in [int,float]:
            default = np.float_(default)
        if default is not None and not isinstance(default, np.floating):
            raise ValueError('Default value must be a floating point number.')
        if default is not None and np.isnan(v):
            self[key] = default
            return default
        else:
            return v

    def pop(self, key, default=None):
        """Return floating point number for a VM key and mark it as outdated.
        D.pop(k[,d]) -> v, remove specified key and return the corresponding value
        If key is not found, d is returned if given, otherwise KeyError is raised"""
        if self.verbose>=1: print 'MemoryBase.pop'
        v = self[key]
        if type(default) in [int,float]:
            default = np.float_(default)
        if default is not None and not isinstance(default, np.floating):
            raise ValueError('Default value must be a floating point number.')
        if default is not None and np.isnan(v):
            return default
        else:
            del self[key]
            return v

    def popitem(self):
        """Return floating point number for some not-yet outdated VM key.
        D.popitem() -> (k, v), remove and return some (key, value) pair as a
        2-tuple; but raise KeyError if D is empty"""
        if self.verbose>=1: print 'MemoryBase.popitem'
        for k,v in self.iteritems():
            if not np.isnan(v):
                del self[k]
                return (k,v)
        raise KeyError

    def __add__(self, other):
        """x.__add__(y) <==> x+y"""
        if self.verbose>=1: print 'MemoryBase.__add__(%s,%s)' \
            % (object.__repr__(self), object.__repr__(other))
        res = self.copy()
        if isinstance(other, MemoryBase):
            res._values.__iadd__(other._values)
        elif type(other) in [int,float]:
            res._values.__iadd__(other)
        else:
            raise TypeError('Unsupported operand type')
        return res

    def __sub__(self, other):
        """x.__sub__(y) <==> x-y"""
        if self.verbose>=1: print 'MemoryBase.__sub__(%s,%s)' \
            % (object.__repr__(self), object.__repr__(other))
        res = self.copy()
        if isinstance(other, MemoryBase):
            res._values.__isub__(other._values)
        elif type(other) in [int,float]:
            res._values.__isub__(other)
        else:
            raise TypeError('Unsupported operand type')
        return res

    def __radd__(self, other):
        """x.__radd__(y) <==> y+x"""
        if self.verbose>=1: print 'MemoryBase.__radd__(%s,%s)' \
            % (object.__repr__(self), object.__repr__(other))
        res = self.copy()
        if isinstance(other, MemoryBase):
            res._values.__iadd__(other._values)
        elif type(other) in [int,float]:
            res._values.__iadd__(other)
        else:
            raise TypeError('Unsupported operand type')
        return res

    def __rsub__(self, other):
        """x.__rsub__(y) <==> y-x"""
        if self.verbose>=1: print 'MemoryBase.__rsub__(%s,%s)' \
            % (object.__repr__(self), object.__repr__(other))
        res = self.copy()
        # Negate in place, then add: y - x == (-x) + y.
        res._values.__imul__(-1.0)
        if isinstance(other, MemoryBase):
            res._values.__iadd__(other._values)
        elif type(other) in [int,float]:
            res._values.__iadd__(other)
        else:
            raise TypeError('Unsupported operand type')
        return res
# -------------------------------------------------------------------
class MemoryStatistics(MemoryBase):
def __init__(self, verbose=0):
MemoryBase.__init__(self, verbose)
self.update()
def __setitem__(self, key, value):
"""Set VM key to a floating point number.
x.__setitem__(i, y) <==> x[i]=y"""
if self.verbose>=2: print 'MemoryStatistics.__setitem__'
if key not in self:
raise KeyError(key)
if type(value) in [int,float]:
value = np.float_(value)
if not isinstance(value, np.floating):
raise ValueError('Value must be a floating point number.')
i = self.keys().index(key)
self._values[i] = value
def __delitem__(self, key):
"""Mark a VK key as outdated.
x.__delitem__(y) <==> del x[y]"""
if self.verbose>=2: print 'MemoryStatistics.__delitem__'
if key not in self:
raise KeyError(key)
self[key] = np.nan
def clear(self):
"""Mark all supported VM keys as outdated.
D.clear() -> None. Remove all items from D."""
if self.verbose>=1: print 'MemoryStatistics.clear'
self._values[:] = np.nan
def refresh(self):
"""Refresh all outdated VM keys by reading /proc/<pid>/status."""
if self.verbose>=1: print 'MemoryBase.refresh'
# NB: Linux /proc is for humans; Solaris /proc is for programs!
# TODO: Use pipe from 'prstat -p <pid>' or 'pmap -x <pid> 1 1'
# Skip refresh if none are outdated (i.e. nan)
if not np.isnan(self._values).any():
if self.verbose>=2: print 'refresh: skipping...'
return
try:
f = open('/proc/%d/status' % os.getpid(), 'r')
for line in f:
k, v = line.decode('ascii').split(':')
# Only refresh supported keys that are outdated (i.e. nan)
if k in self and np.isnan(self[k]):
t, s = v.strip().split(None, 1)
if self.verbose >= 2:
print 'refresh: k=%s, t=%s, s=%s' % (k, t, s)
self[k] = float(t) * self._scale[s.upper()]
f.close()
except (IOError, UnicodeError, ValueError):
# Reset on error
self.clear()
def update(self, other=None):
"""Update VM statistics from a supplied dict, else clear and refresh.
D.update(E) -> None. Update D from E: for k in E.keys(): D[k] = E[k]"""
if self.verbose>=1: print 'MemoryStatistics.update'
# Call to update without arguments has special meaning
if other is None:
self.clear()
self.refresh()
else:
MemoryBase.update(self, other)
def __iadd__(self, other):
"""x.__iadd__(y) <==> x+=y"""
if self.verbose>=1: print 'MemoryStatistics.__iadd__(%s,%s)' \
% (object.__repr__(self), object.__repr__(other))
if isinstance(other, MemoryBase):
self._values.__iadd__(other._values)
elif type(other) in [int,float]:
self._values.__iadd__(other)
else:
raise TypeError('Unsupported operand type')
return self
def __isub__(self, other):
    """x.__isub__(y) <==> x-=y

    Subtracts another MemoryBase's values elementwise, or a scalar from
    every value, in place (mirror of __iadd__)."""
    if self.verbose>=1: print 'MemoryStatistics.__isub__(%s,%s)' \
        % (object.__repr__(self), object.__repr__(other))
    if isinstance(other, MemoryBase):
        self._values.__isub__(other._values)
    elif type(other) in [int,float]:
        self._values.__isub__(other)
    else:
        raise TypeError('Unsupported operand type')
    return self
# -------------------------------------------------------------------
#http://www.eecho.info/Echo/python/singleton/
#http://mail.python.org/pipermail/python-list/2007-July/622333.html
class Singleton(object):
    """A Pythonic Singleton object.

    Every instantiation returns the same instance; the instance is created
    lazily on the first call and cached on the class as '_inst'.
    """
    def __new__(cls, *args, **kwargs):
        if '_inst' not in vars(cls):
            # object.__new__ must not be given the constructor arguments:
            # doing so raises TypeError on modern Python (and was already
            # deprecated on Python 2.6+).  Subclass __init__ still receives
            # them normally.
            cls._inst = object.__new__(cls)
        return cls._inst
class MemorySingleton(MemoryBase, Singleton):
    # Append the singleton caveat to the inherited class docstring.
    __doc__ = MemoryBase.__doc__ + """
    The singleton variant is immutable once it has been instantiated, which
    makes it suitable for recording the initial overhead of starting Python."""
    def __init__(self, verbose=0):
        # Only the very first instantiation initializes state; later calls
        # find '_values' already present and do nothing.
        if verbose>=1: print 'MemorySingleton.__init__'
        if '_values' not in vars(self):
            if verbose>=1: print 'MemorySingleton.__init__ FIRST!'
            # Hack to circumvent singleton immutability: temporarily
            # masquerade as the mutable subclass so its __init__ runs.
            self.__class__ = MemoryStatistics
            self.__init__(verbose)
            self.__class__ = MemorySingleton
    def __setitem__(self, key, value):
        """Disabled for the singleton.
        x.__setitem__(i, y) <==> x[i]=y"""
        if self.verbose>=2: print 'MemorySingleton.__setitem__'
        raise ReferenceError('Singleton is immutable.')
    def __delitem__(self, key):
        """Disabled for the singleton.
        x.__delitem__(y) <==> del x[y]"""
        if self.verbose>=2: print 'MemorySingleton.__delitem__'
        raise ReferenceError('Singleton is immutable.')
    def clear(self):
        """Disabled for the singleton.
        D.clear() -> None. Remove all items from D."""
        if self.verbose>=1: print 'MemorySingleton.clear'
        raise ReferenceError('Singleton is immutable.')
    # NOTE(review): this signature drops the 'other' argument that
    # MemoryStatistics.update accepts, so singleton.update(d) raises
    # TypeError rather than ReferenceError -- confirm intended.
    def update(self):
        """Disabled for the singleton.
        D.update(E) -> None. Update D from E: for k in E.keys(): D[k] = E[k]"""
        if self.verbose>=1: print 'MemorySingleton.update'
        raise ReferenceError('Singleton is immutable.')
    def copy(self):
        """Return a shallow non-singleton copy of a VM statistics instance.
        D.copy() -> a shallow copy of D"""
        if self.verbose>=1: print 'MemorySingleton.copy'
        # Hack to circumvent singleton self-copy: rebrand as the mutable
        # class so the inherited copy implementation runs, then restore.
        self.__class__ = MemoryStatistics
        res = self.copy()
        self.__class__ = MemorySingleton
        return res
# Make sure singleton is instantiated at import time (per the class
# docstring, presumably so it captures the interpreter's start-up
# footprint before user allocations happen).
MemorySingleton()
# -------------------------------------------------------------------
# Helper functions for leak testing with NumPy arrays
def shapegen(size, ndims, ecc=0.5):
    """Yield the extents of an N-dimensional array shape that contains
    approximately `size` elements in total.

    size: int or long in [1,inf[
        The total number of elements
    ndims: int in [1,inf[
        The number of dimensions
    ecc=0.5: float in ]0,1[
        The eccentricity of the distribution (how far each extent may
        stray from the size-balanced value)
    """
    assert type(size) in [int, float] and size >= 1
    assert type(ndims) is int and ndims >= 1
    assert type(ecc) in [int, float] and 0 < ecc < 1
    remaining = size
    for axis in range(ndims - 1):
        # Balanced extent for the axes still to be generated.
        scale = remaining ** (1.0 / (ndims - axis))
        extent = round(np.random.uniform((1 - ecc) * scale,
                                         1.0 / (1 - ecc) * scale))
        remaining /= extent
        yield extent
    # Whatever is left over becomes the final axis.
    yield round(remaining)
def shapeopt(maxseed, size, ndims, ecc=0.5):
    """Search over RNG seeds for the N-dimensional shape whose element
    count is closest to `size`; return (digits_of_error, shape).

    maxseed: int in [1,inf[
        The maximal number of seeds to try
    size: int or long in [1,inf[
        The total number of elements
    ndims: int in [1,inf[
        The number of dimensions
    ecc=0.5: float in ]0,1[
        The eccentricity of the distribution
    """
    assert type(maxseed) is int and maxseed >= 1
    assert type(size) in [int, float] and size >= 1
    assert type(ndims) is int and ndims >= 1
    assert type(ecc) in [int, float] and 0 < ecc < 1
    best_digits, best_shape = np.inf, None
    for seed in range(maxseed):
        np.random.seed(seed)
        candidate = tuple(shapegen(size, ndims, ecc))
        if np.prod(candidate) == size:
            # Exact hit: log10(0) would be -inf anyway, so short-circuit.
            return -np.inf, candidate
        digits = np.log10(abs(np.prod(candidate) - size))
        if digits < best_digits:
            best_digits, best_shape = digits, candidate
    return best_digits, best_shape
| gpl-3.0 |
wglass/lighthouse | tests/peer_tests.py | 1 | 2899 | try:
import unittest2 as unittest
except ImportError:
import unittest
import json
from mock import patch
from lighthouse.peer import Peer
class PeerTests(unittest.TestCase):
    """Tests for lighthouse.peer.Peer: port defaulting, current()
    discovery, JSON (de)serialization, equality and hashing."""
    def test_default_port(self):
        # Omitting port falls back to the default of 1024.
        peer = Peer("service02", "1.2.3.4")
        self.assertEqual(peer.port, 1024)
    @patch("lighthouse.peer.socket")
    def test_current_uses_local_machine_values(self, mock_socket):
        # current() resolves the local FQDN and its IP via the socket module.
        mock_socket.getfqdn.return_value = "my-host.example.co.biz"
        mock_socket.gethostbyname.return_value = "10.10.10.1"
        peer = Peer.current()
        self.assertEqual(peer.name, "my-host.example.co.biz")
        self.assertEqual(peer.ip, "10.10.10.1")
        mock_socket.gethostbyname.assert_called_once_with(
            "my-host.example.co.biz"
        )
    def test_serialize(self):
        # serialize() emits a JSON object carrying exactly name/ip/port.
        peer = Peer("cluster03", "196.0.0.8", port=3333)
        self.assertEqual(
            json.loads(peer.serialize()),
            {
                "name": "cluster03",
                "ip": "196.0.0.8",
                "port": 3333
            }
        )
    def test_deserialize(self):
        # Round-trip from a JSON payload restores all three attributes.
        peer = Peer.deserialize(
            json.dumps({
                "name": "cluster03",
                "ip": "196.0.0.8",
                "port": 3333
            })
        )
        self.assertEqual(peer.name, "cluster03")
        self.assertEqual(peer.ip, "196.0.0.8")
        self.assertEqual(peer.port, 3333)
    def test_deserialize_without_port(self):
        # A payload without 'port' uses the 1024 default.
        peer = Peer.deserialize(
            json.dumps({
                "name": "cluster03",
                "ip": "196.0.0.8",
            })
        )
        self.assertEqual(peer.name, "cluster03")
        self.assertEqual(peer.ip, "196.0.0.8")
        self.assertEqual(peer.port, 1024)
    def test_deserialize_without_name(self):
        # 'name' is mandatory: deserialize raises ValueError without it.
        self.assertRaises(
            ValueError,
            Peer.deserialize,
            json.dumps({
                "ip": "196.0.0.8",
                "port": 333
            })
        )
    def test_deserialize_without_ip(self):
        # 'ip' is mandatory as well.
        self.assertRaises(
            ValueError,
            Peer.deserialize,
            json.dumps({
                "name": "cluster03",
                "port": 333
            })
        )
    def test_equivalence(self):
        # Equality apparently ignores the display name: same ip/port
        # compares equal even with different names.
        peer1 = Peer("app01", "10.0.3.10", port=8888)
        peer2 = Peer("app01.local", "10.0.3.10", port=8888)
        peer3 = Peer("app02", "10.0.3.9", port=8888)
        self.assertEqual(peer1, peer2)
        self.assertNotEqual(peer1, peer3)
        self.assertNotEqual(peer2, peer3)
    def test_set_of_peers(self):
        # Hashing matches equality: peers equal by ip/port collapse to a
        # single set entry.
        peer1 = Peer("app01", "10.0.3.10", port=8888)
        peer2 = Peer("app01.local", "10.0.3.10", port=8888)
        peer3 = Peer("app02", "10.0.3.9", port=8888)
        self.assertEqual(
            set([peer1, peer2, peer3]),
            set([peer1, peer3]),
        )
| apache-2.0 |
janpetras/henley | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/msvs.py | 372 | 124844 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
# Pre-compiled matcher for GUID validation (uppercase hex and dashes only;
# see the IncrediBuild note above).
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')

# Values substituted for gyp's generic expansion variables when generating
# MSVS projects; mostly mapped onto Visual Studio's own $(...) macros.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '.exe',
    'STATIC_LIB_PREFIX': '',
    'SHARED_LIB_PREFIX': '',
    'STATIC_LIB_SUFFIX': '.lib',
    'SHARED_LIB_SUFFIX': '.dll',
    'INTERMEDIATE_DIR': '$(IntDir)',
    'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
    'OS': 'win',
    'PRODUCT_DIR': '$(OutDir)',
    'LIB_DIR': '$(OutDir)lib',
    'RULE_INPUT_ROOT': '$(InputName)',
    'RULE_INPUT_DIRNAME': '$(InputDir)',
    'RULE_INPUT_EXT': '$(InputExt)',
    'RULE_INPUT_NAME': '$(InputFileName)',
    'RULE_INPUT_PATH': '$(InputPath)',
    'CONFIGURATION_NAME': '$(ConfigurationName)',
}

# The msvs specific sections that hold paths.
generator_additional_path_sections = [
    'msvs_cygwin_dirs',
    'msvs_props',
]

# msvs-specific keys that apply to the target as a whole rather than to a
# single configuration.
generator_additional_non_configuration_keys = [
    'msvs_cygwin_dirs',
    'msvs_cygwin_shell',
    'msvs_large_pdb',
    'msvs_shard',
    'msvs_external_builder',
    'msvs_external_builder_out_dir',
    'msvs_external_builder_build_cmd',
    'msvs_external_builder_clean_cmd',
]

# List of precompiled header related keys.
precomp_keys = [
    'msvs_precompiled_header',
    'msvs_precompiled_source',
]

# Lazily populated by _GetDomainAndUserName().
cached_username = None
cached_domain = None
# Based on http://code.activestate.com/recipes/576694/.
try:
    # collections.MutableSet moved to collections.abc in Python 3.3 and the
    # old alias was removed in Python 3.10; import whichever exists.
    from collections.abc import MutableSet as _MutableSet
except ImportError:
    from collections import MutableSet as _MutableSet


class OrderedSet(_MutableSet):
    """A mutable set that iterates in insertion order.

    Backed by a dict mapping each key to its node in a circular doubly
    linked list; 'end' is the sentinel node [None, prev, next].
    """
    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]  # sentinel node for doubly linked list
        self.map = {}  # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def discard(self, key):
        # Unlink the key's node from the linked list; no-op when absent.
        if key in self.map:
            key, prev_node, next_node = self.map.pop(key)
            prev_node[2] = next_node
            next_node[1] = prev_node

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        # Append at the tail (just before the sentinel) if not present.
        if key not in self.map:
            end = self.end
            curr = end[1]
            curr[2] = end[1] = self.map[key] = [key, curr, end]

    def update(self, iterable):
        """Add every item of iterable, preserving first-seen order."""
        for i in iterable:
            if i not in self:
                self.add(i)

    def __iter__(self):
        end = self.end
        curr = end[2]
        while curr is not end:
            yield curr[0]
            curr = curr[2]
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
    """Return the (domain, username) pair for the current Windows user.

    Non-Windows platforms get fixed placeholder values.  Results are
    cached in module globals; when the USERDOMAIN/USERNAME environment
    variables are missing, the values are scraped from the output of
    'net config Workstation'.
    """
    if sys.platform not in ('win32', 'cygwin'):
        return ('DOMAIN', 'USERNAME')
    global cached_username
    global cached_domain
    if not cached_domain or not cached_username:
        domain = os.environ.get('USERDOMAIN')
        username = os.environ.get('USERNAME')
        if not domain or not username:
            # Fall back to parsing the workstation configuration report.
            call = subprocess.Popen(['net', 'config', 'Workstation'],
                                    stdout=subprocess.PIPE)
            config = call.communicate()[0]
            username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
            username_match = username_re.search(config)
            if username_match:
                username = username_match.group(1)
            domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
            domain_match = domain_re.search(config)
            if domain_match:
                domain = domain_match.group(1)
        cached_domain = domain
        cached_username = username
    return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalize.d
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
    """Convert a path to the backslashed form expected in a vcproj file.

    Arguments:
      path: The path to convert, may contain / etc.
    Returns:
      The path with all slashes made into backslashes, prefixed with
      fixpath_prefix when applicable, normalized, and without a trailing
      backslash.
    """
    if fixpath_prefix and path and not os.path.isabs(path) and path[0] != '$':
        path = os.path.join(fixpath_prefix, path)
    path = _NormalizedSource(path.replace('/', '\\'))
    if path.endswith('\\'):
        path = path[:-1]
    return path
def _FixPaths(paths):
    """Apply _FixPath to every path in the list, returning a new list."""
    return list(map(_FixPath, paths))
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
                                     list_excluded=True, msvs_version=None):
    """Converts a list of split source file paths into a vcproj folder
    hierarchy.

    Arguments:
      sources: A list of source file paths split.
      prefix: A list of source file path layers meant to apply to each of
          sources.
      excluded: A set of excluded files.
      msvs_version: A MSVSVersion object.
    Returns:
      A hierarchy of filenames and MSVSProject.Filter objects that matches
      the layout of the source tree.
    For example:
      _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
                                       prefix=['joe'])
      -->
      [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
       MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
    """
    # NOTE(review): 'excluded' is effectively required -- the membership
    # test below would raise TypeError on the None default; callers appear
    # to always pass a set.  Confirm before relying on the default.
    if not prefix: prefix = []
    result = []
    excluded_result = []
    folders = OrderedDict()
    # Gather files into the final result, excluded, or folders.
    for s in sources:
        if len(s) == 1:
            # A single-component entry is a file at this level.
            filename = _NormalizedSource('\\'.join(prefix + s))
            if filename in excluded:
                excluded_result.append(filename)
            else:
                result.append(filename)
        elif msvs_version and not msvs_version.UsesVcxproj():
            # For MSVS 2008 and earlier, we need to process all files before
            # walking the sub folders.
            if not folders.get(s[0]):
                folders[s[0]] = []
            folders[s[0]].append(s[1:])
        else:
            # Recurse into the sub-folder immediately (vcxproj path).
            contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
                                                        excluded=excluded,
                                                        list_excluded=list_excluded,
                                                        msvs_version=msvs_version)
            contents = MSVSProject.Filter(s[0], contents=contents)
            result.append(contents)
    # Add a folder for excluded files.
    if excluded_result and list_excluded:
        excluded_folder = MSVSProject.Filter('_excluded_files',
                                             contents=excluded_result)
        result.append(excluded_folder)
    if msvs_version and msvs_version.UsesVcxproj():
        return result
    # Populate all the folders.
    for f in folders:
        contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
                                                    excluded=excluded,
                                                    list_excluded=list_excluded)
        contents = MSVSProject.Filter(f, contents=contents)
        result.append(contents)
    return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
    """Append value to a tool setting, silently ignoring empty values."""
    if value:
        _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
    """Return the 'Base|Platform' configuration name MSVS expects."""
    platform = _ConfigPlatform(config_data)
    base = _ConfigBaseName(config_name, platform)
    return '%s|%s' % (base, platform)
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
                                quote_cmd, do_setup_env):
    """Build the single command-line string a custom build step will run.

    Arguments:
      spec: the target's project dict (consulted for msvs_cygwin_dirs).
      cmd: the command as an argv-style list from the gyp file.
      cygwin_shell: truthy to run the command under cygwin bash with path
          translation; falsy to run it directly under cmd.exe.
      has_input_path: whether $(InputPath) may appear in the command.
      quote_cmd: whether to quote each argument (direct-cmd mode only).
      do_setup_env: whether to call the cygwin setup_env.bat first.
    Returns:
      The command string, possibly prefixed by a preamble that computes
      %INPUTDIR% from $(InputDir).
    """
    # $(InputDir) needs post-processing: strip the $(ProjectDir) prefix and
    # the trailing slash via cmd.exe environment-substring tricks.
    if [x for x in cmd if '$(InputDir)' in x]:
        input_dir_preamble = (
            'set INPUTDIR=$(InputDir)\n'
            'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
            'set INPUTDIR=%INPUTDIR:~0,-1%\n'
        )
    else:
        input_dir_preamble = ''
    if cygwin_shell:
        # Find path to cygwin.
        cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
        # Prepare command: rewrite MSVS macros into cygpath translations of
        # environment variables set up below.
        direct_cmd = cmd
        direct_cmd = [i.replace('$(IntDir)',
                                '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
        direct_cmd = [i.replace('$(OutDir)',
                                '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
        direct_cmd = [i.replace('$(InputDir)',
                                '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
        if has_input_path:
            direct_cmd = [i.replace('$(InputPath)',
                                    '`cygpath -m "${INPUTPATH}"`')
                          for i in direct_cmd]
        direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
        # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
        direct_cmd = ' '.join(direct_cmd)
        # TODO(quote): regularize quoting path names throughout the module
        cmd = ''
        if do_setup_env:
            cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
        cmd += 'set CYGWIN=nontsec&& '
        # Only define the env vars the command actually references.
        if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
            cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
        if direct_cmd.find('INTDIR') >= 0:
            cmd += 'set INTDIR=$(IntDir)&& '
        if direct_cmd.find('OUTDIR') >= 0:
            cmd += 'set OUTDIR=$(OutDir)&& '
        if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
            cmd += 'set INPUTPATH=$(InputPath) && '
        cmd += 'bash -c "%(cmd)s"'
        cmd = cmd % {'cygwin_dir': cygwin_dir,
                     'cmd': direct_cmd}
        return input_dir_preamble + cmd
    else:
        # Convert cat --> type to mimic unix.
        if cmd[0] == 'cat':
            command = ['type']
        else:
            command = [cmd[0].replace('/', '\\')]
        # Add call before command to ensure that commands can be tied together one
        # after the other without aborting in Incredibuild, since IB makes a bat
        # file out of the raw command string, and some commands (like python) are
        # actually batch files themselves.
        command.insert(0, 'call')
        # Fix the paths
        # TODO(quote): This is a really ugly heuristic, and will miss path fixing
        # for arguments like "--arg=path" or "/opt:path".
        # If the argument starts with a slash or dash, it's probably a command line
        # switch
        arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
        arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
        arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
        if quote_cmd:
            # Support a mode for using cmd directly.
            # Convert any paths to native form (first element is used directly).
            # TODO(quote): regularize quoting path names throughout the module
            arguments = ['"%s"' % i for i in arguments]
        # Collapse into a single command.
        return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
    """Resolve a rule's cygwin/quoting options and build its command line.

    # Currently this weird argument munging is used to duplicate the way a
    # python script would need to be run as part of the chrome tree.
    # Eventually we should add some sort of rule_default option to set this
    # per project. For now the behavior chrome needs is the default.
    """
    use_cygwin = rule.get('msvs_cygwin_shell')
    if use_cygwin is None:
        # Fall back to the target-wide setting (default: use cygwin).
        use_cygwin = int(spec.get('msvs_cygwin_shell', 1))
    elif isinstance(use_cygwin, str):
        use_cygwin = int(use_cygwin)
    quote_cmd = int(rule.get('msvs_quote_cmd', 1))
    return _BuildCommandLineForRuleRaw(spec, rule['action'], use_cygwin,
                                       has_input_path, quote_cmd,
                                       do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
                               inputs, outputs, description, cmd):
    """Add a custom build tool to execute something.

    Arguments:
      p: the target project
      spec: the target project dict
      primary_input: input file to attach the build tool to
      inputs: list of inputs
      outputs: list of outputs
      description: description of the action
      cmd: command line to execute
    """
    inputs = _FixPaths(inputs)
    outputs = _FixPaths(outputs)
    tool = MSVSProject.Tool(
        'VCCustomBuildTool',
        {'Description': description,
         'AdditionalDependencies': ';'.join(inputs),
         'Outputs': ';'.join(outputs),
         'CommandLine': cmd,
        })
    # Add to the properties of primary input for each config.
    for config_name, c_data in spec['configurations'].iteritems():
        p.AddFileConfig(_FixPath(primary_input),
                        _ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
    """Add actions accumulated into an actions_dict, merging as needed.

    Arguments:
      p: the target project
      spec: the target project dict
      actions_dict: dictionary keyed on input name, which maps to a list of
          dicts describing the actions attached to that input file.
    """
    for primary_input in actions_dict:
        inputs = OrderedSet()
        outputs = OrderedSet()
        descriptions = []
        commands = []
        # Collapse all actions sharing this input into a single tool:
        # union the inputs/outputs, join descriptions and commands.
        for action in actions_dict[primary_input]:
            inputs.update(OrderedSet(action['inputs']))
            outputs.update(OrderedSet(action['outputs']))
            descriptions.append(action['description'])
            commands.append(action['command'])
        # Add the custom build step for one input file.
        description = ', and also '.join(descriptions)
        command = '\r\n'.join(commands)
        _AddCustomBuildToolForMSVS(p, spec,
                                   primary_input=primary_input,
                                   inputs=inputs,
                                   outputs=outputs,
                                   description=description,
                                   cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
    """Find the inputs and outputs generated by a rule for one trigger file.

    Arguments:
      rule: the rule in question.
      trigger_file: the main trigger for this rule.
    Returns:
      The pair of (inputs, outputs) OrderedSets; inputs always include the
      trigger file itself.
    """
    inputs = OrderedSet()
    outputs = OrderedSet()
    inputs.add(trigger_file)
    for raw_input in _FixPaths(rule.get('inputs', [])):
        inputs.add(_RuleExpandPath(raw_input, trigger_file))
    for raw_output in _FixPaths(rule.get('outputs', [])):
        outputs.add(_RuleExpandPath(raw_output, trigger_file))
    return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
    """Generate a native rules file.

    Arguments:
      p: the target project
      rules: the set of rules to include
      output_dir: the directory in which the project/gyp resides
      spec: the project dict
      options: global generator options
    """
    rules_filename = '%s%s.rules' % (spec['target_name'],
                                     options.suffix)
    rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
                                     spec['target_name'])
    # Add each rule.
    for r in rules:
        rule_name = r['rule_name']
        rule_ext = r['extension']
        inputs = _FixPaths(r.get('inputs', []))
        outputs = _FixPaths(r.get('outputs', []))
        # Skip a rule with no action and no inputs.
        if 'action' not in r and not r.get('rule_sources', []):
            continue
        cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
                                       do_setup_env=True)
        rules_file.AddCustomBuildRule(name=rule_name,
                                      description=r.get('message', rule_name),
                                      extensions=[rule_ext],
                                      additional_dependencies=inputs,
                                      outputs=outputs,
                                      cmd=cmd)
    # Write out rules file.
    rules_file.WriteIfChanged()
    # Add rules file to project.
    p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
                           sources, options, actions_to_add):
    """Generate an external makefile to do a set of rules.

    Arguments:
      rules: the list of rules to include
      output_dir: path containing project and gyp files
      spec: project specification data
      sources: set of sources known
      options: global generator options
      actions_to_add: The list of actions we will add to.
    """
    filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
    mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
    # Find cygwin style versions of some paths.
    mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
    mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
    # Gather stuff needed to emit all: target.
    all_inputs = OrderedSet()
    all_outputs = OrderedSet()
    all_output_dirs = OrderedSet()
    first_outputs = []
    for rule in rules:
        trigger_files = _FindRuleTriggerFiles(rule, sources)
        for tf in trigger_files:
            inputs, outputs = _RuleInputsAndOutputs(rule, tf)
            all_inputs.update(OrderedSet(inputs))
            all_outputs.update(OrderedSet(outputs))
            # Only use one target from each rule as the dependency for
            # 'all' so we don't try to build each rule multiple times.
            first_outputs.append(list(outputs)[0])
            # Get the unique output directories for this rule.
            output_dirs = [os.path.split(i)[0] for i in outputs]
            for od in output_dirs:
                all_output_dirs.add(od)
    first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
    # Write out all: target, including mkdir for each output directory.
    mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
    for od in all_output_dirs:
        if od:
            mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
    mk_file.write('\n')
    # Define how each output is generated.
    for rule in rules:
        trigger_files = _FindRuleTriggerFiles(rule, sources)
        for tf in trigger_files:
            # Get all the inputs and outputs for this rule for this trigger file.
            inputs, outputs = _RuleInputsAndOutputs(rule, tf)
            inputs = [_Cygwinify(i) for i in inputs]
            outputs = [_Cygwinify(i) for i in outputs]
            # Prepare the command line for this rule.
            cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
            cmd = ['"%s"' % i for i in cmd]
            cmd = ' '.join(cmd)
            # Add it to the makefile.
            mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
            mk_file.write('\t%s\n\n' % cmd)
    # Close up the file.
    mk_file.close()
    # Add makefile to list of sources.
    sources.add(filename)
    # Add a build action to call makefile.
    cmd = ['make',
           'OutDir=$(OutDir)',
           'IntDir=$(IntDir)',
           '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
           '-f', filename]
    cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
    # Insert makefile as 0'th input, so it gets the action attached there,
    # as this is easier to understand from in the IDE.
    all_inputs = list(all_inputs)
    all_inputs.insert(0, filename)
    _AddActionStep(actions_to_add,
                   inputs=_FixPaths(all_inputs),
                   outputs=_FixPaths(all_outputs),
                   description='Running external rules for %s' %
                       spec['target_name'],
                   command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
# Matches a (possibly empty) run of backslashes followed by a run of
# commas/semicolons -- the VCProj list delimiters that need quoting.
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
    """Escapes command line arguments for MSVS.

    The VCProj format stores string lists in a single string using commas and
    semi-colons as separators, which must be quoted if they are to be
    interpreted literally. However, command-line arguments may already have
    quotes, and the VCProj parser is ignorant of the backslash escaping
    convention used by CommandLineToArgv, so the command-line quotes and the
    VCProj quotes may not be the same quotes. So to store a general
    command-line argument in a VCProj list, we need to parse the existing
    quoting according to VCProj's convention and quote any delimiters that are
    not already quoted by that convention. The quotes that we add will also be
    seen by CommandLineToArgv, so if backslashes precede them then we also have
    to escape those backslashes according to the CommandLineToArgv
    convention.

    Args:
      s: the string to be escaped.
    Returns:
      the escaped string.
    """
    def _Replace(match):
        # For a non-literal quote, CommandLineToArgv requires an even number of
        # backslashes preceding it, and it produces half as many literal
        # backslashes. So we need to produce 2n backslashes.
        return 2 * match.group(1) + '"' + match.group(2) + '"'
    segments = s.split('"')
    # The unquoted segments are at the even-numbered indices.
    for i in range(0, len(segments), 2):
        segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
    # Concatenate back into a single string
    s = '"'.join(segments)
    if len(segments) % 2 == 0:
        # String ends while still quoted according to VCProj's convention. This
        # means the delimiter and the next list item that follow this one in the
        # .vcproj file will be misinterpreted as part of this item. There is nothing
        # we can do about this. Adding an extra quote would correct the problem in
        # the VCProj but cause the same problem on the final command-line. Moving
        # the item to the end of the list does works, but that's only possible if
        # there's only one such item. Let's just warn the user.
        print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
                              'quotes in ' + s)
    return s
def _EscapeCppDefineForMSVS(s):
    """Escapes a CPP define so that it will reach the compiler unaltered."""
    # Apply the three escaping layers in order: cmd.exe env expansion,
    # CommandLineToArgv quoting, then VCProj list-delimiter quoting.
    for escape in (_EscapeEnvironmentVariableExpansion,
                   _EscapeCommandLineArgumentForMSVS,
                   _EscapeVCProjCommandLineArgListItem):
        s = escape(s)
    # cl.exe replaces literal # characters with = in preprocessor definitions
    # for some reason. Octal-encode to work around that.
    return s.replace('#', '\\%03o' % ord('#'))
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
def _EscapeCppDefineForMSBuild(s):
    """Escapes a CPP define so that it will reach the compiler unaltered."""
    # Same layering as the MSVS variant, but with MSBuild's own quoting
    # and special-character encoding.
    for escape in (_EscapeEnvironmentVariableExpansion,
                   _EscapeCommandLineArgumentForMSBuild,
                   _EscapeMSBuildSpecialCharacters):
        s = escape(s)
    # cl.exe replaces literal # characters with = in preprocessor definitions
    # for some reason. Octal-encode to work around that.
    return s.replace('#', '\\%03o' % ord('#'))
def _GenerateRulesForMSVS(p, output_dir, options, spec,
                          sources, excluded_sources,
                          actions_to_add):
  """Generate all the rules for a particular project.

  Arguments:
    p: the project
    output_dir: directory to emit rules to
    options: global options passed to the generator
    spec: the specification for this project
    sources: the set of all known source files in this project
    excluded_sources: the set of sources excluded from normal processing
    actions_to_add: deferred list of actions to add in
  """
  all_rules = spec.get('rules', [])
  native_rules = []
  external_rules = []
  # Split rules into those using a native rules file and external ones.
  for rule in all_rules:
    if int(rule.get('msvs_external_rule', 0)):
      external_rules.append(rule)
    else:
      native_rules.append(rule)
  if native_rules:
    _GenerateNativeRulesForMSVS(p, native_rules, output_dir, spec, options)
  if external_rules:
    _GenerateExternalRules(external_rules, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(spec, all_rules, sources, excluded_sources)
def _AdjustSourcesForRules(spec, rules, sources, excluded_sources):
  """Folds rule outputs (and their inputs) into the project's source sets."""
  external_builder = spec.get('msvs_external_builder')
  for rule in rules:
    # Only rules whose outputs are processed as sources contribute files.
    if not int(rule.get('process_outputs_as_sources', False)):
      continue
    for trigger_file in _FindRuleTriggerFiles(rule, sources):
      inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
      inputs = OrderedSet(_FixPaths(inputs))
      outputs = OrderedSet(_FixPaths(outputs))
      # The trigger file itself is already a source; don't re-add it.
      inputs.remove(_FixPath(trigger_file))
      sources.update(inputs)
      if not external_builder:
        # Inputs are excluded from the normal build; the rule builds them.
        excluded_sources.update(inputs)
      sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
  """Take inputs with actions attached out of the list of exclusions.

  Arguments:
    excluded_sources: list of source files not to be built.
    actions_to_add: dict of actions keyed on source file they're attached to.
  Returns:
    excluded_sources with files that have actions attached removed.
  """
  # Files that carry actions must stay in the build or the actions won't run.
  kept_for_actions = OrderedSet(_FixPaths(actions_to_add.keys()))
  return [source for source in excluded_sources
          if source not in kept_for_actions]
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
  """Get the guid for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
  Returns:
    the guid.
  Raises:
    ValueError: if the specified GUID is invalid.
  """
  # The default configuration may pin an explicit guid; otherwise derive a
  # stable one from the project path.
  default_config = _GetDefaultConfiguration(spec)
  guid = default_config.get('msvs_guid')
  if not guid:
    return MSVSNew.MakeGuid(proj_path)
  if VALID_MSVS_GUID_CHARS.match(guid) is None:
    raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
                     (guid, VALID_MSVS_GUID_CHARS.pattern))
  return '{%s}' % guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
  """Get the platform toolset for the project.

  Arguments:
    proj_path: Path of the vcproj or vcxproj file to generate.
    spec: The target dictionary containing the properties of the target.
    version: The MSVSVersion object.
  Returns:
    the platform toolset string or None.
  """
  default_config = _GetDefaultConfiguration(spec)
  toolset = default_config.get('msbuild_toolset')
  if not toolset:
    # Fall back to the version's default toolset, when it defines one.
    fallback = version.DefaultToolset()
    if fallback:
      toolset = fallback
  return toolset
def _GenerateProject(project, options, version, generator_flags):
  """Generates a vcproj file.

  Arguments:
    project: the MSVSProject object.
    options: global generator options.
    version: the MSVSVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that cannot be found on disk.
  """
  default_config = _GetDefaultConfiguration(project.spec)
  # An existing vcproj was supplied by the gyp file; emit nothing.
  if default_config.get('msvs_existing_vcproj'):
    return []
  if not version.UsesVcxproj():
    return _GenerateMSVSProject(project, options, version, generator_flags)
  return _GenerateMSBuildProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
  """Generates a .vcproj file. It may create .rules and .user files too.

  Arguments:
    project: The project object we will generate the file for.
    options: Global options passed to the generator.
    version: The VisualStudioVersion object.
    generator_flags: dict of generator-specific flags.
  Returns:
    The list of source files that could not be found on disk
    (as reported by _VerifySourcesExist).
  """
  spec = project.spec
  gyp.common.EnsureDirExists(project.path)
  platforms = _GetUniquePlatforms(spec)
  p = MSVSProject.Writer(project.path, version, spec['target_name'],
                         project.guid, platforms)
  # Get directory project file is in.
  project_dir = os.path.split(project.path)[0]
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  config_type = _GetMSVSConfigurationType(spec, project.build_file)
  # Emit one vcproj configuration per gyp configuration.
  for config_name, config in spec['configurations'].iteritems():
    _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
  # Prepare list of sources and excluded sources.
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  _GenerateRulesForMSVS(p, project_dir, options, spec,
                        sources, excluded_sources,
                        actions_to_add)
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  # Converts the source sets to lists and builds the filter hierarchy.
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
                                                sources, excluded_sources,
                                                list_excluded, version))
  # Add in files.
  missing_sources = _VerifySourcesExist(sources, project_dir)
  p.AddFiles(sources)
  _AddToolFilesToMSVS(p, spec)
  _HandlePreCompiledHeaders(p, sources, spec)
  _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
  _AddCopies(actions_to_add, spec)
  _WriteMSVSUserFile(project.path, version, spec)
  # NOTE: this stanza must appear after all actions have been decided.
  # Don't exclude sources with actions attached, or they won't run.
  excluded_sources = _FilterActionsFromExcluded(
      excluded_sources, actions_to_add)
  _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                              list_excluded)
  _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
  # Write it out.
  p.WriteIfChanged()
  return missing_sources
def _GetUniquePlatforms(spec):
  """Returns the list of unique platforms for this spec, e.g ['win32', ...].

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of unique platform names, in first-seen order.
  """
  platforms = OrderedSet()
  for config in spec['configurations'].itervalues():
    platforms.add(_ConfigPlatform(config))
  return list(platforms)
def _CreateMSVSUserFile(proj_path, version, spec):
  """Generates a .user file for the user running this Gyp program.

  Arguments:
    proj_path: The path of the project file being created.  The .user file
               shares the same path (with an appropriate suffix).
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  Returns:
    The MSVSUserFile object created.
  """
  domain, username = _GetDomainAndUserName()
  # foo.vcproj -> foo.vcproj.<domain>.<username>.user
  vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
  return MSVSUserFile.Writer(vcuser_filename, version, spec['target_name'])
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. May raise an exception.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
An integer, the configuration type.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
  """Adds a configuration to the MSVS project.

  Many settings in a vcproj file are specific to a configuration.  This
  function the main part of the vcproj file that's configuration specific.

  Arguments:
    p: The target project being generated.
    spec: The target dictionary containing the properties of the target.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
    config: The dictionary that defines the special processing to be done
        for this configuration.
  """
  # Get the information for this configuration
  include_dirs, resource_include_dirs = _GetIncludeDirs(config)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(config)
  out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
  defines = _GetDefines(config)
  # Escape each define so it survives the shell, VCProj quoting, and cl.exe.
  defines = [_EscapeCppDefineForMSVS(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(config)
  prebuild = config.get('msvs_prebuild')
  postbuild = config.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = config.get('msvs_precompiled_header')
  # Prepare the list of tools as a dictionary.
  tools = dict()
  # Add in user specified msvs_settings.
  msvs_settings = config.get('msvs_settings', {})
  MSVSSettings.ValidateMSVSSettings(msvs_settings)
  # Prevent default library inheritance from the environment.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
  # User settings go in first so generated settings below append to them.
  for tool in msvs_settings:
    settings = config['msvs_settings'][tool]
    for setting in settings:
      _ToolAppend(tools, tool, setting, settings[setting])
  # Add the information to the appropriate tool
  _ToolAppend(tools, 'VCCLCompilerTool',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(tools, 'VCResourceCompilerTool',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries.
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
  _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    # only_if_unset: an explicit OutputFile from msvs_settings wins.
    _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
  # Add defines.
  _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
  _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
              defines)
  # Change program database directory to prevent collisions.
  _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
              '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
  # Add disabled warnings.
  _ToolAppend(tools, 'VCCLCompilerTool',
              'DisableSpecificWarnings', disabled_warnings)
  # Add Pre-build.
  _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
  # Add Post-build.
  _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
    _ToolAppend(tools, 'VCCLCompilerTool',
                'PrecompiledHeaderThrough', precompiled_header)
    _ToolAppend(tools, 'VCCLCompilerTool',
                'ForcedIncludeFiles', precompiled_header)
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
  _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
  """Returns the list of directories to be used for #include directives.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    A pair of (include dirs, resource include dirs), both fixed-up paths.
  """
  # TODO(bradnelson): include_dirs should really be flexible enough not to
  # require this sort of thing.
  include_dirs = (config.get('include_dirs', []) +
                  config.get('msvs_system_include_dirs', []))
  # Resource includes default to the regular include dirs when unspecified.
  resource_include_dirs = config.get('resource_include_dirs', include_dirs)
  return _FixPaths(include_dirs), _FixPaths(resource_include_dirs)
def _GetLibraryDirs(config):
  """Returns the list of directories to be used for library search paths.

  Arguments:
    config: The dictionary that defines the special processing to be done
        for this configuration.
  Returns:
    The list of directory paths, fixed up for Windows.
  """
  return _FixPaths(config.get('library_dirs', []))
def _GetLibraries(spec):
  """Returns the list of libraries for this configuration.

  Arguments:
    spec: The target dictionary containing the properties of the target.
  Returns:
    The list of library names, '.lib'-suffixed, deduplicated keeping the
    last occurrence while preserving order.
  """
  libraries = spec.get('libraries', [])
  # Strip out -l, as it is not used on windows (but is needed so we can pass
  # in libraries that are assumed to be in the default library path).
  # Also remove duplicate entries, leaving only the last duplicate, while
  # preserving order.
  found = OrderedSet()
  unique_libraries_list = []
  for entry in reversed(libraries):
    # A plain startswith check replaces the old re.sub('^\-l', ...), whose
    # '\-' escape is deprecated in modern Python and needless here.
    library = entry[2:] if entry.startswith('-l') else entry
    if not os.path.splitext(library)[1]:
      library += '.lib'
    if library not in found:
      found.add(library)
      unique_libraries_list.append(library)
  unique_libraries_list.reverse()
  return unique_libraries_list
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
  """Convert tools to a form expected by Visual Studio.

  Arguments:
    tools: A dictionary of settings; the tool name is the key.
  Returns:
    A list of Tool objects.
  """
  tool_list = []
  for tool, settings in tools.iteritems():
    # Collapse list-valued settings into delimiter-joined strings.
    settings_fixed = {}
    for setting, value in settings.iteritems():
      if type(value) == list:
        # Linker AdditionalDependencies and any AdditionalOptions are
        # space-separated; everything else uses semicolons.
        space_joined = (
            (tool == 'VCLinkerTool' and
             setting == 'AdditionalDependencies') or
            setting == 'AdditionalOptions')
        joiner = ' ' if space_joined else ';'
        settings_fixed[setting] = joiner.join(value)
      else:
        settings_fixed[setting] = value
    # Add in this tool.
    tool_list.append(MSVSProject.Tool(tool, settings_fixed))
  return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
  """Add to the project file the configuration specified by config.

  Arguments:
    p: The target project being generated.
    spec: the target project dict.
    tools: A dictionary of settings; the tool name is the key.
    config: The dictionary that defines the special processing to be done
        for this configuration.
    config_type: The configuration type, a number as defined by Microsoft.
    config_name: The name of the configuration.
  """
  p.AddConfig(_ConfigFullName(config_name, config),
              attrs=_GetMSVSAttributes(spec, config, config_type),
              tools=_ConvertToolsToExpectedForm(tools))
def _GetMSVSAttributes(spec, config, config_type):
  """Builds the configuration-attribute dict for a vcproj configuration."""
  # Start from any user-specified configuration attributes.
  prepared_attrs = dict(config.get('msvs_configuration_attributes', {}))
  # Add props files.
  vsprops_dirs = _FixPaths(config.get('msvs_props', []))
  if vsprops_dirs:
    prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
  # Set configuration type.
  prepared_attrs['ConfigurationType'] = config_type
  output_dir = prepared_attrs.get('OutputDirectory',
                                  '$(SolutionDir)$(ConfigurationName)')
  prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
  if 'IntermediateDirectory' in prepared_attrs:
    # Normalize a user-supplied intermediate directory.
    intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
    intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
    prepared_attrs['IntermediateDirectory'] = intermediate
  else:
    # Default keeps per-project objects apart to prevent collisions.
    intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
    prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
  return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
  """Adds each source, normalized, into the given set."""
  for source in sources_array:
    sources_set.add(_NormalizedSource(source))
def _PrepareListOfSources(spec, generator_flags, gyp_file):
  """Prepare list of sources and excluded sources.

  Besides the sources specified directly in the spec, adds the gyp file so
  that a change to it will cause a re-compile. Also adds appropriate sources
  for actions and copies. Assumes later stage will un-exclude files which
  have custom build steps attached.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    generator_flags: dict of generator-specific flags.
    gyp_file: The name of the gyp file.
  Returns:
    A pair of (list of sources, list of excluded sources).
    The sources will be relative to the gyp file.
  """
  sources = OrderedSet()
  excluded_sources = OrderedSet()
  _AddNormalizedSources(sources, spec.get('sources', []))
  # The gyp file itself is a source, so edits to it trigger a regeneration
  # (skipped in standalone mode).
  if not generator_flags.get('standalone'):
    sources.add(gyp_file)
  external_builder = spec.get('msvs_external_builder')
  # Add in 'action' inputs and outputs.
  for action in spec.get('actions', []):
    action_inputs = OrderedSet(
        _NormalizedSource(i) for i in action['inputs'])
    sources.update(action_inputs)
    if not external_builder:
      # Action inputs are excluded from the normal build; a later stage
      # re-attaches the custom build step to them.
      excluded_sources.update(action_inputs)
    if int(action.get('process_outputs_as_sources', False)):
      _AddNormalizedSources(sources, action.get('outputs', []))
  # Add in 'copies' inputs and outputs.
  for cpy in spec.get('copies', []):
    _AddNormalizedSources(sources, cpy.get('files', []))
  return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
    spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
  """Adjusts the list of sources and excluded sources.

  Also converts the sets to lists.

  Arguments:
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    gyp_dir: The path to the gyp file being processed.
    sources: A set of sources to be included for this project.
    excluded_sources: A set of sources to be excluded for this project.
    list_excluded: Whether excluded files should be listed in the project.
    version: A MSVSVersion object.
  Returns:
    A trio of (list of sources, list of excluded sources,
    path of excluded IDL file)
  """
  # Exclude excluded sources coming into the generator.
  excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
  # Add excluded sources into sources for good measure.
  sources.update(excluded_sources)
  # Convert to proper windows form.
  # NOTE: sources goes from being a set to a list here.
  # NOTE: excluded_sources goes from being a set to a list here.
  sources = _FixPaths(sources)
  # Convert to proper windows form.
  excluded_sources = _FixPaths(excluded_sources)
  excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
  precompiled_related = _GetPrecompileRelatedFiles(spec)
  # Find the excluded ones, minus the precompiled header related ones.
  fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
  # Convert to folders and the right slashes.
  sources = [i.split('\\') for i in sources]
  sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
                                             list_excluded=list_excluded,
                                             msvs_version=version)
  # Prune filters with a single child to flatten ugly directory structures
  # such as ../../src/modules/module1 etc.
  while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
    sources = sources[0].contents
  return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
def _GetPrecompileRelatedFiles(spec):
  """Gathers the precompiled-header related sources across all configs."""
  # precomp_keys is the module-level list of spec keys naming pch files.
  return [_FixPath(config.get(key))
          for _, config in spec['configurations'].iteritems()
          for key in precomp_keys
          if config.get(key)]
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
                                list_excluded):
  """Marks each excluded file as ExcludedFromBuild in its configurations."""
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  num_configs = len(spec['configurations'])
  for file_name, excluded_configs in exclusions.iteritems():
    if not list_excluded and len(excluded_configs) == num_configs:
      # If we're not listing excluded files, then they won't appear in the
      # project, so don't try to configure them to be excluded.
      continue
    for config_name, config in excluded_configs:
      p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
                      {'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
  """Maps each excluded file to the (name, config) pairs it's excluded in."""
  exclusions = {}
  configurations = spec['configurations']
  # Exclude excluded sources from being built.
  for f in excluded_sources:
    excluded_configs = []
    for config_name, config in configurations.iteritems():
      # Precompiled-header related sources must keep building, so they are
      # not excluded in configs that reference them.
      precomped = [_FixPath(config.get(key, '')) for key in precomp_keys]
      if f not in precomped:
        excluded_configs.append((config_name, config))
    exclusions[f] = excluded_configs
  # idl files handled by an external rule are excluded in every config.
  for f in excluded_idl:
    exclusions[f] = [(config_name, config)
                     for config_name, config in configurations.iteritems()]
  return exclusions
def _AddToolFilesToMSVS(p, spec):
  """Registers every msvs_tool_files entry (rule files) with the project."""
  tool_files = OrderedSet()
  for _, config in spec['configurations'].iteritems():
    tool_files.update(config.get('msvs_tool_files', []))
  for tool_file in tool_files:
    p.AddToolFile(tool_file)
def _HandlePreCompiledHeaders(p, sources, spec):
  """Configures per-file precompiled-header flags on the project.

  The precompiled-header source stub gets UsePrecompiledHeader=1; every
  source whose language (C vs. C++) differs from the stub's gets
  precompiled headers disabled.
  """
  # Pre-compiled header source stubs need a different compiler flag
  # (generate precompiled header) and any source file not of the same
  # kind (i.e. C vs. C++) as the precompiled header source stub needs
  # to have use of precompiled headers disabled.
  extensions_excluded_from_precompile = []
  for config_name, config in spec['configurations'].iteritems():
    source = config.get('msvs_precompiled_source')
    if source:
      source = _FixPath(source)
      # UsePrecompiledHeader=1 for if using precompiled headers.
      tool = MSVSProject.Tool('VCCLCompilerTool',
                              {'UsePrecompiledHeader': '1'})
      p.AddFileConfig(source, _ConfigFullName(config_name, config),
                      {}, tools=[tool])
      basename, extension = os.path.splitext(source)
      # NOTE(review): this list is overwritten on each iteration, so the
      # last config with a precompiled source decides which extensions are
      # disabled below — confirm that is intended.
      if extension == '.c':
        extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
      else:
        extensions_excluded_from_precompile = ['.c']
  def DisableForSourceTree(source_tree):
    # Recursively walk the filter hierarchy, disabling precompiled headers
    # for every source of the "other" language.
    for source in source_tree:
      if isinstance(source, MSVSProject.Filter):
        DisableForSourceTree(source.contents)
      else:
        basename, extension = os.path.splitext(source)
        if extension in extensions_excluded_from_precompile:
          for config_name, config in spec['configurations'].iteritems():
            tool = MSVSProject.Tool('VCCLCompilerTool',
                                    {'UsePrecompiledHeader': '0',
                                     'ForcedIncludeFiles': '$(NOINHERIT)'})
            p.AddFileConfig(_FixPath(source),
                            _ConfigFullName(config_name, config),
                            {}, tools=[tool])
  # Do nothing if there was no precompiled source.
  if extensions_excluded_from_precompile:
    DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
  """Adds the spec's custom 'actions' to the deferred actions_to_add dict."""
  # Don't setup_env every time. When all the actions are run together in one
  # batch file in VS, the PATH will grow too long.
  # Membership in this set means that the cygwin environment has been set up,
  # and does not need to be set up again.
  have_setup_env = set()
  for action in spec.get('actions', []):
    # Attach the action to the gyp file when it declares no inputs.
    inputs = action.get('inputs') or [relative_path_of_gyp_file]
    attached_to = inputs[0]
    cmd = _BuildCommandLineForRule(
        spec, action, has_input_path=False,
        do_setup_env=attached_to not in have_setup_env)
    have_setup_env.add(attached_to)
    # Add the action.
    _AddActionStep(actions_to_add,
                   inputs=inputs,
                   outputs=action.get('outputs', []),
                   description=action.get('message', action['action_name']),
                   command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
  """Writes a .user file with debug settings for run_as or test targets.

  Arguments:
    project_path: The path of the project file being created.
    version: The VisualStudioVersion object.
    spec: The target dictionary containing the properties of the target.
  """
  # Add run_as and test targets.
  if 'run_as' in spec:
    run_as = spec['run_as']
    action = run_as.get('action', [])
    environment = run_as.get('environment', [])
    working_directory = run_as.get('working_directory', '.')
  elif int(spec.get('test', 0)):
    # Test targets default to running the built binary under gtest.
    action = ['$(TargetPath)', '--gtest_print_time']
    environment = []
    working_directory = '.'
  else:
    return  # Nothing to add
  # Write out the user file.
  user_file = _CreateMSVSUserFile(project_path, version, spec)
  for config_name, c_data in spec['configurations'].iteritems():
    user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
                               action, environment, working_directory)
  user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
  """Converts the spec's 'copies' entries into deferred action steps."""
  for inputs, outputs, cmd, description in _GetCopies(spec):
    _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
                   description=description, command=cmd)
def _GetCopies(spec):
  """Returns (inputs, outputs, command, description) tuples for 'copies'.

  Each tuple describes one copy step suitable for _AddActionStep.
  """
  copies = []
  # Add copies.
  for cpy in spec.get('copies', []):
    for src in cpy.get('files', []):
      dst = os.path.join(cpy['destination'], os.path.basename(src))
      # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
      # outputs, so do the same for our generated command line.
      if src.endswith('/'):
        # Directory copy: xcopy the whole tree under the destination.
        src_bare = src[:-1]
        base_dir = posixpath.split(src_bare)[0]
        outer_dir = posixpath.split(src_bare)[1]
        cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
            _FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
        # 'dummy_copies' keeps the output list non-identical to the input.
        copies.append(([src], ['dummy_copies', dst], cmd,
                       'Copying %s to %s' % (src, dst)))
      else:
        # Single-file copy; create the destination directory first, and
        # reset ERRORLEVEL in case mkdir failed because it already exists.
        cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
            _FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
        copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
  return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
def _DictsToFolders(base_path, bucket, flat):
  """Recursively converts the path-dict tree into solution folder entries."""
  children = []
  for folder, contents in bucket.iteritems():
    if type(contents) != dict:
      # Leaf: an actual project object.
      children.append(contents)
      continue
    subfolder_path = os.path.join(base_path, folder)
    folder_children = _DictsToFolders(subfolder_path, contents, flat)
    if flat:
      # Flat mode hoists everything to the top level.
      children += folder_children
    else:
      children.append(MSVSNew.MSVSFolder(subfolder_path,
                                         name='(' + folder + ')',
                                         entries=folder_children))
  return children
def _CollapseSingles(parent, node):
# Recursively explorer the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
def _GatherSolutionFolders(sln_projects, project_objects, flat):
  """Arranges the given projects into a tree of solution folders."""
  root = {}
  # Convert into a tree of dicts keyed on path components.
  for p in sln_projects:
    gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
    path_dict = _GetPathDict(root, os.path.dirname(gyp_file))
    path_dict[target + '.vcproj'] = project_objects[p]
  # Walk down from the top until we hit a folder that has more than one entry.
  # In practice, this strips the top-level "src/" dir from the hierarchy in
  # the solution.
  while len(root) == 1 and type(root[root.keys()[0]]) == dict:
    root = root[root.keys()[0]]
  # Collapse singles.
  root = _CollapseSingles('', root)
  # Merge buckets until everything is a root entry.
  return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
  """Returns the path of the generated project file and a path-fix prefix.

  Arguments:
    qualified_target: The qualified target name.
    spec: The target dictionary containing the properties of the target.
    options: Global generator options.
    msvs_version: The MSVSVersion object.
  Returns:
    A pair of (project file path, prefix for fixing up relative paths,
    or None when no fix-up is needed).
  """
  default_config = _GetDefaultConfiguration(spec)
  proj_filename = default_config.get('msvs_existing_vcproj')
  if not proj_filename:
    proj_filename = (spec['target_name'] + options.suffix +
                     msvs_version.ProjectExtension())
  build_file = gyp.common.BuildFile(qualified_target)
  proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
  fix_prefix = None
  if options.generator_output:
    # The project is emitted under generator_output, so paths relative to
    # the gyp file must be re-rooted relative to the emitted location.
    project_dir_path = os.path.dirname(os.path.abspath(proj_path))
    proj_path = os.path.join(options.generator_output, proj_path)
    fix_prefix = gyp.common.RelativePath(project_dir_path,
                                         os.path.dirname(proj_path))
  return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
  """Maps each solution configuration to the project configuration to build.

  Prepare a dict indicating which project configurations are used for which
  solution configurations for this target.
  """
  config_platform_overrides = {}
  for config_name, c in spec['configurations'].iteritems():
    # msvs_target_platform overrides the configuration's own platform.
    platform = c.get('msvs_target_platform', _ConfigPlatform(c))
    fixed_config_fullname = '%s|%s' % (
        _ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
    config_platform_overrides[_ConfigFullName(config_name, c)] = (
        fixed_config_fullname)
  return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
  """Create a MSVSProject object for the targets found in target list.

  Arguments:
    target_list: the list of targets to generate project objects for.
    target_dicts: the dictionary of specifications.
    options: global generator options.
    msvs_version: the MSVSVersion object.
  Returns:
    A set of created projects, keyed by target.
  """
  global fixpath_prefix
  # Generate each project.
  projects = {}
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      # This generator only handles the 'target' toolset.
      raise GypError(
          'Multiple toolsets not supported in msvs build (target %s)' %
          qualified_target)
    # NOTE: fixpath_prefix is a module-level global consulted by path-fixing
    # helpers; it is (re)assigned here per project.
    proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
                                                  options, msvs_version)
    guid = _GetGuidOfProject(proj_path, spec)
    overrides = _GetPlatformOverridesOfProject(spec)
    build_file = gyp.common.BuildFile(qualified_target)
    # Create object for this project.
    obj = MSVSNew.MSVSProject(
        proj_path,
        name=spec['target_name'],
        guid=guid,
        spec=spec,
        build_file=build_file,
        config_platform_overrides=overrides,
        fixpath_prefix=fixpath_prefix)
    # Set project toolset if any (MS build only)
    if msvs_version.UsesVcxproj():
      obj.set_msbuild_toolset(
          _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
    projects[qualified_target] = obj
  # Set all the dependencies, but not if we are using an external builder like
  # ninja
  for project in projects.values():
    if not project.spec.get('msvs_external_builder'):
      deps = project.spec.get('dependencies', [])
      deps = [projects[d] for d in deps]
      project.set_dependencies(deps)
  return projects
def _InitNinjaFlavor(options, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
options: Options provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
spec['msvs_external_builder_out_dir'] = \
options.depth + '/out/$(Configuration)'
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-t',
'clean',
'$(ProjectName)',
]
def CalculateVariables(default_variables, params):
  """Generated variables that require params to be known."""
  generator_flags = params.get('generator_flags', {})

  # Select project file format version (if unset, default to auto detecting).
  msvs_version = MSVSVersion.SelectVisualStudioVersion(
      generator_flags.get('msvs_version', 'auto'))
  # Stash msvs_version for later (so we don't have to probe the system twice).
  params['msvs_version'] = msvs_version

  # Set a variable so conditions can be based on msvs_version.
  default_variables['MSVS_VERSION'] = msvs_version.ShortName()

  # To determine processor word size on Windows, in addition to checking
  # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
  # contains the actual word size of the system when running thru WOW64).
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  arch_wow64 = os.environ.get('PROCESSOR_ARCHITEW6432', '')
  if '64' in arch or '64' in arch_wow64:
    default_variables['MSVS_OS_BITS'] = 64
  else:
    default_variables['MSVS_OS_BITS'] = 32

  if gyp.common.GetFlavor(params) == 'ninja':
    default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
  """Build the requested configurations of each .gyp's solution with devenv.

  Arguments:
    data: dictionary containing per .gyp data.
    configurations: list of configuration names to build.
    params: generator parameters; must contain 'options' and the
        'msvs_version' stashed by CalculateVariables.
  """
  options = params['options']
  msvs_version = params['msvs_version']
  # devenv.com is the command-line front end of Visual Studio.
  devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
  for build_file, build_file_dict in data.iteritems():
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    # Only .gyp files produce solutions; skip includes etc.
    if build_file_ext != '.gyp':
      continue
    sln_path = build_file_root + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    for config in configurations:
      arguments = [devenv, sln_path, '/Build', config]
      print 'Building [%s]: %s' % (config, arguments)
      # check_call raises CalledProcessError if the build fails.
      rtn = subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate .sln and .vcproj files.

  This is the entry point for this generator.

  Arguments:
    target_list: List of target pairs: 'base/base.gyp:base'.
    target_dicts: Dict of target properties keyed on target pair.
    data: Dictionary containing per .gyp data.
    params: generator parameters (options, flavor, stashed msvs_version).
  """
  global fixpath_prefix

  options = params['options']

  # Get the project file format version back out of where we stashed it in
  # GeneratorCalculatedVariables.
  msvs_version = params['msvs_version']

  generator_flags = params.get('generator_flags', {})

  # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
  (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)

  # Optionally use the large PDB workaround for targets marked with
  # 'msvs_large_pdb': 1.
  (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
      target_list, target_dicts, generator_default_variables)

  # Optionally configure each spec to use ninja as the external builder.
  if params.get('flavor') == 'ninja':
    _InitNinjaFlavor(options, target_list, target_dicts)

  # Prepare the set of configurations.
  configs = set()
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    for config_name, config in spec['configurations'].iteritems():
      configs.add(_ConfigFullName(config_name, config))
  configs = list(configs)

  # Figure out all the projects that will be generated and their guids
  project_objects = _CreateProjectObjects(target_list, target_dicts, options,
                                          msvs_version)

  # Generate each project.
  missing_sources = []
  for project in project_objects.values():
    # fixpath_prefix is a module-level global consulted by path-fixing
    # helpers while this particular project is being written out.
    fixpath_prefix = project.fixpath_prefix
    missing_sources.extend(_GenerateProject(project, options, msvs_version,
                                            generator_flags))
  fixpath_prefix = None

  for build_file in data:
    # Validate build_file extension
    if not build_file.endswith('.gyp'):
      continue
    sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
    if options.generator_output:
      sln_path = os.path.join(options.generator_output, sln_path)
    # Get projects in the solution, and their dependents.
    sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
    sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
    # Create folder hierarchy.
    root_entries = _GatherSolutionFolders(
        sln_projects, project_objects, flat=msvs_version.FlatSolution())
    # Create solution.
    sln = MSVSNew.MSVSSolution(sln_path,
                               entries=root_entries,
                               variants=configs,
                               websiteProperties=False,
                               version=msvs_version)
    sln.Write()

  if missing_sources:
    error_message = "Missing input files:\n" + \
                    '\n'.join(set(missing_sources))
    if generator_flags.get('msvs_error_on_missing_sources', False):
      raise GypError(error_message)
    else:
      # Best effort: warn but do not fail the generation.
      print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
                                extension_to_rule_name):
  """Generate the filters file.

  This file is used by Visual Studio to organize the presentation of source
  files into folders.

  Arguments:
    filters_path: The path of the file to be created.
    source_files: The hierarchical structure of all the sources.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
  """
  filter_group = []
  source_group = []
  _AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
                           filter_group, source_group)
  if not filter_group:
    # No folders are needed; delete any stale filter file left behind.
    if os.path.exists(filters_path):
      os.unlink(filters_path)
    return
  content = ['Project',
             {'ToolsVersion': '4.0',
              'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             },
             ['ItemGroup'] + filter_group,
             ['ItemGroup'] + source_group
            ]
  easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
                             extension_to_rule_name,
                             filter_group, source_group):
  """Creates the list of filters and sources to be added in the filter file.

  Args:
    parent_filter_name: The name of the filter under which the sources are
        found.
    sources: The hierarchy of filters and sources to process.
    extension_to_rule_name: A dictionary mapping file extensions to rules.
    filter_group: The list to which filter entries will be appended.
    source_group: The list to which source entries will be appended.
  """
  for source in sources:
    if not isinstance(source, MSVSProject.Filter):
      # A plain source file: emit an entry typed by its extension.
      _, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
      entry = [element, {'Include': source}]
      if parent_filter_name:
        # Record which folder (filter) the file is displayed under.
        entry.append(['Filter', parent_filter_name])
      source_group.append(entry)
      continue
    # A sub-filter: compose its full name from the parent's.
    if parent_filter_name:
      filter_name = '%s\\%s' % (parent_filter_name, source.name)
    else:
      filter_name = source.name
    filter_group.append(
        ['Filter', {'Include': filter_name},
         ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
    # Recurse and add the filter's dependents.
    _AppendFiltersForMSBuild(filter_name, source.contents,
                             extension_to_rule_name,
                             filter_group, source_group)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
def _GenerateRulesForMSBuild(output_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, extension_to_rule_name):
  """Generate the MSBuild machinery for a target's custom rules.

  MSBuild rules are implemented using three files: an XML file, a .targets
  file and a .props file.
  See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
  for more details.
  """
  rules = spec.get('rules', [])
  # Split rules into those realized natively in MSBuild versus those run
  # through an external script.
  rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
  rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]

  msbuild_rules = []
  for rule in rules_native:
    if 'action' not in rule and not rule.get('rule_sources', []):
      # Skip a rule with no action and no inputs.
      continue
    msbuild_rule = MSBuildRule(rule, spec)
    msbuild_rules.append(msbuild_rule)
    # Let source classification route files of this extension to the rule.
    extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
  if msbuild_rules:
    base = spec['target_name'] + options.suffix
    props_name = base + '.props'
    targets_name = base + '.targets'
    props_files_of_rules.add(props_name)
    targets_files_of_rules.add(targets_name)
    _GenerateMSBuildRulePropsFile(os.path.join(output_dir, props_name),
                                  msbuild_rules)
    _GenerateMSBuildRuleTargetsFile(os.path.join(output_dir, targets_name),
                                    msbuild_rules)
    _GenerateMSBuildRuleXmlFile(os.path.join(output_dir, base + '.xml'),
                                msbuild_rules)
  if rules_external:
    _GenerateExternalRules(rules_external, output_dir, spec,
                           sources, options, actions_to_add)
  _AdjustSourcesForRules(spec, rules, sources, excluded_sources)
class MSBuildRule(object):
  """Used to store information used to generate an MSBuild rule.

  Attributes:
    rule_name: The rule name, sanitized to use in XML.
    target_name: The name of the target.
    after_targets: The name of the AfterTargets element.
    before_targets: The name of the BeforeTargets element.
    depends_on: The name of the DependsOn element.
    compute_output: The name of the ComputeOutput element.
    dirs_to_make: The name of the DirsToMake element.
    inputs: The name of the _inputs element.
    tlog: The name of the _tlog element.
    extension: The extension this rule applies to.
    description: The message displayed when this rule is invoked.
    additional_dependencies: A string listing additional dependencies.
    outputs: The outputs of this rule.
    command: The command used to run the rule.
  """

  def __init__(self, rule, spec):
    self.display_name = rule['rule_name']
    # MSBuild element names must be valid XML names: keep word characters
    # only, replacing everything else with '_'.
    self.rule_name = re.sub(r'\W', '_', self.display_name)
    # Create the various element names, following the example set by the
    # Visual Studio 2008 to 2010 conversion.  I don't know if VS2010
    # is sensitive to the exact names.
    self.target_name = '_' + self.rule_name
    self.after_targets = self.rule_name + 'AfterTargets'
    self.before_targets = self.rule_name + 'BeforeTargets'
    self.depends_on = self.rule_name + 'DependsOn'
    self.compute_output = 'Compute%sOutput' % self.rule_name
    self.dirs_to_make = self.rule_name + 'DirsToMake'
    self.inputs = self.rule_name + '_inputs'
    self.tlog = self.rule_name + '_tlog'
    # Normalize the extension to include the leading dot.
    extension = rule['extension']
    if not extension.startswith('.'):
      extension = '.' + extension
    self.extension = extension
    self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
        rule.get('message', self.rule_name))
    # Convert inputs/outputs to MSBuild macro syntax and join with ';'.
    self.additional_dependencies = ';'.join(
        MSVSSettings.ConvertVCMacrosToMSBuild(i)
        for i in _FixPaths(rule.get('inputs', [])))
    self.outputs = ';'.join(
        MSVSSettings.ConvertVCMacrosToMSBuild(o)
        for o in _FixPaths(rule.get('outputs', [])))
    self.command = MSVSSettings.ConvertVCMacrosToMSBuild(
        _BuildCommandLineForRule(spec, rule, has_input_path=True,
                                 do_setup_env=True))
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
  """Generate the .props file.

  For each rule this emits the scheduling defaults (BeforeTargets /
  AfterTargets / DependsOn) and the per-item command, outputs, description
  and dependency templates consumed by the generated .targets file.

  Arguments:
    props_path: path of the .props file to write.
    msbuild_rules: list of MSBuildRule objects.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
  for rule in msbuild_rules:
    content.extend([
        # Default scheduling: after Midl, before CustomBuild — unless the
        # project already set the properties or is a Makefile project.
        ['PropertyGroup',
         {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
          "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
                                                    rule.after_targets)
         },
         [rule.before_targets, 'Midl'],
         [rule.after_targets, 'CustomBuild'],
        ],
        ['PropertyGroup',
         [rule.depends_on,
          {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
          '_SelectedFiles;$(%s)' % rule.depends_on
         ],
        ],
        # Per-item defaults consumed by the generated target.
        ['ItemDefinitionGroup',
         [rule.rule_name,
          ['CommandLineTemplate', rule.command],
          ['Outputs', rule.outputs],
          ['ExecutionDescription', rule.description],
          ['AdditionalDependencies', rule.additional_dependencies],
         ],
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
  """Generate the .targets file.

  Emits, per rule: the AvailableItemName registration, a UsingTask backed by
  the generated .xml schema, the main Target that runs the rule's command
  (with selected-files filtering and read/write tracking logs), and a
  Compute...Output target that creates output directories and feeds outputs
  to Link/Lib.

  Arguments:
    targets_path: path of the .targets file to write.
    msbuild_rules: list of MSBuildRule objects.
  """
  content = ['Project',
             {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
             }
            ]
  item_group = [
      'ItemGroup',
      ['PropertyPageSchema',
       {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
      ]
    ]
  for rule in msbuild_rules:
    item_group.append(
        ['AvailableItemName',
         {'Include': rule.rule_name},
         ['Targets', rule.target_name],
        ])
  content.append(item_group)
  for rule in msbuild_rules:
    # The rule's task is defined by the generated .xml schema via
    # XamlTaskFactory.
    content.append(
        ['UsingTask',
         {'TaskName': rule.rule_name,
          'TaskFactory': 'XamlTaskFactory',
          'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
         },
         ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
        ])
  for rule in msbuild_rules:
    rule_name = rule.rule_name
    target_outputs = '%%(%s.Outputs)' % rule_name
    target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
                     '$(MSBuildProjectFile)') % (rule_name, rule_name)
    rule_inputs = '%%(%s.Identity)' % rule_name
    extension_condition = ("'%(Extension)'=='.obj' or "
                           "'%(Extension)'=='.res' or "
                           "'%(Extension)'=='.rsc' or "
                           "'%(Extension)'=='.lib'")
    # When building only selected files in the IDE, drop the rule items that
    # are not selected.
    remove_section = [
        'ItemGroup',
        {'Condition': "'@(SelectedFiles)' != ''"},
        [rule_name,
         {'Remove': '@(%s)' % rule_name,
          'Condition': "'%(Identity)' != '@(SelectedFiles)'"
         }
        ]
    ]
    inputs_section = [
        'ItemGroup',
        [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
    ]
    # Collect (source, inputs) pairs for the tracking logs below.
    logging_section = [
        'ItemGroup',
        [rule.tlog,
         {'Include': '%%(%s.Outputs)' % rule_name,
          'Condition': ("'%%(%s.Outputs)' != '' and "
                        "'%%(%s.ExcludedFromBuild)' != 'true'" %
                        (rule_name, rule_name))
         },
         ['Source', "@(%s, '|')" % rule_name],
         ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
        ],
    ]
    message_section = [
        'Message',
        {'Importance': 'High',
         'Text': '%%(%s.ExecutionDescription)' % rule_name
        }
    ]
    # .write.1.tlog / .read.1.tlog are consumed by VS's incremental-build
    # file tracker.
    write_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
         "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).write.1.tlog',
         'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
                                                            rule.tlog)
        }
    ]
    read_tlog_section = [
        'WriteLinesToFile',
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
         "'true'" % (rule.tlog, rule.tlog),
         'File': '$(IntDir)$(ProjectName).read.1.tlog',
         'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
        }
    ]
    # The actual task invocation, parameterized by the per-item metadata
    # defaults from the .props file.
    command_and_input_section = [
        rule_name,
        {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
         "'true'" % (rule_name, rule_name),
         'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
         'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
         'Inputs': rule_inputs
        }
    ]
    content.extend([
        ['Target',
         {'Name': rule.target_name,
          'BeforeTargets': '$(%s)' % rule.before_targets,
          'AfterTargets': '$(%s)' % rule.after_targets,
          'Condition': "'@(%s)' != ''" % rule_name,
          'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
                                            rule.compute_output),
          'Outputs': target_outputs,
          'Inputs': target_inputs
         },
         remove_section,
         inputs_section,
         logging_section,
         message_section,
         write_tlog_section,
         read_tlog_section,
         command_and_input_section,
        ],
        ['PropertyGroup',
         ['ComputeLinkInputsTargets',
          '$(ComputeLinkInputsTargets);',
          '%s;' % rule.compute_output
         ],
         ['ComputeLibInputsTargets',
          '$(ComputeLibInputsTargets);',
          '%s;' % rule.compute_output
         ],
        ],
        ['Target',
         {'Name': rule.compute_output,
          'Condition': "'@(%s)' != ''" % rule_name
         },
         ['ItemGroup',
          [rule.dirs_to_make,
           {'Condition': "'@(%s)' != '' and "
            "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
            'Include': '%%(%s.Outputs)' % rule_name
           }
          ],
          # Outputs with linkable extensions are handed to Link/Lib/ImpLib.
          ['Link',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['Lib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
          ['ImpLib',
           {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
            'Condition': extension_condition
           }
          ],
         ],
         ['MakeDir',
          {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
                           rule.dirs_to_make)
          }
         ]
        ],
    ])
  easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
  """Generate the .xml schema file for the rules.

  The schema describes each rule's property pages (General and Command Line
  categories) for the Visual Studio property UI, and registers the item
  type / file extension / content type associating sources with the rule.

  Arguments:
    xml_path: path of the .xml file to write.
    msbuild_rules: list of MSBuildRule objects.
  """
  # Generate the .xml file
  content = [
      'ProjectSchemaDefinitions',
      {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
                 'assembly=Microsoft.Build.Framework'),
       'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
       'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
       'xmlns:transformCallback':
       'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
      }
  ]
  for rule in msbuild_rules:
    content.extend([
        ['Rule',
         {'Name': rule.rule_name,
          'PageTemplate': 'tool',
          'DisplayName': rule.display_name,
          'Order': '200'
         },
         ['Rule.DataSource',
          ['DataSource',
           {'Persistence': 'ProjectFile',
            'ItemType': rule.rule_name
           }
          ]
         ],
         ['Rule.Categories',
          ['Category',
           {'Name': 'General'},
           ['Category.DisplayName',
            ['sys:String', 'General'],
           ],
          ],
          ['Category',
           {'Name': 'Command Line',
            'Subtype': 'CommandLine'
           },
           ['Category.DisplayName',
            ['sys:String', 'Command Line'],
           ],
          ],
         ],
         ['StringListProperty',
          {'Name': 'Inputs',
           'Category': 'Command Line',
           'IsRequired': 'true',
           'Switch': ' '
          },
          ['StringListProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': rule.rule_name,
             'SourceType': 'Item'
            }
           ]
          ],
         ],
         ['StringProperty',
          {'Name': 'CommandLineTemplate',
           'DisplayName': 'Command Line',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         # 'Execute Before' selector; excludes itself and Compute* targets
         # from the candidate list.
         ['DynamicEnumProperty',
          {'Name': rule.before_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute Before'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', 'Specifies the targets for the build customization'
            ' to run before.'
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.before_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         # 'Execute After' selector, analogous to the one above.
         ['DynamicEnumProperty',
          {'Name': rule.after_targets,
           'Category': 'General',
           'EnumProvider': 'Targets',
           'IncludeInCommandLine': 'False'
          },
          ['DynamicEnumProperty.DisplayName',
           ['sys:String', 'Execute After'],
          ],
          ['DynamicEnumProperty.Description',
           ['sys:String', ('Specifies the targets for the build customization'
                           ' to run after.')
           ],
          ],
          ['DynamicEnumProperty.ProviderSettings',
           ['NameValuePair',
            {'Name': 'Exclude',
             'Value': '^%s|^Compute' % rule.after_targets
            }
           ]
          ],
          ['DynamicEnumProperty.DataSource',
           ['DataSource',
            {'Persistence': 'ProjectFile',
             'ItemType': '',
             'HasConfigurationCondition': 'true'
            }
           ]
          ],
         ],
         ['StringListProperty',
          {'Name': 'Outputs',
           'DisplayName': 'Outputs',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringProperty',
          {'Name': 'ExecutionDescription',
           'DisplayName': 'Execution Description',
           'Visible': 'False',
           'IncludeInCommandLine': 'False'
          }
         ],
         ['StringListProperty',
          {'Name': 'AdditionalDependencies',
           'DisplayName': 'Additional Dependencies',
           'IncludeInCommandLine': 'False',
           'Visible': 'false'
          }
         ],
         ['StringProperty',
          {'Subtype': 'AdditionalOptions',
           'Name': 'AdditionalOptions',
           'Category': 'Command Line'
          },
          ['StringProperty.DisplayName',
           ['sys:String', 'Additional Options'],
          ],
          ['StringProperty.Description',
           ['sys:String', 'Additional Options'],
          ],
         ],
        ],
        # Associate the item type, file extension and content type with the
        # rule so VS routes matching sources to it.
        ['ItemType',
         {'Name': rule.rule_name,
          'DisplayName': rule.display_name
         }
        ],
        ['FileExtension',
         {'Name': '*' + rule.extension,
          'ContentType': rule.rule_name
         }
        ],
        ['ContentType',
         {'Name': rule.rule_name,
          'DisplayName': '',
          'ItemType': rule.rule_name
         }
        ]
    ])
  easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
  """Return the MSBuild Condition string selecting this configuration."""
  configuration, platform = _GetConfigurationAndPlatform(name, settings)
  return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
          (configuration, platform))
def _GetMSBuildProjectConfigurations(configurations):
  """Build the ProjectConfigurations ItemGroup for an MSBuild project."""
  group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
  # Keys are unique, so sorting names matches sorting (name, settings) pairs.
  for name in sorted(configurations):
    settings = configurations[name]
    configuration, platform = _GetConfigurationAndPlatform(name, settings)
    group.append(
        ['ProjectConfiguration',
         {'Include': '%s|%s' % (configuration, platform)},
         ['Configuration', configuration],
         ['Platform', platform]])
  return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
  """Build the per-configuration 'Configuration' PropertyGroup entries."""
  properties = {}
  for name, settings in spec['configurations'].iteritems():
    msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
    condition = _GetConfigurationCondition(name, settings)
    _AddConditionalProperty(properties, condition, 'ConfigurationType',
                            msbuild_attributes['ConfigurationType'])
    # CharacterSet is optional; only emit it when the attributes define it.
    character_set = msbuild_attributes.get('CharacterSet')
    if character_set:
      _AddConditionalProperty(properties, condition, 'CharacterSet',
                              character_set)
  return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
  """Return the ImportGroup(s) that pull in property sheets.

  Always imports the user's Microsoft.Cpp.<Platform>.user.props sheet.  When
  any configuration lists 'msbuild_props', a conditional ImportGroup is
  emitted per configuration that also imports those files.

  Arguments:
    configurations: dict mapping configuration name to its settings dict.
  Returns:
    A list of easy_xml 'ImportGroup' element specs.
  """
  user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
  additional_props = {}
  props_specified = False
  for name, settings in sorted(configurations.iteritems()):
    configuration = _GetConfigurationCondition(name, settings)
    # Fix: use 'in' rather than the deprecated dict.has_key() (removed in
    # Python 3); behavior is identical.
    if 'msbuild_props' in settings:
      additional_props[configuration] = _FixPaths(settings['msbuild_props'])
      props_specified = True
    else:
      additional_props[configuration] = ''

  if not props_specified:
    # No per-configuration sheets: a single unconditional ImportGroup.
    return [
        ['ImportGroup',
         {'Label': 'PropertySheets'},
         ['Import',
          {'Project': user_props,
           'Condition': "exists('%s')" % user_props,
           'Label': 'LocalAppDataPlatform'
          }
         ]
        ]
    ]
  else:
    sheets = []
    for condition, props in additional_props.iteritems():
      import_group = [
          'ImportGroup',
          {'Label': 'PropertySheets',
           'Condition': condition
          },
          ['Import',
           {'Project': user_props,
            'Condition': "exists('%s')" % user_props,
            'Label': 'LocalAppDataPlatform'
           }
          ]
      ]
      for props_file in props:
        import_group.append(['Import', {'Project': props_file}])
      sheets.append(import_group)
    return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
  """Convert MSVS configuration attributes to their MSBuild equivalents.

  Directory attributes get VC macros converted and a trailing backslash;
  CharacterSet and ConfigurationType are mapped from numeric MSVS values to
  MSBuild strings.  Unknown attributes produce a warning and are dropped.
  """
  config_type = _GetMSVSConfigurationType(spec, build_file)
  msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
  msbuild_attributes = {}
  for a in msvs_attributes:
    if a in ['IntermediateDirectory', 'OutputDirectory']:
      directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
      # MSBuild expects directory properties to end with a backslash.
      if not directory.endswith('\\'):
        directory += '\\'
      msbuild_attributes[a] = directory
    elif a == 'CharacterSet':
      msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
    elif a == 'ConfigurationType':
      msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
    else:
      print 'Warning: Do not know how to convert MSVS attribute ' + a
  return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
  """Compute the MSBuild configuration attributes for one configuration.

  Either converts the MSVS-style attributes or, when the config supplies
  explicit 'msbuild_configuration_attributes', starts from those and fills
  in defaults.  Also derives TargetName/TargetExt/TargetPath and the
  external-builder output directory.
  """
  if 'msbuild_configuration_attributes' not in config:
    msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
  else:
    config_type = _GetMSVSConfigurationType(spec, build_file)
    config_type = _ConvertMSVSConfigurationType(config_type)
    msbuild_attributes = config.get('msbuild_configuration_attributes', {})
    msbuild_attributes.setdefault('ConfigurationType', config_type)
    output_dir = msbuild_attributes.get('OutputDirectory',
                                        '$(SolutionDir)$(Configuration)')
    # MSBuild directory properties need a trailing backslash.
    msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
    if 'IntermediateDirectory' not in msbuild_attributes:
      intermediate = _FixPath('$(Configuration)') + '\\'
      msbuild_attributes['IntermediateDirectory'] = intermediate
    if 'CharacterSet' in msbuild_attributes:
      msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
          msbuild_attributes['CharacterSet'])
  if 'TargetName' not in msbuild_attributes:
    # Default the target name from product_prefix/product_name.
    prefix = spec.get('product_prefix', '')
    product_name = spec.get('product_name', '$(ProjectName)')
    target_name = prefix + product_name
    msbuild_attributes['TargetName'] = target_name
  if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
    ext = spec.get('product_extension')
    msbuild_attributes['TargetExt'] = '.' + ext

  if spec.get('msvs_external_builder'):
    # External builders (e.g. ninja) write outputs to their own directory.
    external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
    msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'

  # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
  # (depending on the tool used) to avoid MSB8012 warning.
  msbuild_tool_map = {
      'executable': 'Link',
      'shared_library': 'Link',
      'loadable_module': 'Link',
      'static_library': 'Lib',
  }
  msbuild_tool = msbuild_tool_map.get(spec['type'])
  if msbuild_tool:
    msbuild_settings = config['finalized_msbuild_settings']
    out_file = msbuild_settings[msbuild_tool].get('OutputFile')
    if out_file:
      msbuild_attributes['TargetPath'] = _FixPath(out_file)
    target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
    if target_ext:
      msbuild_attributes['TargetExt'] = target_ext

  return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
  """Build the per-configuration global PropertyGroup entries.

  Emits IntDir/OutDir/TargetName (plus TargetExt/TargetPath, ExecutablePath
  and any global msbuild settings when present) conditioned on each
  configuration.
  """
  # TODO(jeanluc) We could optimize out the following and do it only if
  # there are actions.
  # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
  new_paths = []
  cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
  if cygwin_dirs:
    cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
    new_paths.append(cyg_path)
    # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
    # python_dir.
    python_path = cyg_path.replace('cygwin\\bin', 'python_26')
    new_paths.append(python_path)
    if new_paths:
      new_paths = '$(ExecutablePath);' + ';'.join(new_paths)

  properties = {}
  for (name, configuration) in sorted(configurations.iteritems()):
    condition = _GetConfigurationCondition(name, configuration)
    attributes = _GetMSBuildAttributes(spec, configuration, build_file)
    msbuild_settings = configuration['finalized_msbuild_settings']
    _AddConditionalProperty(properties, condition, 'IntDir',
                            attributes['IntermediateDirectory'])
    _AddConditionalProperty(properties, condition, 'OutDir',
                            attributes['OutputDirectory'])
    _AddConditionalProperty(properties, condition, 'TargetName',
                            attributes['TargetName'])
    # Bug fix: TargetExt used to be added twice here (once via an
    # "'TargetExt' in attributes" check and again via attributes.get()),
    # which duplicated the condition in the property's condition list and
    # could make _GetMSBuildPropertyGroup wrongly treat a per-configuration
    # value as common to all configurations.  Add it exactly once.
    if 'TargetExt' in attributes:
      _AddConditionalProperty(properties, condition, 'TargetExt',
                              attributes['TargetExt'])

    if attributes.get('TargetPath'):
      _AddConditionalProperty(properties, condition, 'TargetPath',
                              attributes['TargetPath'])

    if new_paths:
      _AddConditionalProperty(properties, condition, 'ExecutablePath',
                              new_paths)
    # The '' tool holds global (tool-independent) settings.
    tool_settings = msbuild_settings.get('', {})
    for name, value in sorted(tool_settings.iteritems()):
      formatted_value = _GetValueFormattedForMSBuild('', name, value)
      _AddConditionalProperty(properties, condition, name, formatted_value)
  return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
# Regex for msvs variable references ( i.e. $(FOO) ).
# Fix: use a raw string so the '\$' and '\(' escapes reach the regex engine
# verbatim; in a non-raw string '\$' is an invalid string escape (a warning
# on modern Python).  The compiled pattern is byte-identical.
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
def _GetMSBuildPropertyGroup(spec, label, properties):
  """Returns a PropertyGroup definition for the specified properties.

  Arguments:
    spec: The target project dict.
    label: An optional label for the PropertyGroup.
    properties: The dictionary to be converted.  The key is the name of the
        property.  The value is itself a dictionary; its key is the value and
        the value a list of condition for which this value is true.
  Returns:
    A single-element list holding the 'PropertyGroup' easy_xml spec.
  """
  group = ['PropertyGroup']
  if label:
    group.append({'Label': label})
  num_configurations = len(spec['configurations'])
  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    edges = set()
    for value in sorted(properties[node].keys()):
      # Add to edges all $(...) references to variables.
      #
      # Variable references that refer to names not in properties are excluded
      # These can exist for instance to refer built in definitions like
      # $(SolutionDir).
      #
      # Self references are ignored. Self reference is used in a few places to
      # append to the default value. I.e. PATH=$(PATH);other_path
      edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
                        if v in properties and v != node]))
    return edges
  properties_ordered = gyp.common.TopologicallySorted(
      properties.keys(), GetEdges)
  # Walk properties in the reverse of a topological sort on
  # user_of_variable -> used_variable as this ensures variables are
  # defined before they are used.
  # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
  for name in reversed(properties_ordered):
    values = properties[name]
    for value, conditions in sorted(values.iteritems()):
      if len(conditions) == num_configurations:
        # If the value is the same all configurations,
        # just add one unconditional entry.
        group.append([name, value])
      else:
        for condition in conditions:
          group.append([name, {'Condition': condition}, value])
  return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
  """Returns one ItemDefinitionGroup section per configuration.

  Each section carries the formatted per-tool settings for that
  configuration, guarded by the configuration's condition.
  """
  groups = []
  for (config_name, configuration) in sorted(configurations.iteritems()):
    settings = configuration['finalized_msbuild_settings']
    condition = _GetConfigurationCondition(config_name, configuration)
    group = ['ItemDefinitionGroup', {'Condition': condition}]
    for tool_name, tool_settings in sorted(settings.iteritems()):
      # The tool named '' is a holder of global settings handled by
      # _GetMSBuildConfigurationGlobalProperties; skip it (and empty tools).
      if not tool_name or not tool_settings:
        continue
      tool = [tool_name]
      for setting_name, setting_value in sorted(tool_settings.iteritems()):
        formatted = _GetValueFormattedForMSBuild(tool_name, setting_name,
                                                 setting_value)
        tool.append([setting_name, formatted])
      group.append(tool)
    groups.append(group)
  return groups
def _FinalizeMSBuildSettings(spec, configuration):
  """Fills in configuration['finalized_msbuild_settings'].

  Starts from the native 'msbuild_settings' when present (validated as-is),
  otherwise converts the legacy 'msvs_settings'.  Then folds in include
  directories, libraries, output file/extension, defines, disabled warnings,
  precompiled-header settings and the module definition file.
  """
  if 'msbuild_settings' in configuration:
    converted = False
    msbuild_settings = configuration['msbuild_settings']
    MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
  else:
    converted = True
    msvs_settings = configuration.get('msvs_settings', {})
    msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
  include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
  libraries = _GetLibraries(spec)
  library_dirs = _GetLibraryDirs(configuration)
  out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
  target_ext = _GetOutputTargetExt(spec)
  defines = _GetDefines(configuration)
  if converted:
    # Visual Studio 2010 has TR1
    defines = [d for d in defines if d != '_HAS_TR1=0']
    # Warn of ignored settings
    ignored_settings = ['msvs_prebuild', 'msvs_postbuild', 'msvs_tool_files']
    for ignored_setting in ignored_settings:
      value = configuration.get(ignored_setting)
      if value:
        print ('Warning: The automatic conversion to MSBuild does not handle '
               '%s. Ignoring setting of %s' % (ignored_setting, str(value)))
  defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
  disabled_warnings = _GetDisabledWarnings(configuration)
  # TODO(jeanluc) Validate & warn that we don't translate
  # prebuild = configuration.get('msvs_prebuild')
  # postbuild = configuration.get('msvs_postbuild')
  def_file = _GetModuleDefinition(spec)
  precompiled_header = configuration.get('msvs_precompiled_header')
  # Add the information to the appropriate tool
  # TODO(jeanluc) We could optimize and generate these settings only if
  # the corresponding files are found, e.g. don't generate ResourceCompile
  # if you don't have any resources.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'AdditionalIncludeDirectories', include_dirs)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'AdditionalIncludeDirectories', resource_include_dirs)
  # Add in libraries, note that even for empty libraries, we want this
  # set, to prevent inheriting default libraries from the environment.
  _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
                   libraries)
  _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
              library_dirs)
  if out_file:
    # only_if_unset: an explicit OutputFile in the settings wins.
    _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
                only_if_unset=True)
  if target_ext:
    _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
                only_if_unset=True)
  # Add defines.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'PreprocessorDefinitions', defines)
  _ToolAppend(msbuild_settings, 'ResourceCompile',
              'PreprocessorDefinitions', defines)
  # Add disabled warnings.
  _ToolAppend(msbuild_settings, 'ClCompile',
              'DisableSpecificWarnings', disabled_warnings)
  # Turn on precompiled headers if appropriate.
  if precompiled_header:
    # MSBuild wants just the header file name, not its path.
    precompiled_header = os.path.split(precompiled_header)[1]
    _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
    _ToolAppend(msbuild_settings, 'ClCompile',
                'PrecompiledHeaderFile', precompiled_header)
    _ToolAppend(msbuild_settings, 'ClCompile',
                'ForcedIncludeFiles', [precompiled_header])
  # Loadable modules don't generate import libraries;
  # tell dependent projects to not expect one.
  if spec['type'] == 'loadable_module':
    _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
  # Set the module definition file if any.
  if def_file:
    _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
  configuration['finalized_msbuild_settings'] = msbuild_settings
def _GetValueFormattedForMSBuild(tool_name, name, value):
  """Formats a setting value for emission into an MSBuild project file.

  Arguments:
    tool_name: the tool the setting belongs to ('' for global settings).
    name: the name of the setting.
    value: the value, either a scalar or a list.
  Returns:
    The value as a single formatted string.
  """
  if isinstance(value, list):
    # For some settings, VS2010 does not automatically extend the settings
    # TODO(jeanluc) Is this what we want?
    if name in ['AdditionalIncludeDirectories',
                'AdditionalLibraryDirectories',
                'AdditionalOptions',
                'DelayLoadDLLs',
                'DisableSpecificWarnings',
                'PreprocessorDefinitions']:
      # Work on a copy: appending to the caller's list in place would
      # accumulate a duplicate '%(Name)' entry each time this is called.
      value = value + ['%%(%s)' % name]
    # For most tools, entries in a list should be separated with ';' but some
    # settings use a space. Check for those first.
    exceptions = {
        'ClCompile': ['AdditionalOptions'],
        'Link': ['AdditionalOptions'],
        'Lib': ['AdditionalOptions']}
    if tool_name in exceptions and name in exceptions[tool_name]:
      char = ' '
    else:
      char = ';'
    formatted_value = char.join(
        [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
  else:
    formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
  return formatted_value
def _VerifySourcesExist(sources, root_dir):
  """Verifies that all source files exist on disk.

  Checks that all regular source files, i.e. not created at run time,
  exist on disk.  Missing files cause needless recompilation but no
  otherwise visible errors.

  Arguments:
    sources: A recursive list of Filter/file names.
    root_dir: The root directory for the relative path names.
  Returns:
    A list of source files that cannot be found on disk.
  """
  missing = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Recurse into filter (folder) nodes.
      missing += _VerifySourcesExist(source.contents, root_dir)
    elif '$' not in source:
      # Paths containing '$' are generated at build time; skip them.
      candidate = os.path.join(root_dir, source)
      if not os.path.exists(candidate):
        missing.append(candidate)
  return missing
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
                       actions_spec, sources_handled_by_action, list_excluded):
  """Returns ItemGroup elements for the project's sources, bucketed by type."""
  groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
  grouped_sources = dict((g, []) for g in groups)
  _AddSources2(spec, sources, exclusions, grouped_sources,
               extension_to_rule_name, sources_handled_by_action,
               list_excluded)
  # Emit one ItemGroup per non-empty bucket, in the fixed group order.
  result = [['ItemGroup'] + grouped_sources[g]
            for g in groups if grouped_sources[g]]
  if actions_spec:
    result.append(['ItemGroup'] + actions_spec)
  return result
def _AddSources2(spec, sources, exclusions, grouped_sources,
                 extension_to_rule_name, sources_handled_by_action,
                 list_excluded):
  """Recursively sorts sources into grouped_sources buckets.

  For each plain source (not already handled by a custom build action) a
  detail list is built holding per-configuration ExcludedFromBuild and
  PrecompiledHeader entries, then the source is appended to the bucket
  chosen by _MapFileToMsBuildSourceType.
  """
  extensions_excluded_from_precompile = []
  for source in sources:
    if isinstance(source, MSVSProject.Filter):
      # Filter nodes are folders; recurse into their contents.
      _AddSources2(spec, source.contents, exclusions, grouped_sources,
                   extension_to_rule_name, sources_handled_by_action,
                   list_excluded)
    else:
      if not source in sources_handled_by_action:
        detail = []
        excluded_configurations = exclusions.get(source, [])
        if len(excluded_configurations) == len(spec['configurations']):
          # Excluded everywhere: one unconditional entry suffices.
          detail.append(['ExcludedFromBuild', 'true'])
        else:
          for config_name, configuration in sorted(excluded_configurations):
            condition = _GetConfigurationCondition(config_name, configuration)
            detail.append(['ExcludedFromBuild',
                           {'Condition': condition},
                           'true'])
        # Add precompile if needed
        for config_name, configuration in spec['configurations'].iteritems():
          precompiled_source = configuration.get('msvs_precompiled_source', '')
          if precompiled_source != '':
            precompiled_source = _FixPath(precompiled_source)
            if not extensions_excluded_from_precompile:
              # If the precompiled header is generated by a C source, we must
              # not try to use it for C++ sources, and vice versa.
              basename, extension = os.path.splitext(precompiled_source)
              if extension == '.c':
                extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
              else:
                extensions_excluded_from_precompile = ['.c']
            if precompiled_source == source:
              # This file generates the precompiled header for this config.
              condition = _GetConfigurationCondition(config_name,
                                                     configuration)
              detail.append(['PrecompiledHeader',
                             {'Condition': condition},
                             'Create'
                            ])
            else:
              # Turn off precompiled header usage for source files of a
              # different type than the file that generated the
              # precompiled header.
              for extension in extensions_excluded_from_precompile:
                if source.endswith(extension):
                  detail.append(['PrecompiledHeader', ''])
                  detail.append(['ForcedIncludeFiles', ''])
        group, element = _MapFileToMsBuildSourceType(source,
                                                     extension_to_rule_name)
        grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
  """Returns an ItemGroup of ProjectReference elements for dependencies."""
  if not project.dependencies:
    return []
  group = ['ItemGroup']
  project_dir = os.path.split(project.path)[0]
  for dependency in project.dependencies:
    relative_path = gyp.common.RelativePath(dependency.path, project_dir)
    project_ref = ['ProjectReference',
                   {'Include': relative_path},
                   ['Project', dependency.guid],
                   ['ReferenceOutputAssembly', 'false']
                  ]
    for config in dependency.spec.get('configurations', {}).itervalues():
      # If it's disabled in any config, turn it off in the reference.
      if config.get('msvs_2010_disable_uldi_when_referenced', 0):
        project_ref.append(['UseLibraryDependencyInputs', 'false'])
        break
    group.append(project_ref)
  return [group]
def _GenerateMSBuildProject(project, options, version, generator_flags):
  """Generates the .vcxproj (and .filters) file for one project.

  Arguments:
    project: the project to generate.
    options: global generator options.
    version: the targeted MSVS version.
    generator_flags: dict of generator-specific flags.
  Returns:
    A list of source files that could not be found on disk.
  """
  spec = project.spec
  configurations = spec['configurations']
  project_dir, project_file_name = os.path.split(project.path)
  gyp.common.EnsureDirExists(project.path)
  # Prepare list of sources and excluded sources.
  gyp_path = _NormalizedSource(project.build_file)
  relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
  gyp_file = os.path.split(project.build_file)[1]
  sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
                                                    gyp_file)
  # Add rules.
  actions_to_add = {}
  props_files_of_rules = set()
  targets_files_of_rules = set()
  extension_to_rule_name = {}
  list_excluded = generator_flags.get('msvs_list_excluded_files', True)
  # Don't generate rules if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _GenerateRulesForMSBuild(project_dir, options, spec,
                             sources, excluded_sources,
                             props_files_of_rules, targets_files_of_rules,
                             actions_to_add, extension_to_rule_name)
  else:
    rules = spec.get('rules', [])
    _AdjustSourcesForRules(spec, rules, sources, excluded_sources)
  sources, excluded_sources, excluded_idl = (
      _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
                                                project_dir, sources,
                                                excluded_sources,
                                                list_excluded, version))
  # Don't add actions if we are using an external builder like ninja.
  if not spec.get('msvs_external_builder'):
    _AddActions(actions_to_add, spec, project.build_file)
    _AddCopies(actions_to_add, spec)
    # NOTE: this stanza must appear after all actions have been decided.
    # Don't excluded sources with actions attached, or they won't run.
    excluded_sources = _FilterActionsFromExcluded(
        excluded_sources, actions_to_add)
  exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
  actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
      spec, actions_to_add)
  _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
                              extension_to_rule_name)
  missing_sources = _VerifySourcesExist(sources, project_dir)
  for configuration in configurations.itervalues():
    _FinalizeMSBuildSettings(spec, configuration)
  # Add attributes to root element
  import_default_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
  import_cpp_props_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
  import_cpp_targets_section = [
      ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
  macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
  content = [
      'Project',
      {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
       'ToolsVersion': version.ProjectVersion(),
       'DefaultTargets': 'Build'
      }]
  # The ordering of the sections below mirrors what Visual Studio itself
  # writes; MSBuild evaluates the file top to bottom, so order matters.
  content += _GetMSBuildProjectConfigurations(configurations)
  content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
  content += import_default_section
  content += _GetMSBuildConfigurationDetails(spec, project.build_file)
  content += _GetMSBuildLocalProperties(project.msbuild_toolset)
  content += import_cpp_props_section
  content += _GetMSBuildExtensions(props_files_of_rules)
  content += _GetMSBuildPropertySheets(configurations)
  content += macro_section
  content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
                                                      project.build_file)
  content += _GetMSBuildToolSettingsSections(spec, configurations)
  content += _GetMSBuildSources(
      spec, sources, exclusions, extension_to_rule_name, actions_spec,
      sources_handled_by_action, list_excluded)
  content += _GetMSBuildProjectReferences(project)
  content += import_cpp_targets_section
  content += _GetMSBuildExtensionTargets(targets_files_of_rules)
  if spec.get('msvs_external_builder'):
    content += _GetMSBuildExternalBuilderTargets(spec)
  # TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
  # has_run_as = _WriteMSVSUserFile(project.path, version, spec)
  easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
  return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
  """Return a list of MSBuild targets for external builders.

  Right now, only "Build" and "Clean" targets are generated.

  Arguments:
    spec: The gyp target spec.
  Returns:
    List of MSBuild 'Target' specs.
  """
  def _MakeTarget(target_name, cmd_key):
    # One Target element wrapping an Exec of the external command.
    cmd = _BuildCommandLineForRuleRaw(spec, spec[cmd_key],
                                      False, False, False, False)
    return ['Target', {'Name': target_name}, ['Exec', {'Command': cmd}]]
  return [_MakeTarget('Build', 'msvs_external_builder_build_cmd'),
          _MakeTarget('Clean', 'msvs_external_builder_clean_cmd')]
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
  """Add actions accumulated into an actions_to_add, merging as needed.

  Arguments:
    spec: the target project dict
    actions_to_add: dictionary keyed on input name, which maps to a list of
        dicts describing the actions attached to that input file.

  Returns:
    A pair of (action specification, the sources handled by this action).
  """
  sources_handled_by_action = OrderedSet()
  actions_spec = []
  for primary_input, actions in actions_to_add.iteritems():
    # All actions sharing a primary input are merged into one CustomBuild
    # element: union the inputs/outputs, chain the commands.
    inputs = OrderedSet()
    outputs = OrderedSet()
    descriptions = []
    commands = []
    for action in actions:
      inputs.update(OrderedSet(action['inputs']))
      outputs.update(OrderedSet(action['outputs']))
      descriptions.append(action['description'])
      cmd = action['command']
      # For most actions, add 'call' so that actions that invoke batch files
      # return and continue executing.  msbuild_use_call provides a way to
      # disable this but I have not seen any adverse effect from doing that
      # for everything.
      if action.get('msbuild_use_call', True):
        cmd = 'call ' + cmd
      commands.append(cmd)
    # Add the custom build action for one input file.
    description = ', and also '.join(descriptions)
    # We can't join the commands simply with && because the command line will
    # get too long.  See also _AddActions: cygwin's setup_env mustn't be called
    # for every invocation or the command that sets the PATH will grow too
    # long.
    command = (
        '\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'.join(commands))
    _AddMSBuildAction(spec,
                      primary_input,
                      inputs,
                      outputs,
                      command,
                      description,
                      sources_handled_by_action,
                      actions_spec)
  return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
                      sources_handled_by_action, actions_spec):
  """Appends one CustomBuild element for primary_input to actions_spec."""
  command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
  primary_input = _FixPath(primary_input)
  fixed_inputs = _FixPaths(inputs)
  fixed_outputs = _FixPaths(outputs)
  # The primary input is implicit; only list the other inputs explicitly.
  additional_inputs = ';'.join(i for i in fixed_inputs if i != primary_input)
  sources_handled_by_action.add(primary_input)
  action_spec = ['CustomBuild', {'Include': primary_input},
                 # TODO(jeanluc) 'Document' for all or just if as_sources?
                 ['FileType', 'Document'],
                 ['Command', command],
                 ['Message', description],
                 ['Outputs', ';'.join(fixed_outputs)]
                ]
  if additional_inputs:
    action_spec.append(['AdditionalInputs', additional_inputs])
  actions_spec.append(action_spec)
| gpl-3.0 |
selboo/paramiko | tests/loop.py | 27 | 2848 | # Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
...
"""
import threading, socket
from paramiko.common import asbytes
class LoopSocket (object):
    """
    A LoopSocket looks like a normal socket, but all data written to it is
    delivered on the read-end of another LoopSocket, and vice versa.  It's
    like a software "socketpair".
    """

    def __init__(self):
        # Bytes received from the mate but not yet consumed by recv().
        self.__in_buffer = bytes()
        self.__lock = threading.Lock()
        # Condition (sharing __lock) used to wake a blocked recv().
        self.__cv = threading.Condition(self.__lock)
        # recv() timeout in seconds; None means block indefinitely.
        self.__timeout = None
        # The peer LoopSocket; None until link() is called or after close().
        self.__mate = None

    def close(self):
        """Detach from the mate and discard any unread buffered data."""
        self.__unlink()
        try:
            self.__lock.acquire()
            self.__in_buffer = bytes()
        finally:
            self.__lock.release()

    def send(self, data):
        """Deliver data to the mate's read buffer; returns len(data).

        Raises EOFError if this socket is not linked to a mate.
        """
        data = asbytes(data)
        if self.__mate is None:
            # EOF
            raise EOFError()
        self.__mate.__feed(data)
        return len(data)

    def recv(self, n):
        """Return up to n buffered bytes, waiting for data if necessary.

        Returns b'' on EOF (no mate); raises socket.timeout if no data
        arrives within the configured timeout.
        """
        self.__lock.acquire()
        try:
            if self.__mate is None:
                # EOF
                return bytes()
            if len(self.__in_buffer) == 0:
                self.__cv.wait(self.__timeout)
                if len(self.__in_buffer) == 0:
                    raise socket.timeout
            out = self.__in_buffer[:n]
            self.__in_buffer = self.__in_buffer[n:]
            return out
        finally:
            self.__lock.release()

    def settimeout(self, n):
        """Set the recv() timeout in seconds (None blocks forever)."""
        self.__timeout = n

    def link(self, other):
        """Symmetrically connect this socket with *other*."""
        self.__mate = other
        self.__mate.__mate = self

    def __feed(self, data):
        # Called by the mate's send(): append data and wake any waiting
        # recv() on this side.
        self.__lock.acquire()
        try:
            self.__in_buffer += data
            self.__cv.notifyAll()
        finally:
            self.__lock.release()

    def __unlink(self):
        # Break the pairing in both directions; safe to call repeatedly.
        m = None
        self.__lock.acquire()
        try:
            if self.__mate is not None:
                m = self.__mate
                self.__mate = None
        finally:
            self.__lock.release()
        if m is not None:
            m.__unlink()
| lgpl-2.1 |
dermoth/gramps | gramps/gen/filters/rules/_changedsincebase.py | 1 | 3763 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen.filters.rules/_ChangedSinceBase.py
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
import re
import time
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from . import Rule
from ...errors import FilterError
from ...const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# ChangedSince
#
#-------------------------------------------------------------------------
class ChangedSinceBase(Rule):
    """
    Rule that checks for primary objects changed since a specific time.
    """

    labels = [ 'Changed after:', 'but before:' ]
    name = 'Objects changed after <date time>'
    description = "Matches object records changed after a specified " \
        "date/time (yyyy-mm-dd hh:mm:ss) or in range, if a second " \
        "date/time is given."
    category = _('General filters')

    def add_time(self, date):
        """Complete a partial iso date string to full date-time form."""
        # Try the most specific shape first; append whatever is missing.
        completions = (
            (r"\d.*\s+\d{1,2}:\d{2}:\d{2}", ""),
            (r"\d.*\s+\d{1,2}:\d{2}", ":00"),
            (r"\d.*\s+\d{1,2}", ":00:00"),
            (r"\d{4}-\d{1,2}-\d{1,2}", " 00:00:00"),
            (r"\d{4}-\d{1,2}", "-01 00:00:00"),
            (r"\d{4}", "-01-01 00:00:00"),
        )
        for pattern, suffix in completions:
            if re.search(pattern, date):
                return date + suffix
        return date

    def time_str_to_sec(self, time_str):
        """Convert an iso date-time string to seconds since the epoch."""
        iso_date_time = self.add_time(time_str)
        try:
            time_tup = time.strptime(iso_date_time, "%Y-%m-%d %H:%M:%S")
            return time.mktime(time_tup)
        except ValueError:
            raise FilterError(
                _("Wrong format of date-time"),
                _("Only date-times in the iso format of yyyy-mm-dd "
                  "hh:mm:ss, where the time part is optional, are "
                  "accepted. %s does not satisfy.") % iso_date_time)

    def prepare(self, db, user):
        """Parse the two filter fields into epoch-second bounds."""
        since_str = self.list[0]
        before_str = self.list[1]
        self.since = self.time_str_to_sec(since_str) if since_str else None
        self.before = self.time_str_to_sec(before_str) if before_str else None

    def apply(self, db, obj):
        """Return True when obj's change time falls within the bounds."""
        obj_time = obj.get_change_time()
        if self.since:
            if obj_time < self.since:
                return False
            if self.before:
                return obj_time < self.before
            return True
        if self.before:
            return obj_time < self.before
        return False
| gpl-2.0 |
Shrhawk/edx-platform | common/djangoapps/performance/views/__init__.py | 100 | 1765 | import datetime
import json
import logging
from django.http import HttpResponse
from track.utils import DateTimeJSONEncoder
perflog = logging.getLogger("perflog")
def _get_request_header(request, header_name, default=''):
"""Helper method to get header values from a request's META dict, if present."""
if request is not None and hasattr(request, 'META') and header_name in request.META:
return request.META[header_name]
else:
return default
def _get_request_value(request, value_name, default=''):
"""Helper method to get header values from a request's REQUEST dict, if present."""
if request is not None and hasattr(request, 'REQUEST') and value_name in request.REQUEST:
return request.REQUEST[value_name]
else:
return default
def performance_log(request):
    """
    Log when POST call to "performance" URL is made by a user.
    Request should provide "event" and "page" arguments.
    """
    # Field order matches the original literal so the serialized log line
    # is unchanged.
    fields = [
        ("ip", _get_request_header(request, 'REMOTE_ADDR')),
        ("referer", _get_request_header(request, 'HTTP_REFERER')),
        ("accept_language", _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE')),
        ("event_source", "browser"),
        ("event", _get_request_value(request, 'event')),
        ("agent", _get_request_header(request, 'HTTP_USER_AGENT')),
        ("page", _get_request_value(request, 'page')),
        ("id", _get_request_value(request, 'id')),
        ("expgroup", _get_request_value(request, 'expgroup')),
        ("value", _get_request_value(request, 'value')),
        ("time", datetime.datetime.utcnow()),
        ("host", _get_request_header(request, 'SERVER_NAME')),
    ]
    event = dict(fields)
    perflog.info(json.dumps(event, cls=DateTimeJSONEncoder))
    return HttpResponse(status=204)
| agpl-3.0 |
marshall007/rethinkdb | external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-defrelink.py | 210 | 1683 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure a relink is performed when a .def file is touched.
"""
import TestGyp
import sys
if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  target = 'test_deffile_dll_ok'
  # Keep the original .def contents so we can append to them later.
  def_contents = test.read('linker-flags/deffile.def')

  # This first build makes sure everything is up to date.
  test.run_gyp('deffile.gyp', chdir=CHDIR)
  test.build('deffile.gyp', target, chdir=CHDIR)
  test.up_to_date('deffile.gyp', target, chdir=CHDIR)

  def HasExport(binary, export):
    # Check dumpbin's export table of the built binary for the symbol.
    full_path = test.built_file_path(binary, chdir=CHDIR)
    output = test.run_dumpbin('/exports', full_path)
    return export in output

  # Verify that only one function is exported.
  if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
    test.fail_test()
  if HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
    test.fail_test()

  # Add AnotherExportedFunction to the def file, then rebuild. If it doesn't
  # relink the DLL, then the subsequent check for AnotherExportedFunction will
  # fail.
  new_def_contents = def_contents + "\n AnotherExportedFunction"
  test.write('linker-flags/deffile.def', new_def_contents)
  test.build('deffile.gyp', target, chdir=CHDIR)
  test.up_to_date('deffile.gyp', target, chdir=CHDIR)

  if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
    test.fail_test()
  if not HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
    test.fail_test()

  test.pass_test()
| agpl-3.0 |
piffey/ansible | lib/ansible/modules/cloud/cloudscale/cloudscale_floating_ip.py | 49 | 9740 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, Gaudenz Steinlin <gaudenz.steinlin@cloudscale.ch>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudscale_floating_ip
short_description: Manages floating IPs on the cloudscale.ch IaaS service
description:
- Create, assign and delete floating IPs on the cloudscale.ch IaaS service.
- All operations are performed using the cloudscale.ch public API v1.
- "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)."
- A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at
U(https://control.cloudscale.ch).
notes:
- Instead of the api_token parameter the CLOUDSCALE_API_TOKEN environment variable can be used.
- To create a new floating IP at least the C(ip_version) and C(server) options are required.
- Once a floating_ip is created all parameters except C(server) are read-only.
- It's not possible to request a floating IP without associating it with a server at the same time.
- This module requires the ipaddress python library. This library is included in Python since version 3.3. It is available as a
module on PyPi for earlier versions.
version_added: 2.5
author: "Gaudenz Steinlin (@gaudenz) <gaudenz.steinlin@cloudscale.ch>"
options:
state:
description:
- State of the floating IP.
default: present
choices: [ present, absent ]
ip:
description:
- Floating IP address to change.
- Required to assign the IP to a different server or if I(state) is absent.
aliases: [ network ]
ip_version:
description:
- IP protocol version of the floating IP.
choices: [ 4, 6 ]
server:
description:
- UUID of the server assigned to this floating IP.
- Required unless I(state) is absent.
prefix_length:
description:
- Only valid if I(ip_version) is 6.
- Prefix length for the IPv6 network. Currently only a prefix of /56 can be requested. If no I(prefix_length) is present, a
single address is created.
choices: [ 56 ]
reverse_ptr:
description:
- Reverse PTR entry for this address.
- You cannot set a reverse PTR entry for IPv6 floating networks. Reverse PTR entries are only allowed for single addresses.
api_token:
description:
- cloudscale.ch API token.
- This can also be passed in the CLOUDSCALE_API_TOKEN environment variable.
api_timeout:
description:
- Timeout in seconds for calls to the cloudscale.ch API.
default: 30
'''
EXAMPLES = '''
# Request a new floating IP
- name: Request a floating IP
cloudscale_floating_ip:
ip_version: 4
server: 47cec963-fcd2-482f-bdb6-24461b2d47b1
reverse_ptr: my-server.example.com
api_token: xxxxxx
register: floating_ip
# Assign an existing floating IP to a different server
- name: Move floating IP to backup server
cloudscale_floating_ip:
ip: 192.0.2.123
server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
api_token: xxxxxx
# Request a new floating IPv6 network
- name: Request a floating IP
cloudscale_floating_ip:
ip_version: 6
prefix_length: 56
server: 47cec963-fcd2-482f-bdb6-24461b2d47b1
api_token: xxxxxx
register: floating_ip
# Assign an existing floating network to a different server
- name: Move floating IP to backup server
cloudscale_floating_ip:
ip: '{{ floating_ip.network | ip }}'
server: ea3b39a3-77a8-4d0b-881d-0bb00a1e7f48
api_token: xxxxxx
# Release a floating IP
- name: Release floating IP
cloudscale_floating_ip:
ip: 192.0.2.123
state: absent
api_token: xxxxxx
'''
RETURN = '''
href:
description: The API URL to get details about this floating IP.
returned: success when state == present
type: string
sample: https://api.cloudscale.ch/v1/floating-ips/2001:db8::cafe
network:
description: The CIDR notation of the network that is routed to your server.
returned: success when state == present
type: string
sample: 2001:db8::cafe/128
next_hop:
description: Your floating IP is routed to this IP address.
returned: success when state == present
type: string
sample: 2001:db8:dead:beef::42
reverse_ptr:
description: The reverse pointer for this floating IP address.
returned: success when state == present
type: string
sample: 185-98-122-176.cust.cloudscale.ch
server:
description: The floating IP is routed to this server.
returned: success when state == present
type: string
sample: 47cec963-fcd2-482f-bdb6-24461b2d47b1
ip:
description: The floating IP address or network. This is always present and used to identify floating IPs after creation.
returned: success
type: string
sample: 185.98.122.176
state:
description: The current status of the floating IP.
returned: success
type: string
sample: present
'''
import os
try:
from ipaddress import ip_network
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.cloudscale import AnsibleCloudscaleBase, cloudscale_argument_spec
class AnsibleCloudscaleFloatingIP(AnsibleCloudscaleBase):
    """Stateful wrapper around a single cloudscale.ch floating IP.

    ``self.info`` mirrors the API view of the floating IP and always
    contains at least the keys ``ip`` and ``state``.
    """

    def __init__(self, module):
        super(AnsibleCloudscaleFloatingIP, self).__init__(module)

        # Assume 'absent' until update_info() proves otherwise.
        self.info = {'state': 'absent'}

        if self._module.params['ip']:
            self.update_info()

    @staticmethod
    def _resp2info(resp):
        """Normalize an API response dict into the internal info format."""
        # A non-empty API response implies the floating IP exists.
        resp['state'] = 'present'

        # Also expose the bare address so callers never have to derive it
        # from the CIDR 'network' value themselves.
        resp['ip'] = str(ip_network(resp['network']).network_address)

        # Only the server UUID is of interest; drop the href wrapper.
        # NOTE(review): assumes the API always returns a 'server' object in
        # the response -- confirm behavior for unassigned floating IPs.
        resp['server'] = resp['server']['uuid']
        return resp

    def update_info(self):
        """Refresh ``self.info`` from the API for the configured IP."""
        ip = self._module.params['ip']
        resp = self._get('floating-ips/' + ip)
        if resp:
            self.info = self._resp2info(resp)
        else:
            # Unknown to the API: record it as absent.
            self.info = {'ip': ip,
                         'state': 'absent'}

    def request_floating_ip(self):
        """Create a new floating IP and store the result in ``self.info``."""
        params = self._module.params

        # Both of these must be set when requesting a brand-new floating IP.
        missing_parameters = [name for name in ('ip_version', 'server')
                              if not params.get(name)]

        if missing_parameters:
            self._module.fail_json(msg='Missing required parameter(s) to request a floating IP: %s.' %
                                   ' '.join(missing_parameters))

        data = {'ip_version': params['ip_version'],
                'server': params['server']}

        # Optional attributes are only sent when explicitly configured.
        for key in ('prefix_length', 'reverse_ptr'):
            if params[key]:
                data[key] = params[key]

        self.info = self._resp2info(self._post('floating-ips', data))

    def release_floating_ip(self):
        """Delete the floating IP and mark it absent in ``self.info``."""
        self._delete('floating-ips/%s' % self._module.params['ip'])
        self.info = {'ip': self.info['ip'], 'state': 'absent'}

    def update_floating_ip(self):
        """Re-assign the floating IP to the configured server."""
        params = self._module.params
        if not params.get('server'):
            self._module.fail_json(msg='Missing required parameter to update a floating IP: server.')
        self.info = self._resp2info(self._post('floating-ips/%s' % params['ip'],
                                               {'server': params['server']}))
def main():
    """Ansible entry point: reconcile the requested floating IP state."""
    argument_spec = cloudscale_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=('present', 'absent')),
        ip=dict(aliases=('network', )),
        ip_version=dict(choices=(4, 6), type='int'),
        server=dict(),
        prefix_length=dict(choices=(56,), type='int'),
        reverse_ptr=dict(),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_one_of=(('ip', 'ip_version'),),
        supports_check_mode=True,
    )

    if not HAS_IPADDRESS:
        module.fail_json(msg='Could not import the python library ipaddress required by this module')

    target_state = module.params['state']
    target_server = module.params['server']

    # Constructing the wrapper also fetches the current state when an IP
    # was supplied.
    floating_ip = AnsibleCloudscaleFloatingIP(module)
    current_state = floating_ip.info['state']
    current_server = floating_ip.info.get('server')

    # In check mode only report whether anything *would* change.
    if module.check_mode:
        needs_change = (current_state != target_state or
                        (current_state == 'present' and
                         current_server != target_server))
        module.exit_json(changed=needs_change, **floating_ip.info)

    changed = False
    if (current_state, target_state) == ('absent', 'present'):
        # Does not exist yet but should: request a new floating IP.
        floating_ip.request_floating_ip()
        changed = True
    elif (current_state, target_state) == ('present', 'absent'):
        # Exists but should not: release it.
        floating_ip.release_floating_ip()
        changed = True
    elif current_state == 'present' and current_server != target_server:
        # Exists but is routed to the wrong server: move it.
        floating_ip.update_floating_ip()
        changed = True

    module.exit_json(changed=changed, **floating_ip.info)
if __name__ == '__main__':
main()
| gpl-3.0 |
Dhivyap/ansible | lib/ansible/modules/network/netvisor/pn_vrouter_bgp.py | 26 | 16772 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouter_bgp
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.9"
short_description: CLI command to add/modify/remove vrouter-bgp
description:
  - This module can be used to add a Border Gateway Protocol neighbor to a vRouter,
    modify a Border Gateway Protocol neighbor of a vRouter, and remove a Border
    Gateway Protocol neighbor from a vRouter.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: false
type: str
state:
description:
- vrouter-bgp configuration command.
required: false
type: str
choices: ['present', 'absent', 'update']
default: 'present'
pn_neighbor:
description:
- IP address for BGP neighbor.
required: true
type: str
pn_vrouter_name:
description:
- name of service config.
required: true
type: str
pn_send_community:
description:
- send any community attribute to neighbor.
required: false
type: bool
pn_weight:
description:
      - default weight value between 1 and 65535 for the neighbor's routes.
required: false
pn_multi_protocol:
description:
- Multi-protocol features.
required: false
choices: ['ipv4-unicast', 'ipv6-unicast']
pn_prefix_list_in:
description:
- prefixes used for filtering.
required: false
type: str
pn_route_reflector_client:
description:
- set as route reflector client.
required: false
type: bool
pn_default_originate:
description:
- announce default routes to the neighbor or not.
required: false
type: bool
pn_neighbor_holdtime:
description:
- BGP Holdtime (seconds).
required: false
type: str
pn_connect_retry_interval:
description:
- BGP Connect retry interval (seconds).
required: false
type: str
pn_advertisement_interval:
description:
- Minimum interval between sending BGP routing updates.
required: false
type: str
pn_route_map_out:
description:
- route map out for nbr.
required: false
type: str
pn_update_source:
description:
- IP address of BGP packets required for peering over loopback interface.
required: false
type: str
pn_bfd:
description:
- BFD protocol support for fault detection.
required: false
type: bool
default: False
pn_next_hop_self:
description:
- BGP next hop is self or not.
required: false
type: bool
pn_allowas_in:
description:
- Allow/reject routes with local AS in AS_PATH.
required: false
type: bool
pn_neighbor_keepalive_interval:
description:
- BGP Keepalive interval (seconds).
required: false
type: str
pn_max_prefix:
description:
- maximum number of prefixes.
required: false
type: str
pn_bfd_multihop:
description:
- always use BFD multi-hop port for fault detection.
required: false
type: bool
pn_interface:
description:
- Interface to reach the neighbor.
required: false
type: str
pn_password:
description:
- password for MD5 BGP.
required: false
type: str
pn_route_map_in:
description:
- route map in for nbr.
required: false
type: str
pn_soft_reconfig_inbound:
description:
- soft reset to reconfigure inbound traffic.
required: false
type: bool
pn_override_capability:
description:
- override capability.
required: false
type: bool
pn_max_prefix_warn_only:
description:
- warn if the maximum number of prefixes is exceeded.
required: false
type: bool
pn_ebgp_multihop:
description:
- value for external BGP from 1 to 255.
required: false
type: str
pn_remote_as:
description:
- BGP remote AS from 1 to 4294967295.
required: false
type: str
pn_prefix_list_out:
description:
- prefixes used for filtering outgoing packets.
required: false
type: str
pn_no_route_map_out:
description:
- Remove egress route-map from BGP neighbor.
required: false
type: str
pn_no_route_map_in:
description:
- Remove ingress route-map from BGP neighbor.
required: false
type: str
"""
EXAMPLES = """
- name: "Add BGP to vRouter"
pn_vrouter_bgp:
state: 'present'
pn_vrouter_name: 'sw01-vrouter'
pn_neighbor: '105.104.104.1'
pn_remote_as: 65000
pn_bfd: true
- name: "Remove BGP to vRouter"
pn_vrouter_bgp:
state: 'absent'
pn_vrouter_name: 'sw01-vrouter'
pn_neighbor: '105.104.104.1'
- name: "Modify BGP to vRouter"
pn_vrouter_bgp:
state: 'update'
pn_vrouter_name: 'sw01-vrouter'
pn_neighbor: '105.104.104.1'
pn_remote_as: 65000
pn_bfd: false
pn_allowas_in: true
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the vrouter-bgp command.
returned: always
type: list
stderr:
description: set of error responses from the vrouter-bgp command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli, booleanArgs
from ansible.module_utils.network.netvisor.netvisor import run_commands
def is_valid(module, param_name, param_val, min_val, max_val):
    """Fail the module run if *param_val* lies outside [min_val, max_val].

    :param module: The Ansible module (used for ``fail_json``).
    :param param_name: Human-readable parameter name for the error message.
    :param param_val: The value to validate; int or numeric string.
    :param min_val: Lower bound (inclusive); int or numeric string.
    :param max_val: Upper bound (inclusive); int or numeric string.

    Callers in this module pass the bounds as strings (e.g. '0', '65535'),
    so everything is coerced to int before comparing -- comparing an int
    against a str would raise a TypeError on Python 3.
    """
    if int(param_val) < int(min_val) or int(param_val) > int(max_val):
        module.fail_json(
            failed=True,
            msg='Valid %s range is %s to %s' % (param_name, min_val, max_val)
        )
def check_cli(module, cli):
    """
    Check whether the vRouter and the BGP neighbor exist on the target node.

    This method checks if vRouter exists on the target node.
    This method also checks for idempotency using the vrouter-bgp-show command.
    If the given vRouter exists, return VROUTER_EXISTS as True else False.
    If the given neighbor exists on the given vRouter, return NEIGHBOR_EXISTS as True else False.

    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return Booleans: VROUTER_EXISTS, NEIGHBOR_EXISTS
    """
    vrouter_name = module.params['pn_vrouter_name']
    neighbor = module.params['pn_neighbor']

    # Check for vRouter by name.
    check_vrouter = cli + ' vrouter-show format name no-show-headers'
    out = run_commands(module, check_vrouter)[1]
    # Guard against an empty/None response so the membership test below
    # cannot raise a TypeError.
    vrouter_names = out.split() if out else []
    VROUTER_EXISTS = vrouter_name in vrouter_names

    # Default to "not found" so the return statement cannot raise a
    # NameError if no neighbor was supplied (pn_neighbor is required by the
    # argument spec, but be defensive).
    NEIGHBOR_EXISTS = False
    if neighbor:
        # Check for the BGP neighbor on that vRouter.
        show = cli + ' vrouter-bgp-show vrouter-name %s ' % vrouter_name
        show += 'format neighbor no-show-headers'
        out = run_commands(module, show)[1]
        NEIGHBOR_EXISTS = bool(out) and neighbor in out.split()

    return VROUTER_EXISTS, NEIGHBOR_EXISTS
def main():
    """Entry point of the pn_vrouter_bgp module.

    Parses the arguments, validates numeric ranges, checks whether the
    vRouter / neighbor already exist (for idempotency), assembles the
    Netvisor CLI command string and finally executes it via run_cli().
    """
    # Map the Ansible 'state' value onto the corresponding Netvisor CLI verb.
    state_map = dict(
        present='vrouter-bgp-add',
        absent='vrouter-bgp-remove',
        update='vrouter-bgp-modify'
    )

    argument_spec = dict(
        pn_cliswitch=dict(required=False, type='str'),
        state=dict(required=False, type='str', choices=state_map.keys(), default='present'),
        pn_neighbor=dict(required=True, type='str'),
        pn_vrouter_name=dict(required=True, type='str'),
        pn_send_community=dict(required=False, type='bool'),
        pn_weight=dict(required=False, type='str'),
        pn_multi_protocol=dict(required=False, type='str', choices=['ipv4-unicast', 'ipv6-unicast']),
        pn_prefix_list_in=dict(required=False, type='str'),
        pn_route_reflector_client=dict(required=False, type='bool'),
        pn_default_originate=dict(required=False, type='bool'),
        pn_neighbor_holdtime=dict(required=False, type='str'),
        pn_connect_retry_interval=dict(required=False, type='str'),
        pn_advertisement_interval=dict(required=False, type='str'),
        pn_route_map_out=dict(required=False, type='str'),
        pn_update_source=dict(required=False, type='str'),
        pn_bfd=dict(required=False, type='bool', default=False),
        pn_next_hop_self=dict(required=False, type='bool'),
        pn_allowas_in=dict(required=False, type='bool'),
        pn_neighbor_keepalive_interval=dict(required=False, type='str'),
        pn_max_prefix=dict(required=False, type='str'),
        pn_bfd_multihop=dict(required=False, type='bool'),
        pn_interface=dict(required=False, type='str'),
        pn_password=dict(required=False, type='str', no_log=True),
        pn_route_map_in=dict(required=False, type='str'),
        pn_soft_reconfig_inbound=dict(required=False, type='bool'),
        pn_override_capability=dict(required=False, type='bool'),
        pn_max_prefix_warn_only=dict(required=False, type='bool'),
        pn_ebgp_multihop=dict(required=False, type='str'),
        pn_remote_as=dict(required=False, type='str'),
        pn_prefix_list_out=dict(required=False, type='str'),
        pn_no_route_map_out=dict(required=False, type='str'),
        pn_no_route_map_in=dict(required=False, type='str'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        # Per-state required parameters (add also needs the remote AS).
        required_if=(
            ["state", "present", ["pn_vrouter_name", "pn_neighbor", "pn_remote_as"]],
            ["state", "absent", ["pn_vrouter_name", "pn_neighbor"]],
            ["state", "update", ["pn_vrouter_name", "pn_neighbor"]]
        ),
        # At least one BGP property must be supplied, otherwise there is
        # nothing to add or modify.
        required_one_of=[['pn_send_community', 'pn_weight', 'pn_multi_protocol',
                          'pn_prefix_list_in', 'pn_route_reflector_client', 'pn_default_originate',
                          'pn_neighbor_holdtime', 'pn_connect_retry_interval', 'pn_advertisement_interval',
                          'pn_route_map_out', 'pn_update_source', 'pn_bfd',
                          'pn_next_hop_self', 'pn_allowas_in', 'pn_neighbor_keepalive_interval',
                          'pn_max_prefix', 'pn_bfd_multihop', 'pn_interface',
                          'pn_password', 'pn_route_map_in', 'pn_soft_reconfig_inbound',
                          'pn_override_capability', 'pn_max_prefix_warn_only', 'pn_ebgp_multihop',
                          'pn_remote_as', 'pn_prefix_list_out', 'pn_no_route_map_out',
                          'pn_no_route_map_in']],
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    neighbor = module.params['pn_neighbor']
    vrouter_name = module.params['pn_vrouter_name']
    send_community = module.params['pn_send_community']
    weight = module.params['pn_weight']
    multi_protocol = module.params['pn_multi_protocol']
    prefix_list_in = module.params['pn_prefix_list_in']
    route_reflector_client = module.params['pn_route_reflector_client']
    default_originate = module.params['pn_default_originate']
    neighbor_holdtime = module.params['pn_neighbor_holdtime']
    connect_retry_interval = module.params['pn_connect_retry_interval']
    advertisement_interval = module.params['pn_advertisement_interval']
    route_map_out = module.params['pn_route_map_out']
    update_source = module.params['pn_update_source']
    bfd = module.params['pn_bfd']
    next_hop_self = module.params['pn_next_hop_self']
    allowas_in = module.params['pn_allowas_in']
    neighbor_keepalive_interval = module.params['pn_neighbor_keepalive_interval']
    max_prefix = module.params['pn_max_prefix']
    bfd_multihop = module.params['pn_bfd_multihop']
    interface = module.params['pn_interface']
    password = module.params['pn_password']
    route_map_in = module.params['pn_route_map_in']
    soft_reconfig_inbound = module.params['pn_soft_reconfig_inbound']
    override_capability = module.params['pn_override_capability']
    max_prefix_warn_only = module.params['pn_max_prefix_warn_only']
    ebgp_multihop = module.params['pn_ebgp_multihop']
    remote_as = module.params['pn_remote_as']
    prefix_list_out = module.params['pn_prefix_list_out']
    no_route_map_out = module.params['pn_no_route_map_out']
    no_route_map_in = module.params['pn_no_route_map_in']

    command = state_map[state]

    # The CLI accepts the literal 'none' for "no weight"; any other value
    # must be a number in the accepted range.
    if weight and weight != 'none':
        if int(weight) < 1 or int(weight) > 65535:
            module.fail_json(
                failed=True,
                msg='Valid weight range is 1 to 65535'
            )

    # Building the CLI command string
    cli = pn_cli(module, cliswitch)
    VROUTER_EXISTS, NEIGHBOR_EXISTS = check_cli(module, cli)

    # Idempotency / precondition checks: skip (do not fail) when the
    # requested operation is impossible or already satisfied.
    if state:
        if VROUTER_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter %s does not exist' % vrouter_name
            )

        if command == 'vrouter-bgp-remove' or command == 'vrouter-bgp-modify':
            if NEIGHBOR_EXISTS is False:
                module.exit_json(
                    skipped=True,
                    msg='BGP neighbor with IP %s does not exist on %s' % (neighbor, vrouter_name)
                )

        if command == 'vrouter-bgp-add':
            if NEIGHBOR_EXISTS is True:
                module.exit_json(
                    skipped=True,
                    msg='BGP neighbor with IP %s already exists on %s' % (neighbor, vrouter_name)
                )

    cli += ' %s vrouter-name %s neighbor %s ' % (command, vrouter_name, neighbor)

    # add/modify share all tunable options; remove only needs the neighbor.
    # NOTE(review): the bounds below are passed to is_valid() as strings --
    # is_valid() must coerce them to int before comparing.
    if command == 'vrouter-bgp-add' or command == 'vrouter-bgp-modify':
        if weight:
            cli += ' weight ' + weight
        if multi_protocol:
            cli += ' multi-protocol ' + multi_protocol
        if prefix_list_in:
            cli += ' prefix-list-in ' + prefix_list_in
        if neighbor_holdtime:
            is_valid(module, 'neighbor holdtime', neighbor_holdtime, '0', '65535')
            cli += ' neighbor-holdtime ' + neighbor_holdtime
        if connect_retry_interval:
            is_valid(module, 'connect retry interval', connect_retry_interval, '0', '65535')
            cli += ' connect-retry-interval ' + connect_retry_interval
        if advertisement_interval:
            is_valid(module, 'advertisement interval', advertisement_interval, '0', '65535')
            cli += ' advertisement-interval ' + advertisement_interval
        if route_map_out:
            cli += ' route-map-out ' + route_map_out
        if update_source:
            cli += ' update-source ' + update_source
        if neighbor_keepalive_interval:
            is_valid(module, 'neighbor keepalive interval', neighbor_keepalive_interval, '0', '65535')
            cli += ' neighbor-keepalive-interval ' + neighbor_keepalive_interval
        if max_prefix:
            cli += ' max-prefix ' + max_prefix
        if interface:
            cli += ' interface ' + interface
        if password:
            cli += ' password ' + password
        if route_map_in:
            cli += ' route-map-in ' + route_map_in
        if ebgp_multihop:
            is_valid(module, 'ebgp_multihop', ebgp_multihop, '1', '255')
            cli += ' ebgp-multihop ' + ebgp_multihop
        if remote_as:
            cli += ' remote-as ' + remote_as
        if prefix_list_out:
            cli += ' prefix-list-out ' + prefix_list_out

        # Tri-state booleans: booleanArgs() emits the positive flag for
        # True, the negative flag for False, and nothing for None.
        cli += booleanArgs(send_community, 'send-community', 'no-send-community')
        cli += booleanArgs(route_reflector_client, 'route-reflector-client', 'no-route-reflector-client')
        cli += booleanArgs(default_originate, 'default-originate', 'no-default-originate')
        cli += booleanArgs(bfd, 'bfd', 'no-bfd')
        cli += booleanArgs(next_hop_self, 'next-hop-self', 'no-next-hop-self')
        cli += booleanArgs(allowas_in, 'allowas-in', 'no-allowas-in')
        cli += booleanArgs(bfd_multihop, 'bfd-multihop', 'no-bfd-multihop')
        cli += booleanArgs(soft_reconfig_inbound, 'soft-reconfig-inbound', 'no-soft-reconfig-inbound')
        cli += booleanArgs(override_capability, 'override-capability', 'no-override-capability')
        cli += booleanArgs(max_prefix_warn_only, 'max-prefix-warn-only', 'no-max-prefix-warn-only')

    # Route-map removal is only supported by the modify command.
    if command == 'vrouter-bgp-modify':
        if no_route_map_out:
            cli += ' no-route-map-out ' + no_route_map_out
        if no_route_map_in:
            cli += ' no-route-map-in ' + no_route_map_in

    run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 |
rolandmansilla/microblog | flask/lib/python2.7/site-packages/jinja2/sandbox.py | 324 | 13327 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import types
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2._compat import string_types, PY2
#: maximum number of items a range may produce
MAX_RANGE = 100000

#: attributes of function objects that are considered unsafe.
if PY2:
    UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
                                      'func_defaults', 'func_globals'])
else:
    # On versions > python 2 the special attributes on functions are gone,
    # but they remain on methods and generators for whatever reason.
    UNSAFE_FUNCTION_ATTRIBUTES = set()

#: unsafe method attributes.  function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])

#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = set(['gi_frame', 'gi_code'])

import warnings

# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
                        module='jinja2.sandbox')

from collections import deque

# Type families whose mutating methods get blocked by the immutable
# sandbox; extended below with the legacy and abstract variants.
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)

# on python 2.x we can register the user collection types
try:
    from UserDict import UserDict, DictMixin
    from UserList import UserList
    _mutable_mapping_types += (UserDict, DictMixin)
    # BUGFIX: UserList is a sequence, not a set -- register it with the
    # sequence types so its mutating methods (append, sort, ...) are the
    # ones that get blocked.
    _mutable_sequence_types += (UserList,)
except ImportError:
    pass

# if sets is still available, register the mutable set from there as well
try:
    from sets import Set
    _mutable_set_types += (Set,)
except ImportError:
    pass

# register the mutable abstract base classes.  On Python 3.3+ they live in
# collections.abc (importing them straight from collections stopped working
# in 3.10, which silently weakened the sandbox); fall back to collections
# for older interpreters.
try:
    try:
        from collections.abc import MutableSet, MutableMapping, MutableSequence
    except ImportError:
        from collections import MutableSet, MutableMapping, MutableSequence
    _mutable_set_types += (MutableSet,)
    _mutable_mapping_types += (MutableMapping,)
    _mutable_sequence_types += (MutableSequence,)
except ImportError:
    pass

#: (type family, mutating method names) pairs consulted by
#: :func:`modifies_known_mutable`; the first matching family wins.
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.

    Accepts the same arguments as the builtin :func:`range`.
    """
    result = range(*args)
    if len(result) <= MAX_RANGE:
        return result
    raise OverflowError('range too big, maximum size for range is %d' %
                        MAX_RANGE)
def unsafe(f):
    """Marks a function or method as unsafe.

    ::

        @unsafe
        def delete(self):
            pass
    """
    # The sandbox's is_safe_callable() refuses anything carrying this flag.
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute.  For
    example this function returns `True` for the `func_code` attribute of
    python objects.  This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(str, "mro")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # Per object kind, decide whether the attribute would expose internals
    # such as code objects, frames or globals.
    if isinstance(obj, types.FunctionType):
        internal = attr in UNSAFE_FUNCTION_ATTRIBUTES
    elif isinstance(obj, types.MethodType):
        internal = (attr in UNSAFE_FUNCTION_ATTRIBUTES or
                    attr in UNSAFE_METHOD_ATTRIBUTES)
    elif isinstance(obj, type):
        internal = attr == 'mro'
    elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
        # Every attribute of code/traceback/frame objects is off limits.
        return True
    elif isinstance(obj, types.GeneratorType):
        internal = attr in UNSAFE_GENERATOR_ATTRIBUTES
    else:
        internal = False
    # Dunder attributes are always treated as internal.
    return internal or attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called.  It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.

    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False

    If called with an unsupported object (such as unicode) `False` is
    returned.

    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # Pick the first registered type family the object belongs to and test
    # the attribute against that family's mutating methods.
    methods = next((mutators for typespec, mutators in _mutable_spec
                    if isinstance(obj, typespec)), None)
    if methods is None:
        return False
    return attr in methods
class SandboxedEnvironment(Environment):
    """The sandboxed environment.  It works like the regular environment but
    tells the compiler to generate sandboxed code.  Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.

    If the template tries to access insecure code a :exc:`SecurityError` is
    raised.  However also other exceptions may occur during the rendering so
    the caller has to ensure that all exceptions are caught.

    .. note::
       NOTE(review): this version does not intercept ``str.format`` --
       newer Jinja2 releases wrap it to prevent a known sandbox escape;
       consider backporting that protection.
    """
    # Flag read by the compiler to emit sandboxed lookup/call code.
    sandboxed = True

    #: default callback table for the binary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }

    #: default callback table for the unary operators.  A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }

    #: a set of binary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()

    #: a set of unary operators that should be intercepted.  Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator.  The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function.  Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()

    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime.  If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator.  The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.

        The following unary operators are interceptable: ``+`` and ``-``

        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.

        .. versionadded:: 2.6
        """
        return False

    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Expose the bounded range so templates cannot allocate huge ranges.
        self.globals['range'] = safe_range
        # Per-instance copies so instances can customize the operator
        # callbacks without mutating the class-level defaults.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()

    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access.  Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))

    def is_safe_callable(self, obj):
        """Check if an object is safely callable.  Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True.  Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))

    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)

    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator.  This can
        be used to fine tune the behavior of certain operators.

        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)

    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Item lookup failed -- for string keys fall back to attribute
            # access, mirroring the foo.bar / foo['bar'] equivalence of
            # Jinja templates.
            if isinstance(argument, string_types):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        # The attribute exists but the sandbox blocks it.
                        return self.unsafe_undefined(obj, argument)
        return self.undefined(obj=obj, name=argument)

    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute.  The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Attribute missing -- fall back to item lookup.
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            # The attribute exists but the sandbox blocks it.
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)

    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)

    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """

    def is_safe_attribute(self, obj, attr, value):
        # The attribute has to clear the base sandbox rules first and must
        # additionally not be a mutating method of a known mutable builtin.
        if SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
            return not modifies_known_mutable(obj, attr)
        return False
| bsd-3-clause |
lbishal/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel
# Training data: 20 samples of a noisy sine on [0, 5].
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])
# First run
# Initialize with a very large length scale; per the module docstring the
# optimizer may then converge to the high-noise local maximum of the LML.
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
# Predictive mean and covariance on a dense grid; the shaded band below is
# +/- one predictive standard deviation.
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
                 y_mean + np.sqrt(np.diag(y_cov)),
                 alpha=0.5, color='k')
# True noise-free function (red curve) and the noisy observations (dots).
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
          % (kernel, gp.kernel_,
             gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Second run
# Initialize with a small noise level instead; this starting point leads to
# the other (higher-LML) optimum described in the module docstring.
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
                 y_mean + np.sqrt(np.diag(y_cov)),
                 alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
          % (kernel, gp.kernel_,
             gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()
# Plot LML landscape
# Evaluate the LML on a log-spaced (length-scale, noise-level) grid; the
# first kernel parameter is held fixed at 0.36 and theta values are passed
# in log space (hence np.log).
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
        for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
# The computed vmax is deliberately overridden to cap the contour levels,
# keeping the plot focused on the region near the two local maxima.
vmax = 50
plt.contour(Theta0, Theta1, -LML,
            levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
            norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
Yong-Lee/django | tests/template_tests/syntax_tests/test_cache.py | 299 | 6777 | from django.core.cache import cache
from django.template import Context, Engine, TemplateSyntaxError
from django.test import SimpleTestCase, override_settings
from ..utils import setup
class CacheTagTests(SimpleTestCase):
    """Rendering tests for the ``{% cache %}`` template tag."""
    # Template libraries available to the engine the @setup decorator builds
    # for each test (presumably as self.engine; the decorator is imported
    # from ..utils and not visible here).
    libraries = {
        'cache': 'django.templatetags.cache',
        'custom': 'template_tests.templatetags.custom',
    }
    def tearDown(self):
        # Reset the cache so fragments cached by one test don't leak into
        # the next.
        cache.clear()
    @setup({'cache03': '{% load cache %}{% cache 2 test %}cache03{% endcache %}'})
    def test_cache03(self):
        """A cached fragment renders its contents on first use."""
        output = self.engine.render_to_string('cache03')
        self.assertEqual(output, 'cache03')
    @setup({
        'cache03': '{% load cache %}{% cache 2 test %}cache03{% endcache %}',
        'cache04': '{% load cache %}{% cache 2 test %}cache04{% endcache %}',
    })
    def test_cache04(self):
        """Fragments sharing the cache name 'test' reuse the first cached content."""
        self.engine.render_to_string('cache03')
        output = self.engine.render_to_string('cache04')
        self.assertEqual(output, 'cache03')
    @setup({'cache05': '{% load cache %}{% cache 2 test foo %}cache05{% endcache %}'})
    def test_cache05(self):
        """A fragment varying on a context variable renders normally."""
        output = self.engine.render_to_string('cache05', {'foo': 1})
        self.assertEqual(output, 'cache05')
    @setup({'cache06': '{% load cache %}{% cache 2 test foo %}cache06{% endcache %}'})
    def test_cache06(self):
        output = self.engine.render_to_string('cache06', {'foo': 2})
        self.assertEqual(output, 'cache06')
    @setup({
        'cache05': '{% load cache %}{% cache 2 test foo %}cache05{% endcache %}',
        'cache07': '{% load cache %}{% cache 2 test foo %}cache07{% endcache %}',
    })
    def test_cache07(self):
        """Equal vary-on values hit the same cache entry across templates."""
        context = {'foo': 1}
        self.engine.render_to_string('cache05', context)
        output = self.engine.render_to_string('cache07', context)
        self.assertEqual(output, 'cache05')
    @setup({
        'cache06': '{% load cache %}{% cache 2 test foo %}cache06{% endcache %}',
        'cache08': '{% load cache %}{% cache time test foo %}cache08{% endcache %}',
    })
    def test_cache08(self):
        """
        Allow first argument to be a variable.
        """
        context = {'foo': 2, 'time': 2}
        self.engine.render_to_string('cache06', context)
        output = self.engine.render_to_string('cache08', context)
        self.assertEqual(output, 'cache06')
    # Raise exception if we don't have at least 2 args, first one integer.
    @setup({'cache11': '{% load cache %}{% cache %}{% endcache %}'})
    def test_cache11(self):
        # No arguments at all: rejected at template-parse time.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('cache11')
    @setup({'cache12': '{% load cache %}{% cache 1 %}{% endcache %}'})
    def test_cache12(self):
        # Timeout given but no fragment name: rejected at parse time.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('cache12')
    @setup({'cache13': '{% load cache %}{% cache foo bar %}{% endcache %}'})
    def test_cache13(self):
        # 'foo' is not supplied in the context, so the timeout variable
        # cannot be resolved at render time.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('cache13')
    @setup({'cache14': '{% load cache %}{% cache foo bar %}{% endcache %}'})
    def test_cache14(self):
        # A non-numeric timeout value ('fail') is rejected at render time.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('cache14', {'foo': 'fail'})
    @setup({'cache15': '{% load cache %}{% cache foo bar %}{% endcache %}'})
    def test_cache15(self):
        # A list is likewise not a usable timeout value.
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('cache15', {'foo': []})
    @setup({'cache16': '{% load cache %}{% cache 1 foo bar %}{% endcache %}'})
    def test_cache16(self):
        """
        Regression test for #7460.
        """
        output = self.engine.render_to_string('cache16', {'foo': 'foo', 'bar': 'with spaces'})
        self.assertEqual(output, '')
    @setup({'cache17': '{% load cache %}{% cache 10 long_cache_key poem %}Some Content{% endcache %}'})
    def test_cache17(self):
        """
        Regression test for #11270.
        """
        output = self.engine.render_to_string('cache17', {'poem': 'Oh freddled gruntbuggly/'
                                                                  'Thy micturations are to me/'
                                                                  'As plurdled gabbleblotchits/'
                                                                  'On a lurgid bee/'
                                                                  'That mordiously hath bitled out/'
                                                                  'Its earted jurtles/'
                                                                  'Into a rancid festering/'
                                                                  'Or else I shall rend thee in the gobberwarts'
                                                                  'with my blurglecruncheon/'
                                                                  'See if I dont.'})
        self.assertEqual(output, 'Some Content')
    @setup({'cache18': '{% load cache custom %}{% cache 2|noop:"x y" cache18 %}cache18{% endcache %}'})
    def test_cache18(self):
        """
        Test whitespace in filter arguments
        """
        output = self.engine.render_to_string('cache18')
        self.assertEqual(output, 'cache18')
class CacheTests(SimpleTestCase):
    """Tests for the cache tag's parsed node and backend selection."""
    @classmethod
    def setUpClass(cls):
        # A single shared engine with only the cache library loaded.
        cls.engine = Engine(libraries={'cache': 'django.templatetags.cache'})
        super(CacheTests, cls).setUpClass()
    def test_cache_regression_20130(self):
        # Regression test for #20130: the parsed cache node records its
        # fragment name.
        t = self.engine.from_string('{% load cache %}{% cache 1 regression_20130 %}foo{% endcache %}')
        cachenode = t.nodelist[1]
        self.assertEqual(cachenode.fragment_name, 'regression_20130')
    @override_settings(CACHES={
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'default',
        },
        'template_fragments': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
            'LOCATION': 'fragments',
        },
    })
    def test_cache_fragment_cache(self):
        """
        When a cache called "template_fragments" is present, the cache tag
        will use it in preference to 'default'
        """
        t1 = self.engine.from_string('{% load cache %}{% cache 1 fragment %}foo{% endcache %}')
        t2 = self.engine.from_string('{% load cache %}{% cache 1 fragment using="default" %}bar{% endcache %}')
        ctx = Context()
        o1 = t1.render(ctx)
        o2 = t2.render(ctx)
        # Same fragment name, different backends: each template renders its
        # own content instead of sharing a cache entry.
        self.assertEqual(o1, 'foo')
        self.assertEqual(o2, 'bar')
    def test_cache_missing_backend(self):
        """
        When a cache that doesn't exist is specified, the cache tag will
        raise a TemplateSyntaxError
        """
        t = self.engine.from_string('{% load cache %}{% cache 1 backend using="unknown" %}bar{% endcache %}')
        ctx = Context()
        with self.assertRaises(TemplateSyntaxError):
            t.render(ctx)
| bsd-3-clause |
eerwitt/tensorflow | tensorflow/python/framework/dtypes.py | 12 | 19135 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
class DType(object):
  """Represents the type of the elements in a `Tensor`.
  The following `DType` objects are defined:
  * `tf.float16`: 16-bit half-precision floating-point.
  * `tf.float32`: 32-bit single-precision floating-point.
  * `tf.float64`: 64-bit double-precision floating-point.
  * `tf.bfloat16`: 16-bit truncated floating-point.
  * `tf.complex64`: 64-bit single-precision complex.
  * `tf.complex128`: 128-bit double-precision complex.
  * `tf.int8`: 8-bit signed integer.
  * `tf.uint8`: 8-bit unsigned integer.
  * `tf.uint16`: 16-bit unsigned integer.
  * `tf.int16`: 16-bit signed integer.
  * `tf.int32`: 32-bit signed integer.
  * `tf.int64`: 64-bit signed integer.
  * `tf.bool`: Boolean.
  * `tf.string`: String.
  * `tf.qint8`: Quantized 8-bit signed integer.
  * `tf.quint8`: Quantized 8-bit unsigned integer.
  * `tf.qint16`: Quantized 16-bit signed integer.
  * `tf.quint16`: Quantized 16-bit unsigned integer.
  * `tf.qint32`: Quantized 32-bit signed integer.
  * `tf.resource`: Handle to a mutable resource.
  In addition, variants of these types with the `_ref` suffix are
  defined for reference-typed tensors.
  The `tf.as_dtype()` function converts numpy types and string type
  names to a `DType` object.
  """
  def __init__(self, type_enum):
    """Creates a new `DataType`.
    NOTE(mrry): In normal circumstances, you should not need to
    construct a `DataType` object directly. Instead, use the
    `tf.as_dtype()` function.
    Args:
      type_enum: A `types_pb2.DataType` enum value.
    Raises:
      TypeError: If `type_enum` is not a value `types_pb2.DataType`.
    """
    # TODO(mrry): Make the necessary changes (using __new__) to ensure
    # that calling this returns one of the interned values.
    type_enum = int(type_enum)
    if (type_enum not in types_pb2.DataType.values()
        or type_enum == types_pb2.DT_INVALID):
      raise TypeError(
          "type_enum is not a valid types_pb2.DataType: %s" % type_enum)
    self._type_enum = type_enum
  @property
  def _is_ref_dtype(self):
    """Returns `True` if this `DType` represents a reference type."""
    # Reference-type enum values are their base value plus 100 (see
    # _as_ref / base_dtype below).
    return self._type_enum > 100
  @property
  def _as_ref(self):
    """Returns a reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return self
    else:
      return _INTERN_TABLE[self._type_enum + 100]
  @property
  def base_dtype(self):
    """Returns a non-reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return _INTERN_TABLE[self._type_enum - 100]
    else:
      return self
  @property
  def real_dtype(self):
    """Returns the dtype correspond to this dtype's real part."""
    base = self.base_dtype
    if base == complex64:
      return float32
    elif base == complex128:
      return float64
    else:
      return self
  @property
  def is_numpy_compatible(self):
    # Resource handles are the only dtypes with no numpy representation.
    return (self._type_enum != types_pb2.DT_RESOURCE and
            self._type_enum != types_pb2.DT_RESOURCE_REF)
  @property
  def as_numpy_dtype(self):
    """Returns a `numpy.dtype` based on this `DType`."""
    return _TF_TO_NP[self._type_enum]
  @property
  def as_datatype_enum(self):
    """Returns a `types_pb2.DataType` enum value based on this `DType`."""
    return self._type_enum
  @property
  def is_bool(self):
    """Returns whether this is a boolean data type"""
    return self.base_dtype == bool
  @property
  def is_integer(self):
    """Returns whether this is a (non-quantized) integer type."""
    return (self.is_numpy_compatible and not self.is_quantized and
            issubclass(self.as_numpy_dtype, np.integer))
  @property
  def is_floating(self):
    """Returns whether this is a (non-quantized, real) floating point type."""
    return self.is_numpy_compatible and issubclass(self.as_numpy_dtype,
                                                   np.floating)
  @property
  def is_complex(self):
    """Returns whether this is a complex floating point type."""
    return self.base_dtype in (complex64, complex128)
  @property
  def is_quantized(self):
    """Returns whether this is a quantized data type."""
    return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]
  @property
  def is_unsigned(self):
    """Returns whether this type is unsigned.
    Non-numeric, unordered, and quantized types are not considered unsigned, and
    this function returns `False`.
    Returns:
      Whether a `DType` is unsigned.
    """
    try:
      return self.min == 0
    except TypeError:
      return False
  @property
  def min(self):
    """Returns the minimum representable value in this data type.
    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find minimum value of %s." % self)
    # there is no simple way to get the min value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).min
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).min
      except:
        raise TypeError("Cannot find minimum value of %s." % self)
  @property
  def max(self):
    """Returns the maximum representable value in this data type.
    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find maximum value of %s." % self)
    # there is no simple way to get the max value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).max
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).max
      except:
        raise TypeError("Cannot find maximum value of %s." % self)
  @property
  def limits(self, clip_negative=True):
    """Return intensity limits, i.e. (min, max) tuple, of the dtype.
    Args:
      clip_negative : bool, optional
          If True, clip the negative range (i.e. return 0 for min intensity)
          even if the image dtype allows negative values.
    Returns
      min, max : tuple
        Lower and upper intensity limits.
    """
    # NOTE(review): because this is a property, it is read via attribute
    # access and `clip_negative` always takes its default (True); callers
    # cannot pass a different value.
    min, max = dtype_range[self.as_numpy_dtype]
    if clip_negative:
      min = 0
    return min, max
  def is_compatible_with(self, other):
    """Returns True if the `other` DType will be converted to this DType.
    The conversion rules are as follows:
    ```python
    DType(T)       .is_compatible_with(DType(T))        == True
    DType(T)       .is_compatible_with(DType(T).as_ref) == True
    DType(T).as_ref.is_compatible_with(DType(T))        == False
    DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
    ```
    Args:
      other: A `DType` (or object that may be converted to a `DType`).
    Returns:
      True if a Tensor of the `other` `DType` will be implicitly converted to
      this `DType`.
    """
    other = as_dtype(other)
    return self._type_enum in (
        other.as_datatype_enum, other.base_dtype.as_datatype_enum)
  def __eq__(self, other):
    """Returns True iff this DType refers to the same type as `other`."""
    if other is None:
      return False
    try:
      dtype = as_dtype(other).as_datatype_enum
      return self._type_enum == dtype  # pylint: disable=protected-access
    except TypeError:
      return False
  def __ne__(self, other):
    """Returns True iff self != other."""
    return not self.__eq__(other)
  @property
  def name(self):
    """Returns the string name for this `DType`."""
    return _TYPE_TO_STRING[self._type_enum]
  def __str__(self):
    return "<dtype: %r>" % self.name
  def __repr__(self):
    return "tf." + self.name
  def __hash__(self):
    # Hash on the enum so interned and non-interned instances of the same
    # dtype collide (consistent with __eq__).
    return self._type_enum
  @property
  def size(self):
    # Resource handles have no numpy dtype; report a nominal size of 1.
    if self._type_enum == types_pb2.DT_RESOURCE:
      return 1
    return np.dtype(self.as_numpy_dtype).itemsize
# Define data type range of numpy dtype
# (consumed by DType.limits; the (-1, 1) float entries are image-intensity
# conventions, per the DType.limits docstring).
dtype_range = {np.bool_: (False, True),
               np.bool8: (False, True),
               np.uint8: (0, 255),
               np.uint16: (0, 65535),
               np.int8: (-128, 127),
               np.int16: (-32768, 32767),
               np.int64: (-2**63, 2**63 - 1),
               np.uint64: (0, 2**64 - 1),
               np.int32: (-2**31, 2**31 - 1),
               np.uint32: (0, 2**32 - 1),
               np.float32: (-1, 1),
               np.float64: (-1, 1)}
# Define standard wrappers for the types_pb2.DataType enum.
resource = DType(types_pb2.DT_RESOURCE)
float16 = DType(types_pb2.DT_HALF)
half = float16
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
# Reference-typed variants (enum value = base + 100, see DType._as_ref).
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
# DType._as_ref / DType.base_dtype index into this table by enum value.
_INTERN_TABLE = {
    types_pb2.DT_HALF: float16,
    types_pb2.DT_FLOAT: float32,
    types_pb2.DT_DOUBLE: float64,
    types_pb2.DT_INT32: int32,
    types_pb2.DT_UINT8: uint8,
    types_pb2.DT_UINT16: uint16,
    types_pb2.DT_INT16: int16,
    types_pb2.DT_INT8: int8,
    types_pb2.DT_STRING: string,
    types_pb2.DT_COMPLEX64: complex64,
    types_pb2.DT_COMPLEX128: complex128,
    types_pb2.DT_INT64: int64,
    types_pb2.DT_BOOL: bool,
    types_pb2.DT_QINT8: qint8,
    types_pb2.DT_QUINT8: quint8,
    types_pb2.DT_QINT16: qint16,
    types_pb2.DT_QUINT16: quint16,
    types_pb2.DT_QINT32: qint32,
    types_pb2.DT_BFLOAT16: bfloat16,
    types_pb2.DT_RESOURCE: resource,
    types_pb2.DT_HALF_REF: float16_ref,
    types_pb2.DT_FLOAT_REF: float32_ref,
    types_pb2.DT_DOUBLE_REF: float64_ref,
    types_pb2.DT_INT32_REF: int32_ref,
    types_pb2.DT_UINT8_REF: uint8_ref,
    types_pb2.DT_UINT16_REF: uint16_ref,
    types_pb2.DT_INT16_REF: int16_ref,
    types_pb2.DT_INT8_REF: int8_ref,
    types_pb2.DT_STRING_REF: string_ref,
    types_pb2.DT_COMPLEX64_REF: complex64_ref,
    types_pb2.DT_COMPLEX128_REF: complex128_ref,
    types_pb2.DT_INT64_REF: int64_ref,
    types_pb2.DT_BOOL_REF: bool_ref,
    types_pb2.DT_QINT8_REF: qint8_ref,
    types_pb2.DT_QUINT8_REF: quint8_ref,
    types_pb2.DT_QINT16_REF: qint16_ref,
    types_pb2.DT_QUINT16_REF: quint16_ref,
    types_pb2.DT_QINT32_REF: qint32_ref,
    types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
    types_pb2.DT_RESOURCE_REF: resource_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
_TYPE_TO_STRING = {
    types_pb2.DT_HALF: "float16",
    types_pb2.DT_FLOAT: "float32",
    types_pb2.DT_DOUBLE: "float64",
    types_pb2.DT_INT32: "int32",
    types_pb2.DT_UINT8: "uint8",
    types_pb2.DT_UINT16: "uint16",
    types_pb2.DT_INT16: "int16",
    types_pb2.DT_INT8: "int8",
    types_pb2.DT_STRING: "string",
    types_pb2.DT_COMPLEX64: "complex64",
    types_pb2.DT_COMPLEX128: "complex128",
    types_pb2.DT_INT64: "int64",
    types_pb2.DT_BOOL: "bool",
    types_pb2.DT_QINT8: "qint8",
    types_pb2.DT_QUINT8: "quint8",
    types_pb2.DT_QINT16: "qint16",
    types_pb2.DT_QUINT16: "quint16",
    types_pb2.DT_QINT32: "qint32",
    types_pb2.DT_BFLOAT16: "bfloat16",
    types_pb2.DT_RESOURCE: "resource",
    types_pb2.DT_HALF_REF: "float16_ref",
    types_pb2.DT_FLOAT_REF: "float32_ref",
    types_pb2.DT_DOUBLE_REF: "float64_ref",
    types_pb2.DT_INT32_REF: "int32_ref",
    types_pb2.DT_UINT8_REF: "uint8_ref",
    types_pb2.DT_UINT16_REF: "uint16_ref",
    types_pb2.DT_INT16_REF: "int16_ref",
    types_pb2.DT_INT8_REF: "int8_ref",
    types_pb2.DT_STRING_REF: "string_ref",
    types_pb2.DT_COMPLEX64_REF: "complex64_ref",
    types_pb2.DT_COMPLEX128_REF: "complex128_ref",
    types_pb2.DT_INT64_REF: "int64_ref",
    types_pb2.DT_BOOL_REF: "bool_ref",
    types_pb2.DT_QINT8_REF: "qint8_ref",
    types_pb2.DT_QUINT8_REF: "quint8_ref",
    types_pb2.DT_QINT16_REF: "qint16_ref",
    types_pb2.DT_QUINT16_REF: "quint16_ref",
    types_pb2.DT_QINT32_REF: "qint32_ref",
    types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
    types_pb2.DT_RESOURCE_REF: "resource_ref",
}
# Inverse of _TYPE_TO_STRING: string name -> interned DType.
_STRING_TO_TF = {value: _INTERN_TABLE[key]
                 for key, value in _TYPE_TO_STRING.items()}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
# Stored as a frozenset of pairs (not a dict) and scanned linearly by
# as_dtype(), because np.dtype equality rather than hashing is required.
_NP_TO_TF = frozenset([
    (np.float16, float16),
    (np.float32, float32),
    (np.float64, float64),
    (np.int32, int32),
    (np.int64, int64),
    (np.uint8, uint8),
    (np.uint16, uint16),
    (np.int16, int16),
    (np.int8, int8),
    (np.complex64, complex64),
    (np.complex128, complex128),
    (np.object, string),
    (np.bool, bool),
    (_np_qint8, qint8),
    (_np_quint8, quint8),
    (_np_qint16, qint16),
    (_np_quint16, quint16),
    (_np_qint32, qint32),
    # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
_TF_TO_NP = {
    types_pb2.DT_HALF: np.float16,
    types_pb2.DT_FLOAT: np.float32,
    types_pb2.DT_DOUBLE: np.float64,
    types_pb2.DT_INT32: np.int32,
    types_pb2.DT_UINT8: np.uint8,
    types_pb2.DT_UINT16: np.uint16,
    types_pb2.DT_INT16: np.int16,
    types_pb2.DT_INT8: np.int8,
    # NOTE(touts): For strings we use np.object as it supports variable length
    # strings.
    types_pb2.DT_STRING: np.object,
    types_pb2.DT_COMPLEX64: np.complex64,
    types_pb2.DT_COMPLEX128: np.complex128,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: np.bool,
    types_pb2.DT_QINT8: _np_qint8,
    types_pb2.DT_QUINT8: _np_quint8,
    types_pb2.DT_QINT16: _np_qint16,
    types_pb2.DT_QUINT16: _np_quint16,
    types_pb2.DT_QINT32: _np_qint32,
    types_pb2.DT_BFLOAT16: np.uint16,
    # Ref types
    types_pb2.DT_HALF_REF: np.float16,
    types_pb2.DT_FLOAT_REF: np.float32,
    types_pb2.DT_DOUBLE_REF: np.float64,
    types_pb2.DT_INT32_REF: np.int32,
    types_pb2.DT_UINT8_REF: np.uint8,
    types_pb2.DT_UINT16_REF: np.uint16,
    types_pb2.DT_INT16_REF: np.int16,
    types_pb2.DT_INT8_REF: np.int8,
    types_pb2.DT_STRING_REF: np.object,
    types_pb2.DT_COMPLEX64_REF: np.complex64,
    types_pb2.DT_COMPLEX128_REF: np.complex128,
    types_pb2.DT_INT64_REF: np.int64,
    types_pb2.DT_BOOL_REF: np.bool,
    types_pb2.DT_QINT8_REF: _np_qint8,
    types_pb2.DT_QUINT8_REF: _np_quint8,
    types_pb2.DT_QINT16_REF: _np_qint16,
    types_pb2.DT_QUINT16_REF: _np_quint16,
    types_pb2.DT_QINT32_REF: _np_qint32,
    types_pb2.DT_BFLOAT16_REF: np.uint16,
}
# All quantized dtypes, base and ref variants alike.
QUANTIZED_DTYPES = frozenset(
    [qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
     quint16_ref, qint32_ref])
def as_dtype(type_value):
  """Converts the given `type_value` to a `DType`.
  Args:
    type_value: A value that can be converted to a `tf.DType`
      object: an existing `tf.DType`, a `types_pb2.DataType` enum value,
      a string type name, or a `numpy.dtype`.
  Returns:
    A `DType` corresponding to `type_value`.
  Raises:
    TypeError: If `type_value` cannot be converted to a `DType`.
  """
  if isinstance(type_value, DType):
    return type_value
  # Fast-path lookups: first by DataType enum value, then by string name.
  for lookup_table in (_INTERN_TABLE, _STRING_TO_TF):
    try:
      return lookup_table[type_value]
    except KeyError:
      pass
  if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length, so there is no single
    # constant (np.string does not exist) to compare against; inspect
    # dtype.type instead to detect string dtypes.
    if type_value.type == np.string_ or type_value.type == np.unicode_:
      return string
  # Fall back to a linear scan over the numpy <-> TF pairs, relying on
  # np.dtype equality semantics.
  for np_type, tf_type in _NP_TO_TF:
    try:
      if np_type == type_value:
        return tf_type
    except TypeError as e:
      raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))
  raise TypeError(
      "Cannot convert value %r to a TensorFlow DType." % type_value)
| apache-2.0 |
karthik339/Agni | MainDemo/flask/lib/python2.7/site-packages/sqlalchemy/dialects/informix/base.py | 17 | 26186 | # informix/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# coding: gbk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Informix database.
.. note::
The Informix dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import datetime
from sqlalchemy import sql, schema, exc, pool, util
from sqlalchemy.sql import compiler, text
from sqlalchemy.engine import default, reflection
from sqlalchemy import types as sqltypes
# Informix reserved words (presumably consulted when deciding whether an
# identifier needs quoting; the consuming code is not visible in this chunk).
RESERVED_WORDS = set(
    ["abs", "absolute", "access", "access_method", "acos", "active", "add",
    "address", "add_months", "admin", "after", "aggregate", "alignment",
    "all", "allocate", "all_rows", "alter", "and", "ansi", "any", "append",
    "array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach",
    "attributes", "audit", "authentication", "authid", "authorization",
    "authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode",
    "avg", "avoid_execute", "avoid_fact", "avoid_full", "avoid_hash",
    "avoid_index", "avoid_index_sj", "avoid_multi_index", "avoid_nl",
    "avoid_star_join", "avoid_subqf", "based", "before", "begin",
    "between", "bigint", "bigserial", "binary", "bitand", "bitandnot",
    "bitnot", "bitor", "bitxor", "blob", "blobdir", "boolean", "both",
    "bound_impl_pdq", "buffered", "builtin", "by", "byte", "cache", "call",
    "cannothash", "cardinality", "cascade", "case", "cast", "ceil", "char",
    "character", "character_length", "char_length", "check", "class",
    "class_origin", "client", "clob", "clobdir", "close", "cluster",
    "clustersize", "cobol", "codeset", "collation", "collection",
    "column", "columns", "commit", "committed", "commutator", "component",
    "components", "concat", "concurrent", "connect", "connection",
    "connection_name", "connect_by_iscycle", "connect_by_isleaf",
    "connect_by_rootconst", "constraint", "constraints", "constructor",
    "context", "continue", "copy", "cos", "costfunc", "count", "crcols",
    "create", "cross", "current", "current_role", "currval", "cursor",
    "cycle", "database", "datafiles", "dataskip", "date", "datetime",
    "day", "dba", "dbdate", "dbinfo", "dbpassword", "dbsecadm",
    "dbservername", "deallocate", "debug", "debugmode", "debug_env", "dec",
    "decimal", "declare", "decode", "decrypt_binary", "decrypt_char",
    "dec_t", "default", "default_role", "deferred", "deferred_prepare",
    "define", "delay", "delete", "deleting", "delimited", "delimiter",
    "deluxe", "desc", "describe", "descriptor", "detach", "diagnostics",
    "directives", "dirty", "disable", "disabled", "disconnect", "disk",
    "distinct", "distributebinary", "distributesreferences",
    "distributions", "document", "domain", "donotdistribute", "dormant",
    "double", "drop", "dtime_t", "each", "elif", "else", "enabled",
    "encryption", "encrypt_aes", "encrypt_tdes", "end", "enum",
    "environment", "error", "escape", "exception", "exclusive", "exec",
    "execute", "executeanywhere", "exemption", "exists", "exit", "exp",
    "explain", "explicit", "express", "expression", "extdirectives",
    "extend", "extent", "external", "fact", "false", "far", "fetch",
    "file", "filetoblob", "filetoclob", "fillfactor", "filtering", "first",
    "first_rows", "fixchar", "fixed", "float", "floor", "flush", "for",
    "force", "forced", "force_ddl_exec", "foreach", "foreign", "format",
    "format_units", "fortran", "found", "fraction", "fragment",
    "fragments", "free", "from", "full", "function", "general", "get",
    "gethint", "global", "go", "goto", "grant", "greaterthan",
    "greaterthanorequal", "group", "handlesnulls", "hash", "having", "hdr",
    "hex", "high", "hint", "hold", "home", "hour", "idslbacreadarray",
    "idslbacreadset", "idslbacreadtree", "idslbacrules",
    "idslbacwritearray", "idslbacwriteset", "idslbacwritetree",
    "idssecuritylabel", "if", "ifx_auto_reprepare", "ifx_batchedread_table",
    "ifx_int8_t", "ifx_lo_create_spec_t", "ifx_lo_stat_t", "immediate",
    "implicit", "implicit_pdq", "in", "inactive", "increment", "index",
    "indexes", "index_all", "index_sj", "indicator", "informix", "init",
    "initcap", "inline", "inner", "inout", "insert", "inserting", "instead",
    "int", "int8", "integ", "integer", "internal", "internallength",
    "interval", "into", "intrvl_t", "is", "iscanonical", "isolation",
    "item", "iterator", "java", "join", "keep", "key", "label", "labeleq",
    "labelge", "labelglb", "labelgt", "labelle", "labellt", "labellub",
    "labeltostring", "language", "last", "last_day", "leading", "left",
    "length", "lessthan", "lessthanorequal", "let", "level", "like",
    "limit", "list", "listing", "load", "local", "locator", "lock", "locks",
    "locopy", "loc_t", "log", "log10", "logn", "long", "loop", "lotofile",
    "low", "lower", "lpad", "ltrim", "lvarchar", "matched", "matches",
    "max", "maxerrors", "maxlen", "maxvalue", "mdy", "median", "medium",
    "memory", "memory_resident", "merge", "message_length", "message_text",
    "middle", "min", "minute", "minvalue", "mod", "mode", "moderate",
    "modify", "module", "money", "month", "months_between", "mounting",
    "multiset", "multi_index", "name", "nchar", "negator", "new", "next",
    "nextval", "next_day", "no", "nocache", "nocycle", "nomaxvalue",
    "nomigrate", "nominvalue", "none", "non_dim", "non_resident", "noorder",
    "normal", "not", "notemplatearg", "notequal", "null", "nullif",
    "numeric", "numrows", "numtodsinterval", "numtoyminterval", "nvarchar",
    "nvl", "octet_length", "of", "off", "old", "on", "online", "only",
    "opaque", "opclass", "open", "optcompind", "optical", "optimization",
    "option", "or", "order", "ordered", "out", "outer", "output",
    "override", "page", "parallelizable", "parameter", "partition",
    "pascal", "passedbyvalue", "password", "pdqpriority", "percaltl_cos",
    "pipe", "pli", "pload", "policy", "pow", "power", "precision",
    "prepare", "previous", "primary", "prior", "private", "privileges",
    "procedure", "properties", "public", "put", "raise", "range", "raw",
    "read", "real", "recordend", "references", "referencing", "register",
    "rejectfile", "relative", "release", "remainder", "rename",
    "reoptimization", "repeatable", "replace", "replication", "reserve",
    "resolution", "resource", "restart", "restrict", "resume", "retain",
    "retainupdatelocks", "return", "returned_sqlstate", "returning",
    "returns", "reuse", "revoke", "right", "robin", "role", "rollback",
    "rollforward", "root", "round", "routine", "row", "rowid", "rowids",
    "rows", "row_count", "rpad", "rtrim", "rule", "sameas", "samples",
    "sampling", "save", "savepoint", "schema", "scroll", "seclabel_by_comp",
    "seclabel_by_name", "seclabel_to_char", "second", "secondary",
    "section", "secured", "security", "selconst", "select", "selecting",
    "selfunc", "selfuncargs", "sequence", "serial", "serial8",
    "serializable", "serveruuid", "server_name", "session", "set",
    "setsessionauth", "share", "short", "siblings", "signed", "sin",
    "sitename", "size", "skall", "skinhibit", "skip", "skshow",
    "smallfloat", "smallint", "some", "specific", "sql", "sqlcode",
    "sqlcontext", "sqlerror", "sqlstate", "sqlwarning", "sqrt",
    "stability", "stack", "standard", "start", "star_join", "statchange",
    "statement", "static", "statistics", "statlevel", "status", "stdev",
    "step", "stop", "storage", "store", "strategies", "string",
    "stringtolabel", "struct", "style", "subclass_origin", "substr",
    "substring", "sum", "support", "sync", "synonym", "sysdate",
    "sysdbclose", "sysdbopen", "system", "sys_connect_by_path", "table",
    "tables", "tan", "task", "temp", "template", "test", "text", "then",
    "time", "timeout", "to", "today", "to_char", "to_date",
    "to_dsinterval", "to_number", "to_yminterval", "trace", "trailing",
    "transaction", "transition", "tree", "trigger", "triggers", "trim",
    "true", "trunc", "truncate", "trusted", "type", "typedef", "typeid",
    "typename", "typeof", "uid", "uncommitted", "under", "union",
    "unique", "units", "unknown", "unload", "unlock", "unsigned",
    "update", "updating", "upon", "upper", "usage", "use",
    "uselastcommitted", "user", "use_hash", "use_nl", "use_subqf",
    "using", "value", "values", "var", "varchar", "variable", "variance",
    "variant", "varying", "vercols", "view", "violations", "void",
    "volatile", "wait", "warning", "weekday", "when", "whenever", "where",
    "while", "with", "without", "work", "write", "writedown", "writeup",
    "xadatasource", "xid", "xload", "xunload", "year"
    ])
class InfoDateTime(sqltypes.DateTime):
    """DateTime type for Informix.

    Informix DATETIME YEAR TO SECOND columns (see InfoTypeCompiler) carry no
    sub-second precision, so microseconds are dropped before binding.
    """

    def bind_processor(self, dialect):
        def process(value):
            # Truncate microseconds; anything else (including None) passes
            # through unchanged.
            if value is not None and value.microsecond:
                return value.replace(microsecond=0)
            return value
        return process
class InfoTime(sqltypes.Time):
    """Time type for Informix.

    Truncates microseconds on the way in, and unwraps ``datetime`` values the
    driver may hand back into plain ``time`` objects on the way out.
    """

    def bind_processor(self, dialect):
        def process(value):
            # Informix stores no sub-second precision; strip it before binding.
            if value is not None and value.microsecond:
                return value.replace(microsecond=0)
            return value
        return process

    def result_processor(self, dialect, coltype):
        def process(value):
            # The driver may return a full datetime; keep only the time part.
            return value.time() if isinstance(value, datetime.datetime) else value
        return process
# Map generic SQLAlchemy types to the Informix-specific implementations
# defined above (microsecond truncation / datetime unwrapping).
colspecs = {
    sqltypes.DateTime : InfoDateTime,
    sqltypes.TIMESTAMP: InfoDateTime,
    sqltypes.Time: InfoTime,
}
# Map Informix syscolumns.coltype codes to SQLAlchemy types.
#
# BUG FIX: the SmallFloat entry was keyed 3, silently shadowing FLOAT
# (duplicate dict keys: last one wins).  Per the Informix system catalog,
# SMALLFLOAT is coltype 4, so it now has its own key.  BLOB and CLOB were
# likewise both keyed -1; only the effective (last) CLOB mapping is kept,
# preserving the previous runtime behavior while making it explicit.
ischema_names = {
    0 : sqltypes.CHAR,        # CHAR
    1 : sqltypes.SMALLINT,    # SMALLINT
    2 : sqltypes.INTEGER,     # INT
    3 : sqltypes.FLOAT,       # FLOAT
    4 : sqltypes.Float,       # SMALLFLOAT
    5 : sqltypes.DECIMAL,     # DECIMAL
    6 : sqltypes.Integer,     # SERIAL
    7 : sqltypes.DATE,        # DATE
    8 : sqltypes.Numeric,     # MONEY
    10 : sqltypes.DATETIME,   # DATETIME
    11 : sqltypes.LargeBinary, # BYTE
    12 : sqltypes.TEXT,       # TEXT
    13 : sqltypes.VARCHAR,    # VARCHAR
    15 : sqltypes.NCHAR,      # NCHAR
    16 : sqltypes.NVARCHAR,   # NVARCHAR
    17 : sqltypes.Integer,    # INT8
    18 : sqltypes.Integer,    # Serial8
    43 : sqltypes.String,     # LVARCHAR
    -1 : sqltypes.CLOB,       # CLOB (BLOB shared this placeholder code)
}
class InfoTypeCompiler(compiler.GenericTypeCompiler):
    """Render Informix DDL spellings for types Informix names differently."""

    def visit_DATETIME(self, type_):
        # Informix DATETIME requires an explicit field qualifier.
        return "DATETIME YEAR TO SECOND"

    def visit_TIME(self, type_):
        # Informix has no TIME type; a time-of-day DATETIME span is used.
        return "DATETIME HOUR TO SECOND"

    def visit_TIMESTAMP(self, type_):
        return "DATETIME YEAR TO SECOND"

    def visit_large_binary(self, type_):
        # BYTE is the Informix binary large-object column type.
        return "BYTE"

    def visit_boolean(self, type_):
        # No native BOOLEAN here; booleans are stored as SMALLINT 0/1.
        return "SMALLINT"
class InfoSQLCompiler(compiler.SQLCompiler):
    """Statement compiler implementing Informix SQL dialect quirks."""

    def default_from(self):
        # Informix has no bare "SELECT expr"; select from a catalog table
        # guaranteed to contain exactly one matching row.
        return " from systables where tabname = 'systables' "

    def get_select_precolumns(self, select):
        # Informix renders OFFSET/LIMIT as SKIP/FIRST *before* the column
        # list; limit_clause() below is therefore a no-op.
        s = ""
        if select._offset:
            s += "SKIP %s " % select._offset
        if select._limit:
            s += "FIRST %s " % select._limit
        s += "DISTINCT " if select._distinct else ""
        return s

    def visit_select(self, select, asfrom=False, parens=True, **kw):
        text = compiler.SQLCompiler.visit_select(self, select, asfrom, parens, **kw)
        if asfrom and parens and self.dialect.server_version_info < (11,):
            # assuming that 11 version doesn't need this, not tested
            return "table(multiset" + text + ")"
        else:
            return text

    def limit_clause(self, select):
        # handled in get_select_precolumns() via SKIP/FIRST
        return ""

    def visit_function(self, func, **kw):
        # hoist the lowercased name so it is computed only once per call
        name = func.name.lower()
        if name == 'current_date':
            return "today"
        elif name == 'current_time':
            return "CURRENT HOUR TO SECOND"
        elif name in ('current_timestamp', 'now'):
            return "CURRENT YEAR TO SECOND"
        else:
            return compiler.SQLCompiler.visit_function(self, func, **kw)

    def visit_mod(self, binary, **kw):
        # Informix uses the MOD() function rather than the % operator.
        return "MOD(%s, %s)" % (self.process(binary.left), self.process(binary.right))
class InfoDDLCompiler(compiler.DDLCompiler):
    """DDL compiler for Informix.

    Informix places the constraint *name* at the end of a constraint clause,
    so the ``visit_*_constraint`` methods below are adapted copies of the
    base implementations in sql/compiler.py.
    """

    def visit_add_constraint(self, create):
        # (removed an unused local ``preparer`` binding)
        return "ALTER TABLE %s ADD CONSTRAINT %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )

    def get_column_specification(self, column, **kw):
        colspec = self.preparer.format_column(column)
        first = None
        if column.primary_key and column.autoincrement:
            try:
                # the first eligible integer PK column becomes SERIAL
                first = [c for c in column.table.primary_key.columns
                         if (c.autoincrement and
                             isinstance(c.type, sqltypes.Integer) and
                             not c.foreign_keys)].pop(0)
            except IndexError:
                pass

        if column is first:
            colspec += " SERIAL"
        else:
            colspec += " " + self.dialect.type_compiler.process(column.type)
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

        if not column.nullable:
            colspec += " NOT NULL"

        return colspec

    def get_column_default_string(self, column):
        # Informix wants numeric server defaults unquoted, so render them
        # through the SQL compiler instead of the generic quoting path.
        if (isinstance(column.server_default, schema.DefaultClause) and
                isinstance(column.server_default.arg, basestring)):
            if isinstance(column.type, (sqltypes.Integer, sqltypes.Numeric)):
                return self.sql_compiler.process(text(column.server_default.arg))

        return super(InfoDDLCompiler, self).get_column_default_string(column)

    ### Informix wants the constraint name at the end, hence this is c&p from sql/compiler.py
    def visit_primary_key_constraint(self, constraint):
        if len(constraint) == 0:
            return ''
        text = "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)

        if constraint.name is not None:
            text += " CONSTRAINT %s" % self.preparer.format_constraint(constraint)
        return text

    def visit_foreign_key_constraint(self, constraint):
        preparer = self.dialect.identifier_preparer
        remote_table = list(constraint._elements.values())[0].column.table
        text = "FOREIGN KEY (%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
                      for f in constraint._elements.values()),
            preparer.format_table(remote_table),
            ', '.join(preparer.quote(f.column.name, f.column.quote)
                      for f in constraint._elements.values())
        )
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)

        if constraint.name is not None:
            text += " CONSTRAINT %s " % \
                preparer.format_constraint(constraint)
        return text

    def visit_unique_constraint(self, constraint):
        text = "UNIQUE (%s)" % (', '.join(self.preparer.quote(c.name, c.quote) for c in constraint))
        text += self.define_constraint_deferrability(constraint)

        if constraint.name is not None:
            # BUG FIX: the separator space previously trailed the name
            # ("...)CONSTRAINT foo "); it must precede it, as in
            # visit_primary_key_constraint above.
            text += " CONSTRAINT %s" % self.preparer.format_constraint(constraint)
        return text
class InformixIdentifierPreparer(compiler.IdentifierPreparer):
    # Identifiers colliding with any Informix reserved word must be quoted.
    reserved_words = RESERVED_WORDS
class InformixDialect(default.DefaultDialect):
    """SQLAlchemy dialect for IBM Informix.

    Reflection is implemented directly against the Informix system catalog
    tables (systables, syscolumns, sysindexes, sysconstraints, ...).
    """

    name = 'informix'

    max_identifier_length = 128  # adjusts at runtime based on server version

    type_compiler = InfoTypeCompiler
    statement_compiler = InfoSQLCompiler
    ddl_compiler = InfoDDLCompiler
    colspecs = colspecs
    ischema_names = ischema_names
    preparer = InformixIdentifierPreparer
    default_paramstyle = 'qmark'

    def __init__(self, has_transactions=True, *args, **kwargs):
        # has_transactions=False supports non-logged databases, where
        # commit/rollback and SET ISOLATION are unavailable.
        self.has_transactions = has_transactions
        default.DefaultDialect.__init__(self, *args, **kwargs)

    def initialize(self, connection):
        super(InformixDialect, self).initialize(connection)

        # http://www.querix.com/support/knowledge-base/error_number_message/error_200
        if self.server_version_info < (9, 2):
            self.max_identifier_length = 18
        else:
            self.max_identifier_length = 128

    def do_begin(self, connection):
        cu = connection.cursor()
        # wait for locks instead of erroring out immediately
        cu.execute('SET LOCK MODE TO WAIT')
        if self.has_transactions:
            cu.execute('SET ISOLATION TO REPEATABLE READ')

    def do_commit(self, connection):
        if self.has_transactions:
            connection.commit()

    def do_rollback(self, connection):
        if self.has_transactions:
            connection.rollback()

    def _get_table_names(self, connection, schema, tabtype, **kw):
        """Return systables names of the given tabtype ('T' table, 'V' view).

        (Parameter renamed from ``type``, which shadowed the builtin.)
        """
        schema = schema or self.default_schema_name
        s = "select tabname, owner from systables where owner=? and tabtype=?"
        return [row[0] for row in connection.execute(s, schema, tabtype)]

    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        return self._get_table_names(connection, schema, 'T', **kw)

    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        return self._get_table_names(connection, schema, 'V', **kw)

    @reflection.cache
    def get_schema_names(self, connection, **kw):
        s = "select owner from systables"
        return [row[0] for row in connection.execute(s)]

    def has_table(self, connection, table_name, schema=None):
        schema = schema or self.default_schema_name
        cursor = connection.execute(
            """select tabname from systables where tabname=? and owner=?""",
            table_name, schema)
        return cursor.first() is not None

    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name
        c = connection.execute(
            """select colname, coltype, collength, t3.default, t1.colno from
                syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
                where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
                  and t3.tabid = t2.tabid and t3.colno = t1.colno
                order by t1.colno""", table_name, schema)

        primary_cols = self.get_primary_keys(connection, table_name, schema, **kw)

        columns = []
        rows = c.fetchall()
        for name, colattr, collength, default, colno in rows:
            name = name.lower()

            autoincrement = False
            primary_key = False

            if name in primary_cols:
                primary_key = True

            # in 7.31, coltype = 0x000
            #                       ^^-- column type
            #                      ^-- 1 not null, 0 null
            not_nullable, coltype = divmod(colattr, 256)

            # sysdefaults stores "<kind> <value>"; keep only the value for
            # non-string types
            if coltype not in (0, 13) and default:
                default = default.split()[-1]

            if coltype == 6:  # Serial, mark as autoincrement
                autoincrement = True

            if coltype == 0 or coltype == 13:  # char, varchar
                coltype = ischema_names[coltype](collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5:  # decimal: collength packs precision/scale
                precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = sqltypes.Numeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE

            column_info = dict(name=name, type=coltype, nullable=not not_nullable,
                               default=default, autoincrement=autoincrement,
                               primary_key=primary_key)
            columns.append(column_info)
        return columns

    @reflection.cache
    def get_foreign_keys(self, connection, table_name, schema=None, **kw):
        schema_sel = schema or self.default_schema_name
        # BUG FIX: the part-column lists below previously read
        # "part11, part11, part12, part13, part4, part15, part16" --
        # duplicating part11/part4 and omitting part14 -- for both t3 and t9.
        c = connection.execute(
            """select t1.constrname as cons_name,
                 t4.colname as local_column, t7.tabname as remote_table,
                 t6.colname as remote_column, t7.owner as remote_owner
                from sysconstraints as t1 , systables as t2 ,
                     sysindexes as t3 , syscolumns as t4 ,
                     sysreferences as t5 , syscolumns as t6 , systables as t7 ,
                     sysconstraints as t8 , sysindexes as t9
               where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R'
                 and t3.tabid = t2.tabid and t3.idxname = t1.idxname
                 and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
                     t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
                     t3.part11, t3.part12, t3.part13, t3.part14, t3.part15, t3.part16)
                 and t5.constrid = t1.constrid and t8.constrid = t5.primary
                 and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
                     t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
                     t9.part11, t9.part12, t9.part13, t9.part14, t9.part15, t9.part16) and t9.idxname =
                 t8.idxname
                 and t7.tabid = t5.ptabid""", table_name, schema_sel)

        def fkey_rec():
            # one record per constraint name, filled incrementally below
            return {
                'name': None,
                'constrained_columns': [],
                'referred_schema': None,
                'referred_table': None,
                'referred_columns': []
            }

        fkeys = util.defaultdict(fkey_rec)

        rows = c.fetchall()
        for cons_name, local_column, \
                remote_table, remote_column, remote_owner in rows:
            rec = fkeys[cons_name]
            rec['name'] = cons_name
            local_cols, remote_cols = \
                rec['constrained_columns'], rec['referred_columns']

            if not rec['referred_table']:
                rec['referred_table'] = remote_table
                if schema is not None:
                    rec['referred_schema'] = remote_owner

            if local_column not in local_cols:
                local_cols.append(local_column)
            if remote_column not in remote_cols:
                remote_cols.append(remote_column)

        return fkeys.values()

    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        schema = schema or self.default_schema_name

        # Select the column positions from sysindexes for sysconstraints
        data = connection.execute(
            """select t2.*
            from systables as t1, sysindexes as t2, sysconstraints as t3
            where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
            and t2.idxname=t3.idxname and t3.constrtype='P'""",
            table_name, schema
        ).fetchall()

        colpositions = set()
        for row in data:
            # BUG FIX: sysindexes has part1..part16; range(1, 16) dropped
            # part16, losing the last column of wide primary keys.
            colpos = set([getattr(row, 'part%d' % x) for x in range(1, 17)])
            colpositions |= colpos

        if not len(colpositions):
            return []

        # Select the column names using the column positions
        # TODO: Maybe cache a bit of those col infos (eg select all colnames for one table)
        place_holder = ','.join('?' * len(colpositions))
        c = connection.execute(
            """select t1.colname
            from syscolumns as t1, systables as t2
            where t2.tabname=? and t1.tabid = t2.tabid and
            t1.colno in (%s)""" % place_holder,
            table_name, *colpositions
        ).fetchall()
        # flatten the single-column rows into a plain list of names
        # (replaces the Python 2-only builtin ``reduce``)
        return [col for row in c for col in row]

    @reflection.cache
    def get_indexes(self, connection, table_name, schema, **kw):
        # TODO: schema...
        c = connection.execute(
            """select t1.*
            from sysindexes as t1 , systables as t2
            where t1.tabid = t2.tabid and t2.tabname=?""",
            table_name)

        indexes = []
        for row in c.fetchall():
            # BUG FIX: range(1, 17) so part16 is included; 0 marks an
            # unused part slot and is filtered out.
            colnums = [getattr(row, 'part%d' % x) for x in range(1, 17)]
            colnums = [x for x in colnums if x]
            place_holder = ','.join('?' * len(colnums))
            # use a distinct name so the outer result ``c`` is not clobbered
            name_rows = connection.execute(
                """select t1.colname
                from syscolumns as t1, systables as t2
                where t2.tabname=? and t1.tabid = t2.tabid and
                t1.colno in (%s)""" % place_holder,
                table_name, *colnums
            ).fetchall()
            column_names = [col for r in name_rows for col in r]
            indexes.append({
                'name': row.idxname,
                'unique': row.idxtype.lower() == 'u',
                'column_names': column_names
            })
        return indexes

    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        schema = schema or self.default_schema_name
        # viewtext is stored in seqno-ordered fragments; reassemble them
        c = connection.execute(
            """select t1.viewtext
            from sysviews as t1 , systables as t2
            where t1.tabid=t2.tabid and t2.tabname=?
            and t2.owner=? order by seqno""",
            view_name, schema).fetchall()
        return ''.join([row[0] for row in c])

    def _get_default_schema_name(self, connection):
        return connection.execute('select CURRENT_ROLE from systables').scalar()
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_04_01/aio/_container_registry_management_client.py | 1 | 4407 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import RegistriesOperations
from .operations import RunsOperations
from .operations import TasksOperations
from .. import models
class ContainerRegistryManagementClient(object):
    """ContainerRegistryManagementClient.

    :ivar registries: RegistriesOperations operations
    :vartype registries: azure.mgmt.containerregistry.v2019_04_01.aio.operations.RegistriesOperations
    :ivar runs: RunsOperations operations
    :vartype runs: azure.mgmt.containerregistry.v2019_04_01.aio.operations.RunsOperations
    :ivar tasks: TasksOperations operations
    :vartype tasks: azure.mgmt.containerregistry.v2019_04_01.aio.operations.TasksOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Microsoft Azure subscription ID.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = ContainerRegistryManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every model class this API version
        # exports; client-side validation is explicitly disabled.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # Operation groups share the one pipeline client and (de)serializers.
        self.registries = RegistriesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.runs = RunsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.tasks = TasksOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the subscription id into the request URL template.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        """Close the underlying pipeline client."""
        await self._client.close()

    async def __aenter__(self) -> "ContainerRegistryManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| mit |
JukeboxPipeline/jukebox-core | src/jukeboxcore/gui/widgets/actionreportdialog_ui.py | 1 | 4911 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'h:\projects\jukebox-core\src\jukeboxcore\gui\widgets\actionreportdialog.ui'
#
# Created: Fri Nov 07 16:12:58 2014
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
# NOTE: auto-generated by pyside-uic from actionreportdialog.ui; manual edits
# here are lost on regeneration (comments below are for reading convenience).
class Ui_ActionReportDialog(object):
    def setupUi(self, ActionReportDialog):
        # Dialog shell: modal, fixed initial size.
        ActionReportDialog.setObjectName("ActionReportDialog")
        ActionReportDialog.setWindowModality(QtCore.Qt.ApplicationModal)
        ActionReportDialog.resize(564, 654)
        self.verticalLayout = QtGui.QVBoxLayout(ActionReportDialog)
        self.verticalLayout.setObjectName("verticalLayout")
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        # Row 0: "Status:" title plus the status value label.
        self.status_title_lb = QtGui.QLabel(ActionReportDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.status_title_lb.sizePolicy().hasHeightForWidth())
        self.status_title_lb.setSizePolicy(sizePolicy)
        self.status_title_lb.setWordWrap(True)
        self.status_title_lb.setObjectName("status_title_lb")
        self.gridLayout.addWidget(self.status_title_lb, 0, 0, 1, 1)
        self.status_lb = QtGui.QLabel(ActionReportDialog)
        self.status_lb.setText("")
        self.status_lb.setObjectName("status_lb")
        self.gridLayout.addWidget(self.status_lb, 0, 1, 1, 2)
        # Row 3: read-only traceback text area (toggled by the checkbox below).
        self.traceback_pte = QtGui.QPlainTextEdit(ActionReportDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.traceback_pte.sizePolicy().hasHeightForWidth())
        self.traceback_pte.setSizePolicy(sizePolicy)
        self.traceback_pte.setDocumentTitle("")
        self.traceback_pte.setPlainText("")
        self.traceback_pte.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
        self.traceback_pte.setObjectName("traceback_pte")
        self.gridLayout.addWidget(self.traceback_pte, 3, 0, 1, 3)
        # Row 2 (right): "Show Traceback" checkbox, unchecked by default.
        self.showtb_checkb = QtGui.QCheckBox(ActionReportDialog)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.showtb_checkb.sizePolicy().hasHeightForWidth())
        self.showtb_checkb.setSizePolicy(sizePolicy)
        self.showtb_checkb.setChecked(False)
        self.showtb_checkb.setObjectName("showtb_checkb")
        self.gridLayout.addWidget(self.showtb_checkb, 2, 2, 1, 1)
        # Row 1: free-form message label spanning all three columns.
        self.message_lb = QtGui.QLabel(ActionReportDialog)
        self.message_lb.setText("")
        self.message_lb.setWordWrap(True)
        self.message_lb.setObjectName("message_lb")
        self.gridLayout.addWidget(self.message_lb, 1, 0, 1, 3)
        # Row 2 (left): bold "Do you wish to continue?" prompt.
        self.label = QtGui.QLabel(ActionReportDialog)
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setWeight(75)
        font.setBold(True)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 2, 0, 1, 2)
        self.verticalLayout.addLayout(self.gridLayout)
        # Centered Abort/Ok buttons wired to reject/accept below.
        self.buttonBox = QtGui.QDialogButtonBox(ActionReportDialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Abort|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setCenterButtons(True)
        self.buttonBox.setObjectName("buttonBox")
        self.verticalLayout.addWidget(self.buttonBox)
        self.retranslateUi(ActionReportDialog)
        # Signal wiring: buttons close the dialog; checkbox shows/hides the
        # traceback widget.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), ActionReportDialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), ActionReportDialog.reject)
        QtCore.QObject.connect(self.showtb_checkb, QtCore.SIGNAL("toggled(bool)"), self.traceback_pte.setVisible)
        QtCore.QMetaObject.connectSlotsByName(ActionReportDialog)

    def retranslateUi(self, ActionReportDialog):
        # Install the user-visible (translatable) strings.
        ActionReportDialog.setWindowTitle(QtGui.QApplication.translate("ActionReportDialog", "Report", None, QtGui.QApplication.UnicodeUTF8))
        self.status_title_lb.setText(QtGui.QApplication.translate("ActionReportDialog", "Status:", None, QtGui.QApplication.UnicodeUTF8))
        self.showtb_checkb.setText(QtGui.QApplication.translate("ActionReportDialog", "Show Traceback", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("ActionReportDialog", "Do you wish to continue?", None, QtGui.QApplication.UnicodeUTF8))
| bsd-3-clause |
sgraham/nope | third_party/WebKit/Tools/Scripts/webkitpy/common/system/user_mock.py | 40 | 2381 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
class MockUser(object):
    """Test double for webkitpy's User: canned answers, no real console I/O.

    URLs passed to open_url() are recorded in ``opened_urls`` for assertions.
    """

    @classmethod
    def prompt(cls, message, repeat=1, raw_input=raw_input):
        # Always answer with a fixed canned string.
        return "Mock user response"

    @classmethod
    def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):
        pass

    def __init__(self):
        self.opened_urls = []

    def edit(self, files):
        pass

    def page(self, message):
        pass

    def confirm(self, message=None, default='y'):
        # Log the question and agree exactly when the default answer is 'y'.
        _log.info(message)
        return default == 'y'

    def can_open_url(self):
        return True

    def open_url(self, url):
        self.opened_urls.append(url)
        if url.startswith("file://"):
            _log.info("MOCK: user.open_url: file://...")
        else:
            _log.info("MOCK: user.open_url: %s" % url)
| bsd-3-clause |
ZacharyRSmith/xpython | palindrome-products/palindrome_products_test.py | 11 | 1679 | """
Notes regarding the implementation of smallest_palindrome and
largest_palindrome:
Both functions must take two keyword arguments:
max_factor -- int
min_factor -- int, default 0
Their return value must be a tuple (value, factors) where value is the
palindrome itself, and factors is an iterable containing both factors of the
palindrome in arbitrary order.
"""
import unittest
from palindrome import smallest_palindrome, largest_palindrome
class PalindromesTests(unittest.TestCase):
    """Exercise smallest_palindrome/largest_palindrome over factor ranges."""

    def test_largest_palindrome_from_single_digit_factors(self):
        palindrome, factors = largest_palindrome(max_factor=9)
        self.assertEqual(palindrome, 9)
        self.assertIn(set(factors), [{1, 9}, {3, 3}])

    def test_largest_palindrome_from_double_digit_factors(self):
        palindrome, factors = largest_palindrome(max_factor=99, min_factor=10)
        self.assertEqual(palindrome, 9009)
        self.assertEqual(set(factors), {91, 99})

    def test_smallest_palindrome_from_double_digit_factors(self):
        palindrome, factors = smallest_palindrome(max_factor=99, min_factor=10)
        self.assertEqual(palindrome, 121)
        self.assertEqual(set(factors), {11})

    def test_largest_palindrome_from_triple_digit_factors(self):
        palindrome, factors = largest_palindrome(max_factor=999, min_factor=100)
        self.assertEqual(palindrome, 906609)
        self.assertEqual(set(factors), {913, 993})

    def test_smallest_palindrome_from_triple_digit_factors(self):
        palindrome, factors = smallest_palindrome(max_factor=999, min_factor=100)
        self.assertEqual(palindrome, 10201)
        self.assertEqual(set(factors), {101, 101})
# Allow running this test module directly: ``python palindrome_products_test.py``
if __name__ == '__main__':
    unittest.main()
| mit |
sporkexec/urmpc | bin/urwid/util.py | 7 | 12807 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid utility functions
# Copyright (C) 2004-2011 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from urwid import escape
from urwid.compat import bytes
str_util = escape.str_util

# Re-export the text-measurement helpers from urwid.escape's str_util so
# callers can import them directly from this module.
calc_text_pos = str_util.calc_text_pos
calc_width = str_util.calc_width
is_wide_char = str_util.is_wide_char
move_next_char = str_util.move_next_char
move_prev_char = str_util.move_prev_char
within_double_byte = str_util.within_double_byte
def detect_encoding():
    """Return the encoding of the user's locale, or "" if it cannot be
    determined.

    Uses ``locale.setlocale(LC_ALL, "")`` to load the environment's locale
    first; locale errors are ignored.
    """
    # Try to determine if using a supported double-byte encoding
    import locale
    try:
        try:
            locale.setlocale(locale.LC_ALL, "")
        except locale.Error:
            pass
        return locale.getlocale()[1] or ""
    except ValueError as e:
        # BUG FIX: was the Python 2-only ``except ValueError, e`` syntax.
        # An invalid LANG value makes getlocale() raise ValueError.
        if e.args and e.args[0].startswith("unknown locale"):
            return ""
        else:
            raise
# Detect the locale's encoding once at import time.
#
# BUG FIX: this was wrapped in ``if 'detected_encoding' not in locals():``
# with an ``else: assert 0, "It worked!"`` branch.  At first import the name
# can never already be bound, so the guard was always true; on a module
# *reload* the stale binding would have tripped the assert.  Debug
# scaffolding removed.
detected_encoding = detect_encoding()

# Conversion state managed by set_encoding() below.
_target_encoding = None
_use_dec_special = True
def set_encoding( encoding ):
    """
    Set the byte encoding to assume when processing strings and the
    encoding to use when converting unicode strings.
    """
    global _target_encoding, _use_dec_special

    encoding = encoding.lower()

    utf8_names = ( 'utf-8', 'utf8', 'utf' )
    wide_names = ( 'euc-jp' # JISX 0208 only
        , 'euc-kr', 'euc-cn', 'euc-tw' # CNS 11643 plain 1 only
        , 'gb2312', 'gbk', 'big5', 'cn-gb', 'uhc'
        # these shouldn't happen, should they?
        , 'eucjp', 'euckr', 'euccn', 'euctw', 'cncb' )

    if encoding in utf8_names:
        str_util.set_byte_encoding("utf8")
        _use_dec_special = False
    elif encoding in wide_names:
        str_util.set_byte_encoding("wide")
        _use_dec_special = True
    else:
        str_util.set_byte_encoding("narrow")
        _use_dec_special = True

    # if encoding is valid for conversion from unicode, remember it
    _target_encoding = 'ascii'
    try:
        if encoding:
            u"".encode(encoding)
            _target_encoding = encoding
    except LookupError: pass
def get_encoding_mode():
    """
    Get the mode Urwid is using when processing text strings.
    Returns 'narrow' for 8-bit encodings, 'wide' for CJK encodings
    or 'utf8' for UTF-8 encodings.
    """
    # Delegates to the str_util state configured via set_encoding().
    return str_util.get_byte_encoding()
def apply_target_encoding( s ):
    """
    Return (encoded byte string, character set rle).

    The returned rle tags each byte run with either None (normal text) or
    the DEC special-graphics tag for runs bracketed by SO/SI shift codes.
    """
    if _use_dec_special and type(s) == unicode:
        # first convert drawing characters
        try:
            s = s.translate( escape.DEC_SPECIAL_CHARMAP )
        except NotImplementedError:
            # python < 2.4 needs to do this the hard way..
            for c, alt in zip(escape.DEC_SPECIAL_CHARS,
                escape.ALT_DEC_SPECIAL_CHARS):
                s = s.replace( c, escape.SO+alt+escape.SI )

    if type(s) == unicode:
        s = s.replace( escape.SI+escape.SO, u"" ) # remove redundant shifts
        s = s.encode( _target_encoding )

    assert isinstance(s, bytes)
    SO = escape.SO.encode('ascii')
    SI = escape.SI.encode('ascii')

    # Split on shift-out codes; everything after each SO up to the next SI
    # is DEC special-graphics text.
    sis = s.split(SO)

    assert isinstance(sis[0], bytes)

    # Leading segment (before any SO) is plain text; stray SIs are dropped.
    sis0 = sis[0].replace(SI, bytes())
    sout = []
    cout = []
    if sis0:
        sout.append( sis0 )
        cout.append( (None,len(sis0)) )

    if len(sis)==1:
        # no SO codes at all -- the whole string is plain text
        return sis0, cout

    for sn in sis[1:]:
        assert isinstance(sn, bytes)
        assert isinstance(SI, bytes)
        sl = sn.split(SI, 1)
        if len(sl) == 1:
            # unterminated SO run: the rest of this segment is DEC graphics
            sin = sl[0]
            assert isinstance(sin, bytes)
            sout.append(sin)
            rle_append_modify(cout, (escape.DEC_TAG.encode('ascii'), len(sin)))
            continue
        sin, son = sl
        son = son.replace(SI, bytes())
        if sin:
            sout.append(sin)
            # NOTE(review): this branch tags with the *unencoded*
            # escape.DEC_TAG while the branch above uses
            # DEC_TAG.encode('ascii') -- looks inconsistent; confirm.
            rle_append_modify(cout, (escape.DEC_TAG, len(sin)))
        if son:
            sout.append(son)
            rle_append_modify(cout, (None, len(son)))

    outstr = bytes().join(sout)
    return outstr, cout
######################################################################
# Apply the encoding detected at import time (see detect_encoding above);
# applications may call set_encoding() again to override it.
set_encoding( detected_encoding )
######################################################################
def supports_unicode():
    """
    Return True if python is able to convert non-ascii unicode strings
    to the current encoding.
    """
    # Truthy only when set_encoding() validated a real non-ascii codec.
    # Note the raw expression (not a bool()) is returned, so callers get
    # the encoding-truthiness, not necessarily the object True/False.
    return _target_encoding and _target_encoding != 'ascii'
def calc_trim_text( text, start_offs, end_offs, start_col, end_col ):
    """
    Calculate the result of trimming text.
    start_offs -- offset into text to treat as screen column 0
    end_offs -- offset into text to treat as the end of the line
    start_col -- screen column to trim at the left
    end_col -- screen column to trim at the right

    Returns (start, end, pad_left, pad_right), where:
    start -- resulting start offset
    end -- resulting end offset
    pad_left -- 0 for no pad or 1 for one space to be added
    pad_right -- 0 for no pad or 1 for one space to be added
    """
    spos = start_offs
    pad_left = pad_right = 0
    if start_col > 0:
        spos, sc = calc_text_pos( text, spos, end_offs, start_col )
        if sc < start_col:
            # the trim point fell inside a multi-column character
            # (presumably a double-width CJK cell -- see calc_text_pos);
            # pad with one space and step past the whole character
            pad_left = 1
            spos, sc = calc_text_pos( text, start_offs,
                end_offs, start_col+1 )
    run = end_col - start_col - pad_left
    pos, sc = calc_text_pos( text, spos, end_offs, run )
    if sc < run:
        # right edge also landed inside a multi-column character
        pad_right = 1
    return ( spos, pos, pad_left, pad_right )
def trim_text_attr_cs( text, attr, cs, start_col, end_col ):
    """
    Return ( trimmed text, trimmed attr, trimmed cs ).

    attr and cs are run-length encoded lists; both are trimmed to the same
    span as the text.  Where a wide character is split at an edge, a single
    padding space is inserted carrying the neighbouring attribute and a
    None character set.
    """
    spos, epos, pad_left, pad_right = calc_trim_text(
        text, 0, len(text), start_col, end_col )
    attrtr = rle_subseg( attr, spos, epos )
    cstr = rle_subseg( cs, spos, epos )
    if pad_left:
        # pad carries the attribute just left of the trim point
        al = rle_get_at( attr, spos-1 )
        rle_append_beginning_modify( attrtr, (al, 1) )
        rle_append_beginning_modify( cstr, (None, 1) )
    if pad_right:
        al = rle_get_at( attr, epos )
        rle_append_modify( attrtr, (al, 1) )
        rle_append_modify( cstr, (None, 1) )

    return (bytes().rjust(pad_left) + text[spos:epos] +
        bytes().rjust(pad_right), attrtr, cstr)
def rle_get_at(rle, pos):
    """
    Return the attribute whose run covers offset *pos*, or None when
    pos is negative or beyond the end of the encoded runs.
    """
    if pos < 0:
        return None
    covered = 0
    for attr, run in rle:
        covered += run
        if covered > pos:
            return attr
    return None
def rle_subseg(rle, start, end):
    """Return the run-length encoded slice covering [start, end) of rle."""
    out = []
    pos = 0
    to_skip = start
    for attr, run in rle:
        if to_skip:
            if to_skip >= run:
                # this run lies entirely left of the slice
                to_skip -= run
                pos += run
                continue
            # run straddles the left edge: keep only the right part
            pos += to_skip
            run -= to_skip
            to_skip = 0
        if pos >= end:
            break
        # clamp the final run at the right edge
        run = min(run, end - pos)
        pos += run
        out.append((attr, run))
    return out
def rle_len(rle):
    """
    Return the number of characters covered by a run length
    encoded attribute list.

    Each element of rle must be an (attribute, run_length) tuple.
    """
    total = 0
    for v in rle:
        # isinstance is the idiomatic type check (was: type(v) == tuple)
        assert isinstance(v, tuple), repr(rle)
        _, run = v
        total += run
    return total
def rle_append_beginning_modify(rle, a_r):
    """
    Append (a, r) to the BEGINNING of rle.
    Merge with the first run when it has the same attribute.

    MODIFIES rle parameter contents. Returns None.

    Note: the Python-2-only tuple-parameter signature ``(rle, (a, r))``
    was replaced with an explicit unpack; call sites are unchanged.
    """
    a, r = a_r
    if not rle:
        rle[:] = [(a, r)]
    else:
        al, run = rle[0]
        if a == al:
            # same attribute: just extend the first run
            rle[0] = (a, run + r)
        else:
            # BUG FIX: insert the NEW run (a, r); the original inserted
            # (al, r) -- the old attribute with the new run length
            rle[0:0] = [(a, r)]
def rle_append_modify(rle, a_r):
    """
    Append (a, r) to the rle list.
    Merge with the last run when it has the same attribute.

    MODIFIES rle parameter contents. Returns None.

    Note: the Python-2-only tuple-parameter signature ``(rle, (a, r))``
    was replaced with an explicit unpack; call sites are unchanged.
    """
    a, r = a_r
    if not rle or rle[-1][0] != a:
        rle.append((a, r))
        return
    _, lr = rle[-1]
    rle[-1] = (a, lr + r)
def rle_join_modify(rle, rle2):
    """
    Append attribute list rle2 onto rle, merging the boundary runs
    when they share the same attribute.

    MODIFIES rle parameter contents. Returns None.
    """
    if not rle2:
        return
    head, tail = rle2[0], rle2[1:]
    # rle_append_modify handles the possible merge at the seam
    rle_append_modify(rle, head)
    rle += tail
def rle_product( rle1, rle2 ):
    """
    Merge the runs of rle1 and rle2 like this:
    eg.
    rle1 = [ ("a", 10), ("b", 5) ]
    rle2 = [ ("Q", 5), ("P", 10) ]
    rle_product: [ (("a","Q"), 5), (("a","P"), 5), (("b","P"), 5) ]

    rle1 and rle2 are assumed to cover the same total run.
    """
    # indexes start at 1 because element 0 of each list is loaded below
    i1 = i2 = 1 # rle1, rle2 indexes
    if not rle1 or not rle2: return []
    a1, r1 = rle1[0]
    a2, r2 = rle2[0]

    l = []
    while r1 and r2:
        # emit the overlap of the two current runs as a paired attribute
        r = min(r1, r2)
        rle_append_modify( l, ((a1,a2),r) )
        r1 -= r
        if r1 == 0 and i1< len(rle1):
            # current rle1 run exhausted: advance to its next run
            a1, r1 = rle1[i1]
            i1 += 1
        r2 -= r
        if r2 == 0 and i2< len(rle2):
            # current rle2 run exhausted: advance to its next run
            a2, r2 = rle2[i2]
            i2 += 1
    return l
def rle_factor(rle):
    """
    Split a paired rle list (as produced by rle_product) back into
    two separate rle lists -- the inverse of rle_product.
    """
    first = []
    second = []
    for (a1, a2), run in rle:
        rle_append_modify(first, (a1, run))
        rle_append_modify(second, (a2, run))
    return first, second
class TagMarkupException(Exception):
    """Raised when malformed tag markup is passed to the markup helpers."""
    pass
def decompose_tagmarkup(tm):
    """Return (text string, attribute list) for tagmarkup passed."""
    text_parts, attrs = _tagmarkup_recurse(tm, None)
    # join as unicode or bytes depending on the type of the first part
    joined = text_parts[0][:0].join(text_parts)
    if attrs and attrs[-1][0] is None:
        # a trailing unattributed run carries no information
        del attrs[-1]
    return joined, attrs
def _tagmarkup_recurse( tm, attr ):
"""Return (text list, attribute list) for tagmarkup passed.
tm -- tagmarkup
attr -- current attribute or None"""
if type(tm) == list:
# for lists recurse to process each subelement
rtl = []
ral = []
for element in tm:
tl, al = _tagmarkup_recurse( element, attr )
if ral:
# merge attributes when possible
last_attr, last_run = ral[-1]
top_attr, top_run = al[0]
if last_attr == top_attr:
ral[-1] = (top_attr, last_run + top_run)
del al[-1]
rtl += tl
ral += al
return rtl, ral
if type(tm) == tuple:
# tuples mark a new attribute boundary
if len(tm) != 2:
raise TagMarkupException, "Tuples must be in the form (attribute, tagmarkup): %r" % (tm,)
attr, element = tm
return _tagmarkup_recurse( element, attr )
if not isinstance(tm,(basestring, bytes)):
raise TagMarkupException, "Invalid markup element: %r" % tm
# text
return [tm], [(attr, len(tm))]
def is_mouse_event(ev):
    """True for mouse event tuples: ("... mouse ...", button, x, y)."""
    if type(ev) != tuple or len(ev) != 4:
        return False
    return ev[0].find("mouse") >= 0
def is_mouse_press(ev):
    """True when the mouse event name string contains "press"."""
    return ev.find("press") != -1
class MetaSuper(type):
    """Metaclass that gives every class a private ``.__super`` shortcut.

    The attribute (name-mangled to ``_<ClassName>__super``) is bound to
    ``super(cls)`` so method bodies can write ``self.__super.method()``.
    """
    def __init__(cls, name, bases, d):
        super(MetaSuper, cls).__init__(name, bases, d)
        if hasattr(cls, "_%s__super" % name):
            # a base class with the same name would make the mangled
            # attribute ambiguous between the two classes
            # (raise statement converted from Python-2-only syntax)
            raise AttributeError("Class has same name as one of its super classes")
        setattr(cls, "_%s__super" % name, super(cls))
def int_scale(val, val_range, out_range):
    """
    Scale val in the range [0, val_range-1] to an integer in the range
    [0, out_range-1].  This implementation uses the "round-half-up"
    rounding method.

    >>> "%x" % int_scale(0x7, 0x10, 0x10000)
    '7777'
    >>> "%x" % int_scale(0x5f, 0x100, 0x10)
    '6'
    >>> int_scale(2, 6, 101)
    40
    >>> int_scale(1, 3, 4)
    2
    """
    # doubling both sides and adding (val_range-1) -- half the
    # denominator -- before floor division implements round-half-up
    numerator = int(val * (out_range - 1) * 2 + (val_range - 1))
    denominator = (val_range - 1) * 2
    return numerator // denominator
| gpl-3.0 |
resmo/ansible | lib/ansible/modules/network/fortimanager/fmgr_fwpol_ipv4.py | 21 | 49820 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_fwpol_ipv4
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Allows the add/delete of Firewall Policies on Packages in FortiManager.
description:
- Allows the add/delete of Firewall Policies on Packages in FortiManager.
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
package_name:
description:
- The policy package you want to modify
required: false
default: "default"
fail_on_missing_dependency:
description:
    - Normal behavior is to "skip" tasks that fail dependency checks, so other tasks can run.
    - If set to "enable" and a failed dependency check happens, Ansible will exit with failure instead of skip.
required: false
default: "disable"
choices: ["enable", "disable"]
version_added: "2.9"
wsso:
description:
- Enable/disable WiFi Single Sign On (WSSO).
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
webfilter_profile:
description:
- Name of an existing Web filter profile.
required: false
webcache_https:
description:
- Enable/disable web cache for HTTPS.
- choice | disable | Disable web cache for HTTPS.
- choice | enable | Enable web cache for HTTPS.
required: false
choices: ["disable", "enable"]
webcache:
description:
- Enable/disable web cache.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
wccp:
description:
- Enable/disable forwarding traffic matching this policy to a configured WCCP server.
- choice | disable | Disable WCCP setting.
- choice | enable | Enable WCCP setting.
required: false
choices: ["disable", "enable"]
wanopt_profile:
description:
- WAN optimization profile.
required: false
wanopt_peer:
description:
- WAN optimization peer.
required: false
wanopt_passive_opt:
description:
    - WAN optimization passive mode options. This option decides what IP address will be used to connect to the server.
- choice | default | Allow client side WAN opt peer to decide.
- choice | transparent | Use address of client to connect to server.
- choice | non-transparent | Use local FortiGate address to connect to server.
required: false
choices: ["default", "transparent", "non-transparent"]
wanopt_detection:
description:
- WAN optimization auto-detection mode.
- choice | active | Active WAN optimization peer auto-detection.
- choice | passive | Passive WAN optimization peer auto-detection.
- choice | off | Turn off WAN optimization peer auto-detection.
required: false
choices: ["active", "passive", "off"]
wanopt:
description:
- Enable/disable WAN optimization.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
waf_profile:
description:
- Name of an existing Web application firewall profile.
required: false
vpntunnel:
description:
- Policy-based IPsec VPN | name of the IPsec VPN Phase 1.
required: false
voip_profile:
description:
- Name of an existing VoIP profile.
required: false
vlan_filter:
description:
- Set VLAN filters.
required: false
vlan_cos_rev:
description:
- VLAN reverse direction user priority | 255 passthrough, 0 lowest, 7 highest..
required: false
vlan_cos_fwd:
description:
- VLAN forward direction user priority | 255 passthrough, 0 lowest, 7 highest.
required: false
utm_status:
description:
- Enable to add one or more security profiles (AV, IPS, etc.) to the firewall policy.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
users:
description:
- Names of individual users that can authenticate with this policy.
required: false
url_category:
description:
- URL category ID list.
required: false
traffic_shaper_reverse:
description:
- Reverse traffic shaper.
required: false
traffic_shaper:
description:
- Traffic shaper.
required: false
timeout_send_rst:
description:
- Enable/disable sending RST packets when TCP sessions expire.
- choice | disable | Disable sending of RST packet upon TCP session expiration.
- choice | enable | Enable sending of RST packet upon TCP session expiration.
required: false
choices: ["disable", "enable"]
tcp_session_without_syn:
description:
- Enable/disable creation of TCP session without SYN flag.
- choice | all | Enable TCP session without SYN.
- choice | data-only | Enable TCP session data only.
- choice | disable | Disable TCP session without SYN.
required: false
choices: ["all", "data-only", "disable"]
tcp_mss_sender:
description:
- Sender TCP maximum segment size (MSS).
required: false
tcp_mss_receiver:
description:
- Receiver TCP maximum segment size (MSS).
required: false
status:
description:
- Enable or disable this policy.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
ssl_ssh_profile:
description:
- Name of an existing SSL SSH profile.
required: false
ssl_mirror_intf:
description:
- SSL mirror interface name.
required: false
ssl_mirror:
description:
- Enable to copy decrypted SSL traffic to a FortiGate interface (called SSL mirroring).
- choice | disable | Disable SSL mirror.
- choice | enable | Enable SSL mirror.
required: false
choices: ["disable", "enable"]
ssh_filter_profile:
description:
- Name of an existing SSH filter profile.
required: false
srcintf:
description:
- Incoming (ingress) interface.
required: false
srcaddr_negate:
description:
- When enabled srcaddr specifies what the source address must NOT be.
- choice | disable | Disable source address negate.
- choice | enable | Enable source address negate.
required: false
choices: ["disable", "enable"]
srcaddr:
description:
- Source address and address group names.
required: false
spamfilter_profile:
description:
- Name of an existing Spam filter profile.
required: false
session_ttl:
description:
- TTL in seconds for sessions accepted by this policy (0 means use the system default session TTL).
required: false
service_negate:
description:
- When enabled service specifies what the service must NOT be.
- choice | disable | Disable negated service match.
- choice | enable | Enable negated service match.
required: false
choices: ["disable", "enable"]
service:
description:
- Service and service group names.
required: false
send_deny_packet:
description:
- Enable to send a reply when a session is denied or blocked by a firewall policy.
- choice | disable | Disable deny-packet sending.
- choice | enable | Enable deny-packet sending.
required: false
choices: ["disable", "enable"]
schedule_timeout:
description:
- Enable to force current sessions to end when the schedule object times out.
- choice | disable | Disable schedule timeout.
- choice | enable | Enable schedule timeout.
required: false
choices: ["disable", "enable"]
schedule:
description:
- Schedule name.
required: false
scan_botnet_connections:
description:
- Block or monitor connections to Botnet servers or disable Botnet scanning.
- choice | disable | Do not scan connections to botnet servers.
- choice | block | Block connections to botnet servers.
- choice | monitor | Log connections to botnet servers.
required: false
choices: ["disable", "block", "monitor"]
rtp_nat:
description:
- Enable Real Time Protocol (RTP) NAT.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
rtp_addr:
description:
- Address names if this is an RTP NAT policy.
required: false
rsso:
description:
- Enable/disable RADIUS single sign-on (RSSO).
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
replacemsg_override_group:
description:
- Override the default replacement message group for this policy.
required: false
redirect_url:
description:
- URL users are directed to after seeing and accepting the disclaimer or authenticating.
required: false
radius_mac_auth_bypass:
description:
- Enable MAC authentication bypass. The bypassed MAC address must be received from RADIUS server.
- choice | disable | Disable MAC authentication bypass.
- choice | enable | Enable MAC authentication bypass.
required: false
choices: ["disable", "enable"]
profile_type:
description:
- Determine whether the firewall policy allows security profile groups or single profiles only.
- choice | single | Do not allow security profile groups.
- choice | group | Allow security profile groups.
required: false
choices: ["single", "group"]
profile_protocol_options:
description:
- Name of an existing Protocol options profile.
required: false
profile_group:
description:
- Name of profile group.
required: false
poolname:
description:
- IP Pool names.
required: false
policyid:
description:
- Policy ID.
required: false
permit_stun_host:
description:
- Accept UDP packets from any Session Traversal Utilities for NAT (STUN) host.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
permit_any_host:
description:
- Accept UDP packets from any host.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
per_ip_shaper:
description:
- Per-IP traffic shaper.
required: false
outbound:
description:
- Policy-based IPsec VPN | only traffic from the internal network can initiate a VPN.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
ntlm_guest:
description:
- Enable/disable NTLM guest user access.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
ntlm_enabled_browsers:
description:
- HTTP-User-Agent value of supported browsers.
required: false
ntlm:
description:
- Enable/disable NTLM authentication.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
np_acceleration:
description:
- Enable/disable UTM Network Processor acceleration.
- choice | disable | Disable UTM Network Processor acceleration.
- choice | enable | Enable UTM Network Processor acceleration.
required: false
choices: ["disable", "enable"]
natoutbound:
description:
- Policy-based IPsec VPN | apply source NAT to outbound traffic.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
natip:
description:
- Policy-based IPsec VPN | source NAT IP address for outgoing traffic.
required: false
natinbound:
description:
- Policy-based IPsec VPN | apply destination NAT to inbound traffic.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
nat:
description:
- Enable/disable source NAT.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
name:
description:
- Policy name.
required: false
mms_profile:
description:
- Name of an existing MMS profile.
required: false
match_vip:
description:
- Enable to match packets that have had their destination addresses changed by a VIP.
- choice | disable | Do not match DNATed packet.
- choice | enable | Match DNATed packet.
required: false
choices: ["disable", "enable"]
logtraffic_start:
description:
- Record logs when a session starts and ends.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
logtraffic:
description:
- Enable or disable logging. Log all sessions or security profile sessions.
- choice | disable | Disable all logging for this policy.
- choice | all | Log all sessions accepted or denied by this policy.
- choice | utm | Log traffic that has a security profile applied to it.
required: false
choices: ["disable", "all", "utm"]
learning_mode:
description:
- Enable to allow everything, but log all of the meaningful data for security information gathering.
- choice | disable | Disable learning mode in firewall policy.
- choice | enable | Enable learning mode in firewall policy.
required: false
choices: ["disable", "enable"]
label:
description:
- Label for the policy that appears when the GUI is in Section View mode.
required: false
ips_sensor:
description:
- Name of an existing IPS sensor.
required: false
ippool:
description:
- Enable to use IP Pools for source NAT.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
internet_service_src_negate:
description:
- When enabled internet-service-src specifies what the service must NOT be.
- choice | disable | Disable negated Internet Service source match.
- choice | enable | Enable negated Internet Service source match.
required: false
choices: ["disable", "enable"]
internet_service_src_id:
description:
- Internet Service source ID.
required: false
internet_service_src_custom:
description:
- Custom Internet Service source name.
required: false
internet_service_src:
description:
- Enable/disable use of Internet Services in source for this policy. If enabled, source address is not used.
- choice | disable | Disable use of Internet Services source in policy.
- choice | enable | Enable use of Internet Services source in policy.
required: false
choices: ["disable", "enable"]
internet_service_negate:
description:
- When enabled internet-service specifies what the service must NOT be.
- choice | disable | Disable negated Internet Service match.
- choice | enable | Enable negated Internet Service match.
required: false
choices: ["disable", "enable"]
internet_service_id:
description:
- Internet Service ID.
required: false
internet_service_custom:
description:
- Custom Internet Service name.
required: false
internet_service:
description:
- Enable/disable use of Internet Services for this policy. If enabled, dstaddr and service are not used.
- choice | disable | Disable use of Internet Services in policy.
- choice | enable | Enable use of Internet Services in policy.
required: false
choices: ["disable", "enable"]
inbound:
description:
- Policy-based IPsec VPN | only traffic from the remote network can initiate a VPN.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
identity_based_route:
description:
- Name of identity-based routing rule.
required: false
icap_profile:
description:
- Name of an existing ICAP profile.
required: false
gtp_profile:
description:
- GTP profile.
required: false
groups:
description:
- Names of user groups that can authenticate with this policy.
required: false
global_label:
description:
- Label for the policy that appears when the GUI is in Global View mode.
required: false
fsso_agent_for_ntlm:
description:
- FSSO agent to use for NTLM authentication.
required: false
fsso:
description:
- Enable/disable Fortinet Single Sign-On.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
fixedport:
description:
- Enable to prevent source NAT from changing a session's source port.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
firewall_session_dirty:
description:
- How to handle sessions if the configuration of this firewall policy changes.
- choice | check-all | Flush all current sessions accepted by this policy.
- choice | check-new | Continue to allow sessions already accepted by this policy.
required: false
choices: ["check-all", "check-new"]
dstintf:
description:
- Outgoing (egress) interface.
required: false
dstaddr_negate:
description:
- When enabled dstaddr specifies what the destination address must NOT be.
- choice | disable | Disable destination address negate.
- choice | enable | Enable destination address negate.
required: false
choices: ["disable", "enable"]
dstaddr:
description:
- Destination address and address group names.
required: false
dsri:
description:
- Enable DSRI to ignore HTTP server responses.
- choice | disable | Disable DSRI.
- choice | enable | Enable DSRI.
required: false
choices: ["disable", "enable"]
dscp_value:
description:
- DSCP value.
required: false
dscp_negate:
description:
- Enable negated DSCP match.
- choice | disable | Disable DSCP negate.
- choice | enable | Enable DSCP negate.
required: false
choices: ["disable", "enable"]
dscp_match:
description:
- Enable DSCP check.
- choice | disable | Disable DSCP check.
- choice | enable | Enable DSCP check.
required: false
choices: ["disable", "enable"]
dnsfilter_profile:
description:
- Name of an existing DNS filter profile.
required: false
dlp_sensor:
description:
- Name of an existing DLP sensor.
required: false
disclaimer:
description:
- Enable/disable user authentication disclaimer.
- choice | disable | Disable user authentication disclaimer.
- choice | enable | Enable user authentication disclaimer.
required: false
choices: ["disable", "enable"]
diffservcode_rev:
description:
- Change packet's reverse (reply) DiffServ to this value.
required: false
diffservcode_forward:
description:
- Change packet's DiffServ to this value.
required: false
diffserv_reverse:
description:
- Enable to change packet's reverse (reply) DiffServ values to the specified diffservcode-rev value.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
diffserv_forward:
description:
- Enable to change packet's DiffServ values to the specified diffservcode-forward value.
- choice | disable | Disable WAN optimization.
- choice | enable | Enable WAN optimization.
required: false
choices: ["disable", "enable"]
devices:
description:
- Names of devices or device groups that can be matched by the policy.
required: false
delay_tcp_npu_session:
description:
- Enable TCP NPU session delay to guarantee packet order of 3-way handshake.
- choice | disable | Disable TCP NPU session delay in order to guarantee packet order of 3-way handshake.
- choice | enable | Enable TCP NPU session delay in order to guarantee packet order of 3-way handshake.
required: false
choices: ["disable", "enable"]
custom_log_fields:
description:
- Custom fields to append to log messages for this policy.
required: false
comments:
description:
- Comment.
required: false
capture_packet:
description:
- Enable/disable capture packets.
- choice | disable | Disable capture packets.
- choice | enable | Enable capture packets.
required: false
choices: ["disable", "enable"]
captive_portal_exempt:
description:
- Enable to exempt some users from the captive portal.
- choice | disable | Disable exemption of captive portal.
- choice | enable | Enable exemption of captive portal.
required: false
choices: ["disable", "enable"]
block_notification:
description:
- Enable/disable block notification.
- choice | disable | Disable setting.
- choice | enable | Enable setting.
required: false
choices: ["disable", "enable"]
av_profile:
description:
- Name of an existing Antivirus profile.
required: false
auto_asic_offload:
description:
- Enable/disable offloading security profile processing to CP processors.
- choice | disable | Disable ASIC offloading.
- choice | enable | Enable auto ASIC offloading.
required: false
choices: ["disable", "enable"]
auth_redirect_addr:
description:
- HTTP-to-HTTPS redirect address for firewall authentication.
required: false
auth_path:
description:
- Enable/disable authentication-based routing.
- choice | disable | Disable authentication-based routing.
- choice | enable | Enable authentication-based routing.
required: false
choices: ["disable", "enable"]
auth_cert:
description:
- HTTPS server certificate for policy authentication.
required: false
application_list:
description:
- Name of an existing Application list.
required: false
application:
description:
- Application ID list.
required: false
app_group:
description:
- Application group names.
required: false
app_category:
description:
- Application category ID list.
required: false
action:
description:
- Policy action (allow/deny/ipsec).
- choice | deny | Blocks sessions that match the firewall policy.
- choice | accept | Allows session that match the firewall policy.
- choice | ipsec | Firewall policy becomes a policy-based IPsec VPN policy.
required: false
choices: ["deny", "accept", "ipsec"]
vpn_dst_node:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED. This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
required: false
vpn_dst_node_host:
description:
- VPN Destination Node Host.
required: false
vpn_dst_node_seq:
description:
- VPN Destination Node Seq.
required: false
vpn_dst_node_subnet:
description:
    - VPN Destination Node Subnet.
required: false
vpn_src_node:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED. This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
required: false
vpn_src_node_host:
description:
- VPN Source Node Host.
required: false
vpn_src_node_seq:
description:
- VPN Source Node Seq.
required: false
vpn_src_node_subnet:
description:
    - VPN Source Node Subnet.
required: false
'''
EXAMPLES = '''
- name: ADD VERY BASIC IPV4 POLICY WITH NO NAT (WIDE OPEN)
fmgr_fwpol_ipv4:
mode: "set"
adom: "ansible"
package_name: "default"
name: "Basic_IPv4_Policy"
comments: "Created by Ansible"
action: "accept"
dstaddr: "all"
srcaddr: "all"
dstintf: "any"
srcintf: "any"
logtraffic: "utm"
service: "ALL"
schedule: "always"
- name: ADD VERY BASIC IPV4 POLICY WITH NAT AND MULTIPLE ENTRIES
fmgr_fwpol_ipv4:
mode: "set"
adom: "ansible"
package_name: "default"
name: "Basic_IPv4_Policy_2"
comments: "Created by Ansible"
action: "accept"
dstaddr: "google-play"
srcaddr: "all"
dstintf: "any"
srcintf: "any"
logtraffic: "utm"
service: "HTTP, HTTPS"
schedule: "always"
nat: "enable"
users: "karen, kevin"
- name: ADD VERY BASIC IPV4 POLICY WITH NAT AND MULTIPLE ENTRIES AND SEC PROFILES
fmgr_fwpol_ipv4:
mode: "set"
adom: "ansible"
package_name: "default"
name: "Basic_IPv4_Policy_3"
comments: "Created by Ansible"
action: "accept"
dstaddr: "google-play, autoupdate.opera.com"
srcaddr: "corp_internal"
dstintf: "zone_wan1, zone_wan2"
srcintf: "zone_int1"
logtraffic: "utm"
service: "HTTP, HTTPS"
schedule: "always"
nat: "enable"
users: "karen, kevin"
av_profile: "sniffer-profile"
ips_sensor: "default"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
def fmgr_firewall_policy_modify(fmgr, paramgram):
    """
    fmgr_firewall_policy -- Add/Set/Deletes Firewall Policy Objects defined in the "paramgram"

    :param fmgr: The fmgr object instance from fmgr_utils.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict

    :return: The response from the FortiManager
    :rtype: dict
    """
    mode = paramgram["mode"]
    adom = paramgram["adom"]

    # start from the shared default result object
    response = DEFAULT_RESULT_OBJ
    url = ""
    datagram = {}

    if mode in ['set', 'add', 'update']:
        # create/modify goes to the package's policy collection
        url = '/pm/config/adom/{adom}/pkg/{pkg}/firewall/policy'.format(
            adom=adom, pkg=paramgram["package_name"])
        # strip empty values, then drop package_name (not a policy field)
        datagram = scrub_dict(prepare_dict(paramgram))
        del datagram["package_name"]
        # FortiManager expects lists where ansible accepts comma strings
        datagram = fmgr._tools.split_comma_strings_into_lists(datagram)
    elif mode == "delete":
        # delete targets one specific policy id under the package
        url = ('/pm/config/adom/{adom}/pkg/{pkg}/firewall'
               '/policy/{policyid}').format(adom=paramgram["adom"],
                                            pkg=paramgram["package_name"],
                                            policyid=paramgram["policyid"])
        datagram = {
            "policyid": paramgram["policyid"]
        }

    response = fmgr.process_request(url, datagram, paramgram["mode"])
    return response
#############
# END METHODS
#############
def main():
    """Module entry point.

    Declares the Ansible argument spec, maps the snake_case module
    parameters onto the hyphenated FortiManager paramgram keys, resolves a
    policy id by name for deletions, and dispatches the request through
    fmgr_firewall_policy_modify().
    """
    argument_spec = dict(
        adom=dict(type="str", default="root"),
        mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
        package_name=dict(type="str", required=False, default="default"),
        fail_on_missing_dependency=dict(type="str", required=False, default="disable", choices=["enable",
                                                                                                "disable"]),
        wsso=dict(required=False, type="str", choices=["disable", "enable"]),
        webfilter_profile=dict(required=False, type="str"),
        webcache_https=dict(required=False, type="str", choices=["disable", "enable"]),
        webcache=dict(required=False, type="str", choices=["disable", "enable"]),
        wccp=dict(required=False, type="str", choices=["disable", "enable"]),
        wanopt_profile=dict(required=False, type="str"),
        wanopt_peer=dict(required=False, type="str"),
        wanopt_passive_opt=dict(required=False, type="str", choices=["default", "transparent", "non-transparent"]),
        wanopt_detection=dict(required=False, type="str", choices=["active", "passive", "off"]),
        wanopt=dict(required=False, type="str", choices=["disable", "enable"]),
        waf_profile=dict(required=False, type="str"),
        vpntunnel=dict(required=False, type="str"),
        voip_profile=dict(required=False, type="str"),
        vlan_filter=dict(required=False, type="str"),
        vlan_cos_rev=dict(required=False, type="int"),
        vlan_cos_fwd=dict(required=False, type="int"),
        utm_status=dict(required=False, type="str", choices=["disable", "enable"]),
        users=dict(required=False, type="str"),
        url_category=dict(required=False, type="str"),
        traffic_shaper_reverse=dict(required=False, type="str"),
        traffic_shaper=dict(required=False, type="str"),
        timeout_send_rst=dict(required=False, type="str", choices=["disable", "enable"]),
        tcp_session_without_syn=dict(required=False, type="str", choices=["all", "data-only", "disable"]),
        tcp_mss_sender=dict(required=False, type="int"),
        tcp_mss_receiver=dict(required=False, type="int"),
        status=dict(required=False, type="str", choices=["disable", "enable"]),
        ssl_ssh_profile=dict(required=False, type="str"),
        ssl_mirror_intf=dict(required=False, type="str"),
        ssl_mirror=dict(required=False, type="str", choices=["disable", "enable"]),
        ssh_filter_profile=dict(required=False, type="str"),
        srcintf=dict(required=False, type="str"),
        srcaddr_negate=dict(required=False, type="str", choices=["disable", "enable"]),
        srcaddr=dict(required=False, type="str"),
        spamfilter_profile=dict(required=False, type="str"),
        session_ttl=dict(required=False, type="int"),
        service_negate=dict(required=False, type="str", choices=["disable", "enable"]),
        service=dict(required=False, type="str"),
        send_deny_packet=dict(required=False, type="str", choices=["disable", "enable"]),
        schedule_timeout=dict(required=False, type="str", choices=["disable", "enable"]),
        schedule=dict(required=False, type="str"),
        scan_botnet_connections=dict(required=False, type="str", choices=["disable", "block", "monitor"]),
        rtp_nat=dict(required=False, type="str", choices=["disable", "enable"]),
        rtp_addr=dict(required=False, type="str"),
        rsso=dict(required=False, type="str", choices=["disable", "enable"]),
        replacemsg_override_group=dict(required=False, type="str"),
        redirect_url=dict(required=False, type="str"),
        radius_mac_auth_bypass=dict(required=False, type="str", choices=["disable", "enable"]),
        profile_type=dict(required=False, type="str", choices=["single", "group"]),
        profile_protocol_options=dict(required=False, type="str"),
        profile_group=dict(required=False, type="str"),
        poolname=dict(required=False, type="str"),
        policyid=dict(required=False, type="str"),
        permit_stun_host=dict(required=False, type="str", choices=["disable", "enable"]),
        permit_any_host=dict(required=False, type="str", choices=["disable", "enable"]),
        per_ip_shaper=dict(required=False, type="str"),
        outbound=dict(required=False, type="str", choices=["disable", "enable"]),
        ntlm_guest=dict(required=False, type="str", choices=["disable", "enable"]),
        ntlm_enabled_browsers=dict(required=False, type="str"),
        ntlm=dict(required=False, type="str", choices=["disable", "enable"]),
        np_acceleration=dict(required=False, type="str", choices=["disable", "enable"]),
        natoutbound=dict(required=False, type="str", choices=["disable", "enable"]),
        natip=dict(required=False, type="str"),
        natinbound=dict(required=False, type="str", choices=["disable", "enable"]),
        nat=dict(required=False, type="str", choices=["disable", "enable"]),
        name=dict(required=False, type="str"),
        mms_profile=dict(required=False, type="str"),
        match_vip=dict(required=False, type="str", choices=["disable", "enable"]),
        logtraffic_start=dict(required=False, type="str", choices=["disable", "enable"]),
        logtraffic=dict(required=False, type="str", choices=["disable", "all", "utm"]),
        learning_mode=dict(required=False, type="str", choices=["disable", "enable"]),
        label=dict(required=False, type="str"),
        ips_sensor=dict(required=False, type="str"),
        ippool=dict(required=False, type="str", choices=["disable", "enable"]),
        internet_service_src_negate=dict(required=False, type="str", choices=["disable", "enable"]),
        internet_service_src_id=dict(required=False, type="str"),
        internet_service_src_custom=dict(required=False, type="str"),
        internet_service_src=dict(required=False, type="str", choices=["disable", "enable"]),
        internet_service_negate=dict(required=False, type="str", choices=["disable", "enable"]),
        internet_service_id=dict(required=False, type="str"),
        internet_service_custom=dict(required=False, type="str"),
        internet_service=dict(required=False, type="str", choices=["disable", "enable"]),
        inbound=dict(required=False, type="str", choices=["disable", "enable"]),
        identity_based_route=dict(required=False, type="str"),
        icap_profile=dict(required=False, type="str"),
        gtp_profile=dict(required=False, type="str"),
        groups=dict(required=False, type="str"),
        global_label=dict(required=False, type="str"),
        fsso_agent_for_ntlm=dict(required=False, type="str"),
        fsso=dict(required=False, type="str", choices=["disable", "enable"]),
        fixedport=dict(required=False, type="str", choices=["disable", "enable"]),
        firewall_session_dirty=dict(required=False, type="str", choices=["check-all", "check-new"]),
        dstintf=dict(required=False, type="str"),
        dstaddr_negate=dict(required=False, type="str", choices=["disable", "enable"]),
        dstaddr=dict(required=False, type="str"),
        dsri=dict(required=False, type="str", choices=["disable", "enable"]),
        dscp_value=dict(required=False, type="str"),
        dscp_negate=dict(required=False, type="str", choices=["disable", "enable"]),
        dscp_match=dict(required=False, type="str", choices=["disable", "enable"]),
        dnsfilter_profile=dict(required=False, type="str"),
        dlp_sensor=dict(required=False, type="str"),
        disclaimer=dict(required=False, type="str", choices=["disable", "enable"]),
        diffservcode_rev=dict(required=False, type="str"),
        diffservcode_forward=dict(required=False, type="str"),
        diffserv_reverse=dict(required=False, type="str", choices=["disable", "enable"]),
        diffserv_forward=dict(required=False, type="str", choices=["disable", "enable"]),
        devices=dict(required=False, type="str"),
        delay_tcp_npu_session=dict(required=False, type="str", choices=["disable", "enable"]),
        custom_log_fields=dict(required=False, type="str"),
        comments=dict(required=False, type="str"),
        capture_packet=dict(required=False, type="str", choices=["disable", "enable"]),
        captive_portal_exempt=dict(required=False, type="str", choices=["disable", "enable"]),
        block_notification=dict(required=False, type="str", choices=["disable", "enable"]),
        av_profile=dict(required=False, type="str"),
        auto_asic_offload=dict(required=False, type="str", choices=["disable", "enable"]),
        auth_redirect_addr=dict(required=False, type="str"),
        auth_path=dict(required=False, type="str", choices=["disable", "enable"]),
        auth_cert=dict(required=False, type="str"),
        application_list=dict(required=False, type="str"),
        application=dict(required=False, type="str"),
        app_group=dict(required=False, type="str"),
        app_category=dict(required=False, type="str"),
        action=dict(required=False, type="str", choices=["deny", "accept", "ipsec"]),
        vpn_dst_node=dict(required=False, type="list"),
        vpn_dst_node_host=dict(required=False, type="str"),
        vpn_dst_node_seq=dict(required=False, type="str"),
        vpn_dst_node_subnet=dict(required=False, type="str"),
        vpn_src_node=dict(required=False, type="list"),
        vpn_src_node_host=dict(required=False, type="str"),
        vpn_src_node_seq=dict(required=False, type="str"),
        vpn_src_node_subnet=dict(required=False, type="str"),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
    # MODULE PARAMGRAM
    # Maps the snake_case Ansible parameters onto the hyphenated keys the
    # FortiManager API expects.
    paramgram = {
        "mode": module.params["mode"],
        "adom": module.params["adom"],
        "package_name": module.params["package_name"],
        "wsso": module.params["wsso"],
        "webfilter-profile": module.params["webfilter_profile"],
        "webcache-https": module.params["webcache_https"],
        "webcache": module.params["webcache"],
        "wccp": module.params["wccp"],
        "wanopt-profile": module.params["wanopt_profile"],
        "wanopt-peer": module.params["wanopt_peer"],
        "wanopt-passive-opt": module.params["wanopt_passive_opt"],
        "wanopt-detection": module.params["wanopt_detection"],
        "wanopt": module.params["wanopt"],
        "waf-profile": module.params["waf_profile"],
        "vpntunnel": module.params["vpntunnel"],
        "voip-profile": module.params["voip_profile"],
        "vlan-filter": module.params["vlan_filter"],
        "vlan-cos-rev": module.params["vlan_cos_rev"],
        "vlan-cos-fwd": module.params["vlan_cos_fwd"],
        "utm-status": module.params["utm_status"],
        "users": module.params["users"],
        "url-category": module.params["url_category"],
        "traffic-shaper-reverse": module.params["traffic_shaper_reverse"],
        "traffic-shaper": module.params["traffic_shaper"],
        "timeout-send-rst": module.params["timeout_send_rst"],
        "tcp-session-without-syn": module.params["tcp_session_without_syn"],
        "tcp-mss-sender": module.params["tcp_mss_sender"],
        "tcp-mss-receiver": module.params["tcp_mss_receiver"],
        "status": module.params["status"],
        "ssl-ssh-profile": module.params["ssl_ssh_profile"],
        "ssl-mirror-intf": module.params["ssl_mirror_intf"],
        "ssl-mirror": module.params["ssl_mirror"],
        "ssh-filter-profile": module.params["ssh_filter_profile"],
        "srcintf": module.params["srcintf"],
        "srcaddr-negate": module.params["srcaddr_negate"],
        "srcaddr": module.params["srcaddr"],
        "spamfilter-profile": module.params["spamfilter_profile"],
        "session-ttl": module.params["session_ttl"],
        "service-negate": module.params["service_negate"],
        "service": module.params["service"],
        "send-deny-packet": module.params["send_deny_packet"],
        "schedule-timeout": module.params["schedule_timeout"],
        "schedule": module.params["schedule"],
        "scan-botnet-connections": module.params["scan_botnet_connections"],
        "rtp-nat": module.params["rtp_nat"],
        "rtp-addr": module.params["rtp_addr"],
        "rsso": module.params["rsso"],
        "replacemsg-override-group": module.params["replacemsg_override_group"],
        "redirect-url": module.params["redirect_url"],
        "radius-mac-auth-bypass": module.params["radius_mac_auth_bypass"],
        "profile-type": module.params["profile_type"],
        "profile-protocol-options": module.params["profile_protocol_options"],
        "profile-group": module.params["profile_group"],
        "poolname": module.params["poolname"],
        "policyid": module.params["policyid"],
        "permit-stun-host": module.params["permit_stun_host"],
        "permit-any-host": module.params["permit_any_host"],
        "per-ip-shaper": module.params["per_ip_shaper"],
        "outbound": module.params["outbound"],
        "ntlm-guest": module.params["ntlm_guest"],
        "ntlm-enabled-browsers": module.params["ntlm_enabled_browsers"],
        "ntlm": module.params["ntlm"],
        "np-acceleration": module.params["np_acceleration"],
        "natoutbound": module.params["natoutbound"],
        "natip": module.params["natip"],
        "natinbound": module.params["natinbound"],
        "nat": module.params["nat"],
        "name": module.params["name"],
        "mms-profile": module.params["mms_profile"],
        "match-vip": module.params["match_vip"],
        "logtraffic-start": module.params["logtraffic_start"],
        "logtraffic": module.params["logtraffic"],
        "learning-mode": module.params["learning_mode"],
        "label": module.params["label"],
        "ips-sensor": module.params["ips_sensor"],
        "ippool": module.params["ippool"],
        "internet-service-src-negate": module.params["internet_service_src_negate"],
        "internet-service-src-id": module.params["internet_service_src_id"],
        "internet-service-src-custom": module.params["internet_service_src_custom"],
        "internet-service-src": module.params["internet_service_src"],
        "internet-service-negate": module.params["internet_service_negate"],
        "internet-service-id": module.params["internet_service_id"],
        "internet-service-custom": module.params["internet_service_custom"],
        "internet-service": module.params["internet_service"],
        "inbound": module.params["inbound"],
        "identity-based-route": module.params["identity_based_route"],
        "icap-profile": module.params["icap_profile"],
        "gtp-profile": module.params["gtp_profile"],
        "groups": module.params["groups"],
        "global-label": module.params["global_label"],
        "fsso-agent-for-ntlm": module.params["fsso_agent_for_ntlm"],
        "fsso": module.params["fsso"],
        "fixedport": module.params["fixedport"],
        "firewall-session-dirty": module.params["firewall_session_dirty"],
        "dstintf": module.params["dstintf"],
        "dstaddr-negate": module.params["dstaddr_negate"],
        "dstaddr": module.params["dstaddr"],
        "dsri": module.params["dsri"],
        "dscp-value": module.params["dscp_value"],
        "dscp-negate": module.params["dscp_negate"],
        "dscp-match": module.params["dscp_match"],
        "dnsfilter-profile": module.params["dnsfilter_profile"],
        "dlp-sensor": module.params["dlp_sensor"],
        "disclaimer": module.params["disclaimer"],
        "diffservcode-rev": module.params["diffservcode_rev"],
        "diffservcode-forward": module.params["diffservcode_forward"],
        "diffserv-reverse": module.params["diffserv_reverse"],
        "diffserv-forward": module.params["diffserv_forward"],
        "devices": module.params["devices"],
        "delay-tcp-npu-session": module.params["delay_tcp_npu_session"],
        "custom-log-fields": module.params["custom_log_fields"],
        "comments": module.params["comments"],
        "capture-packet": module.params["capture_packet"],
        "captive-portal-exempt": module.params["captive_portal_exempt"],
        "block-notification": module.params["block_notification"],
        "av-profile": module.params["av_profile"],
        "auto-asic-offload": module.params["auto_asic_offload"],
        "auth-redirect-addr": module.params["auth_redirect_addr"],
        "auth-path": module.params["auth_path"],
        "auth-cert": module.params["auth_cert"],
        "application-list": module.params["application_list"],
        "application": module.params["application"],
        "app-group": module.params["app_group"],
        "app-category": module.params["app_category"],
        "action": module.params["action"],
        "vpn_dst_node": {
            "host": module.params["vpn_dst_node_host"],
            "seq": module.params["vpn_dst_node_seq"],
            "subnet": module.params["vpn_dst_node_subnet"],
        },
        "vpn_src_node": {
            "host": module.params["vpn_src_node_host"],
            "seq": module.params["vpn_src_node_seq"],
            "subnet": module.params["vpn_src_node_subnet"],
        }
    }
    module.paramgram = paramgram
    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        # NOTE(review): this assigns fmgr.tools, while
        # fmgr_firewall_policy_modify() reads fmgr._tools -- confirm that
        # FortiManagerHandler aliases the two attributes.
        fmgr.tools = FMGRCommon()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    # Collapse the flat vpn_*_node_* scalars into their list-typed parents.
    list_overrides = ['vpn_dst_node', 'vpn_src_node']
    paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
                                                         paramgram=paramgram, module=module)
    # BEGIN MODULE-SPECIFIC LOGIC -- THINGS NEED TO HAPPEN DEPENDING ON THE ENDPOINT AND OPERATION
    results = DEFAULT_RESULT_OBJ
    try:
        if paramgram["mode"] == "delete":
            # WE NEED TO GET THE POLICY ID FROM THE NAME OF THE POLICY TO DELETE IT
            url = '/pm/config/adom/{adom}/pkg/{pkg}/firewall' \
                  '/policy/'.format(adom=paramgram["adom"],
                                    pkg=paramgram["package_name"])
            datagram = {
                "filter": ["name", "==", paramgram["name"]]
            }
            response = fmgr.process_request(url, datagram, FMGRMethods.GET)
            try:
                if response[1][0]["policyid"]:
                    policy_id = response[1][0]["policyid"]
                    paramgram["policyid"] = policy_id
            except BaseException:
                # No policy with that name exists: report and stop.
                fmgr.return_response(module=module, results=response, good_codes=[0, ], stop_on_success=True,
                                     ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram),
                                     msg="Couldn't find policy ID number for policy name specified.")
    except Exception as err:
        raise FMGBaseException(err)
    try:
        results = fmgr_firewall_policy_modify(fmgr, paramgram)
        if module.params["fail_on_missing_dependency"] == "disable":
            # -9998 (missing dependency) is tolerated in this mode.
            fmgr.govern_response(module=module, results=results, good_codes=[0, -9998],
                                 ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
        if module.params["fail_on_missing_dependency"] == "enable" and results[0] == -10131:
            fmgr.govern_response(module=module, results=results, good_codes=[0, ], failed=True, skipped=False,
                                 ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
    except Exception as err:
        raise FMGBaseException(err)
    return module.exit_json(**results[1])
# Script entry point when executed directly by Ansible.
if __name__ == "__main__":
    main()
| gpl-3.0 |
iulian787/spack | lib/spack/external/jinja2/bccache.py | 84 | 12794 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
    the compilation of all those templates slows down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import sys
import stat
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:
    def marshal_dump(code, f):
        # Real file objects get marshal's fast native path; anything else
        # (e.g. BytesIO) receives the serialized byte string instead.
        if isinstance(f, file):
            marshal.dump(code, f)
        else:
            f.write(marshal.dumps(code))

    def marshal_load(f):
        # Mirror of marshal_dump: native path for real files only.
        if isinstance(f, file):
            return marshal.load(f)
        return marshal.loads(f.read())

# Bumped whenever the on-disk cache entry layout changes.
bc_version = 3

# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
    pickle.dumps(bc_version, 2) + \
    pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
    """Container for the compiled bytecode of a single template.

    Buckets are created and filled by the bytecode cache.  The checksum
    supplied by the cache lets a bucket reject stale entries on its own,
    so concrete cache backends never have to deal with invalidation.
    """

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        """Forget any loaded bytecode."""
        self.code = None

    def load_bytecode(self, f):
        """Read bytecode from the open file-like object *f*."""
        header = f.read(len(bc_magic))
        if header != bc_magic:
            # wrong or outdated magic header -> treat as a cache miss
            self.reset()
            return
        if pickle.load(f) != self.checksum:
            # template source changed since this entry was written
            self.reset()
            return
        try:
            self.code = marshal_load(f)
        except (EOFError, ValueError, TypeError):
            # corrupted marshal payload -> treat as a cache miss
            self.reset()
            return

    def write_bytecode(self, f):
        """Serialize the bytecode into the file-like object *f*."""
        if self.code is None:
            raise TypeError('can\'t write empty bucket')
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        """Load bytecode from a byte string."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        """Return the bytecode serialized as a byte string."""
        buf = BytesIO()
        self.write_bytecode(buf)
        return buf.getvalue()
class BytecodeCache(object):
    """Abstract base class for Jinja bytecode caches.

    Concrete caches override :meth:`load_bytecode` and :meth:`dump_bytecode`,
    both of which receive a :class:`~jinja2.bccache.Bucket`.  A minimal
    filesystem-backed implementation looks like this::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced filesystem-based bytecode cache ships with Jinja2
    itself.
    """

    def load_bytecode(self, bucket):
        """Load bytecode into *bucket*.  Implementations must do nothing at
        all when no cached code exists for the bucket."""
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Write *bucket*'s bytecode back to the cache.  Implementations
        must raise on failure rather than failing silently."""
        raise NotImplementedError()

    def clear(self):
        """Wipe the cache.  Never called by Jinja2 itself, but applications
        may use it to clear the bytecode cache of an environment."""

    def get_cache_key(self, name, filename=None):
        """Return the unique hash key for this template name."""
        digest = sha1(name.encode('utf-8'))
        if filename is not None:
            tag = '|' + filename
            if isinstance(tag, text_type):
                tag = tag.encode('utf-8')
            digest.update(tag)
        return digest.hexdigest()

    def get_source_checksum(self, source):
        """Return a checksum identifying this exact template source."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template, pre-loaded from the
        cache.  All arguments are mandatory but filename may be `None`."""
        bucket = Bucket(
            environment,
            self.get_cache_key(name, filename),
            self.get_source_checksum(source),
        )
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected.  On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self):
        # Pick (and if necessary create) a per-user cache directory under
        # the system tempdir, refusing any directory we cannot prove is
        # exclusively ours.
        def _unsafe_dir():
            raise RuntimeError('Cannot determine safe temp directory. You '
                               'need to explicitly provide one.')
        tmpdir = tempfile.gettempdir()
        # On windows the temporary directory is used specific unless
        # explicitly forced otherwise. We can just use that.
        if os.name == 'nt':
            return tmpdir
        if not hasattr(os, 'getuid'):
            # no way to identify the owning user -> cannot be made safe
            _unsafe_dir()
        dirname = '_jinja2-cache-%d' % os.getuid()
        actual_dir = os.path.join(tmpdir, dirname)
        try:
            # 0700: owner-only access from the moment of creation
            os.mkdir(actual_dir, stat.S_IRWXU)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        try:
            # Tighten and then verify: a pre-existing entry that is not a
            # directory, not owned by us, or not mode 0700 is rejected.
            os.chmod(actual_dir, stat.S_IRWXU)
            actual_dir_stat = os.lstat(actual_dir)
            if actual_dir_stat.st_uid != os.getuid() \
               or not stat.S_ISDIR(actual_dir_stat.st_mode) \
               or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
                _unsafe_dir()
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            # re-check after a racing creation by another process
            actual_dir_stat = os.lstat(actual_dir)
            if actual_dir_stat.st_uid != os.getuid() \
               or not stat.S_ISDIR(actual_dir_stat.st_mode) \
               or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
                _unsafe_dir()
        return actual_dir

    def _get_cache_filename(self, bucket):
        # Expand the filename pattern with the bucket's cache key.
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        # A missing cache file is simply a cache miss (bucket untouched).
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        f = open(self._get_cache_filename(bucket), 'wb')
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove
        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                pass
class MemcachedBytecodeCache(BytecodeCache):
    """A bytecode cache backed by a memcache-style client.

    No specific memcache library is required; any object exposing the
    minimal interface below works (e.g. python-memcached, cmemcache, or
    werkzeug.contrib.cache clients).  The django cache interface is not
    compatible because it only stores unicode, but its underlying client
    (``django.core.cache.cache._client``) can be passed directly.

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The remaining constructor arguments are the prefix prepended to every
    cache key and the timeout for stored bytecode.  A high (or no) timeout
    is recommended.  :meth:`clear` is a no-op for this cache.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        """Fetch bytecode for *bucket* from memcache, tolerating client
        errors when configured to do so."""
        code = None
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        """Store *bucket*'s bytecode in memcache, tolerating client errors
        when configured to do so."""
        key = self.prefix + bucket.key
        value = bucket.bytecode_to_string()
        try:
            if self.timeout is not None:
                self.client.set(key, value, self.timeout)
            else:
                self.client.set(key, value)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
| lgpl-2.1 |
entropy1337/infernal-twin | Modules/build/pip/build/lib.linux-i686-2.7/pip/_vendor/colorama/win32.py | 446 | 5121 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
    def SetConsoleTitle(title):
        # Set the console window title; returns the Win32 BOOL result.
        # NOTE(review): despite the "W" suffix, _SetConsoleTitleW is bound
        # to kernel32.SetConsoleTitleA (LPCSTR) -- confirm wide-char intent.
        return _SetConsoleTitleW(title)
| gpl-3.0 |
yufish/youtube-dl | youtube_dl/extractor/macgamestore.py | 142 | 1275 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class MacGameStoreIE(InfoExtractor):
    """Extracts the trailer video URL and title from a macgamestore.com
    media-viewer page."""
    IE_NAME = 'macgamestore'
    IE_DESC = 'MacGameStore trailers'
    _VALID_URL = r'https?://www\.macgamestore\.com/mediaviewer\.php\?trailer=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.macgamestore.com/mediaviewer.php?trailer=2450',
        'md5': '8649b8ea684b6666b4c5be736ecddc61',
        'info_dict': {
            'id': '2450',
            'ext': 'm4v',
            'title': 'Crow',
        }
    }
    def _real_extract(self, url):
        # The trailer id is the (?P<id>...) group of _VALID_URL.
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            url, video_id, 'Downloading trailer page')
        # The site serves a page containing '>Missing Media<' for removed
        # trailers; fail early with a user-facing (expected) error.
        if '>Missing Media<' in webpage:
            raise ExtractorError(
                'Trailer %s does not exist' % video_id, expected=True)
        video_title = self._html_search_regex(
            r'<title>MacGameStore: (.*?) Trailer</title>', webpage, 'title')
        video_url = self._html_search_regex(
            r'(?s)<div\s+id="video-player".*?href="([^"]+)"\s*>',
            webpage, 'video URL')
        return {
            'id': video_id,
            'url': video_url,
            'title': video_title
        }
| unlicense |
smartboyathome/Wonderland-Engine | tests/DoorknobTests/test_mongodb_service_checks.py | 1 | 4975 | '''
Copyright (c) 2012 Alexander Abbott
This file is part of the Cheshire Cyber Defense Scoring Engine (henceforth
referred to as Cheshire).
Cheshire is free software: you can redistribute it and/or modify it under
the terms of the GNU Affero General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
Cheshire is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for
more details.
You should have received a copy of the GNU Affero General Public License
along with Cheshire. If not, see <http://www.gnu.org/licenses/>.
'''
from copy import deepcopy
from datetime import datetime
from Doorknob.Exceptions import Exists, DoesNotExist
from tests.DoorknobTests import DBTestCase
from .. import show_difference_between_dicts
class TestMongoDBServiceChecks(DBTestCase):
    """CRUD tests for the service-check wrapper API backed by MongoDB.
    DBTestCase appears to provide self.db (raw collection access),
    self.db_wrapper (the API under test), self.data (seed fixtures) and
    self.floor_time_to_milliseconds() -- TODO confirm against DBTestCase.
    """
    def test_get_all_service_checks(self):
        # Expect exactly the fixtures of type 'service', with the 'type'
        # discriminator stripped from each returned document.
        wrapper_result = self.db_wrapper.get_all_service_checks()
        expected_result = [deepcopy(obj) for obj in self.data['active_checks'] if obj['type'] == 'service']
        for item in expected_result:
            del item['type']
        assert len(wrapper_result) == len(expected_result)
        assert wrapper_result == expected_result
    def test_get_specific_service_check(self):
        # Fetch by id; the wrapper result omits both 'type' and 'id'.
        expected_result = [deepcopy(obj) for obj in self.data['active_checks'] if obj['type'] == 'service'][0]
        result_id = expected_result['id']
        del expected_result['type'], expected_result['id']
        wrapper_result = self.db_wrapper.get_specific_service_check(result_id)[0]
        assert wrapper_result == expected_result
    def test_get_specific_service_check_nonexistant(self):
        # Unknown ids yield an empty list rather than raising.
        wrapper_result = self.db_wrapper.get_specific_service_check('NonexistantServiceUp')
        expected_result = []
        assert wrapper_result == expected_result
    def test_create_service_check(self):
        self.db_wrapper.create_service_check('NginxUp', 'Checks if nginx is up', 'MongoDB', 'SampleServiceCheck')
        # Verify through the raw collection, projecting away _id/id/type.
        wrapper_result = list(self.db.active_checks.find({'id': 'NginxUp', 'type': 'service'}, {'_id': 0, 'id': 0, 'type': 0}))
        expected_result = [{
            "description": 'Checks if nginx is up',
            "machine": 'MongoDB',
            "class_name": 'SampleServiceCheck'
        }]
        assert not len(wrapper_result) == 0
        assert wrapper_result == expected_result
    def test_create_service_check_exists(self):
        # Creating a duplicate id must raise Exists.
        with self.assertRaises(Exists):
            self.db_wrapper.create_service_check('MongoDBUp', 'Checks whether MongoDB is up.', 'MongoDB', 'SampleServiceCheck')
    def test_modify_service_check(self):
        # Partial update: the unspecified class_name must be preserved.
        self.db_wrapper.modify_service_check('EmailUp', description='Checks if the email server is up', machine='Redis')
        wrapper_result = list(self.db.active_checks.find({'id': 'EmailUp', 'type': 'service'}, {'_id': 0, 'id': 0, 'type': 0}))
        expected_result = [{
            "description": 'Checks if the email server is up',
            "machine": 'Redis',
            "class_name": 'SampleServiceCheck'
        }]
        assert not len(wrapper_result) == 0
        assert wrapper_result == expected_result
    def test_modify_service_check_nonexistant(self):
        with self.assertRaises(DoesNotExist):
            self.db_wrapper.modify_service_check('NonexistantServiceUp', description='Check whether a nonexistant service is up.', machine='MongoDB', class_name='SampleServiceCheck')
    def test_complete_service_check(self):
        # The timestamp is floored to milliseconds via the test helper
        # before being stored and used in the lookup query.
        time = self.floor_time_to_milliseconds(datetime.now())
        self.db_wrapper.complete_service_check('MongoDBUp', '1', time, 0)
        wrapper_result = list(self.db.completed_checks.find({'id': 'MongoDBUp', 'type': 'service', 'team_id': '1', 'timestamp': time}, {'_id': 0, 'id': 0, 'type': 0, 'team_id': 0, 'timestamp': 0}))[0]
        expected_result = [deepcopy(obj) for obj in self.data['active_checks'] if obj['type'] == 'service' and obj['id'] == 'MongoDBUp'][0]
        del expected_result['id'], expected_result['type'], expected_result['machine'], expected_result['class_name']
        expected_result['score'] = 0
        # Emit a readable diff before the (opaque) equality assert below.
        show_difference_between_dicts(wrapper_result, expected_result)
        assert wrapper_result == expected_result
    def test_delete_service_check(self):
        self.db_wrapper.delete_service_check('DeadThingUp')
        wrapper_result = list(self.db.active_checks.find({'id': 'DeadThingUp', 'type': 'service'}, {'_id': 0, 'id': 0, 'type': 0}))
        expected_result = []
        assert wrapper_result == expected_result
    def test_delete_service_check_nonexistant(self):
        with self.assertRaises(DoesNotExist):
            self.db_wrapper.delete_service_check('NonexistantServiceCheck')
mhei/linux | tools/perf/scripts/python/stackcollapse.py | 270 | 4360 | # stackcollapse.py - format perf samples with one line per distinct call stack
#
# This script's output has two space-separated fields. The first is a semicolon
# separated stack including the program name (from the "comm" field) and the
# function names from the call stack. The second is a count:
#
# swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2
#
# The file is sorted according to the first field.
#
# Input may be created and processed using:
#
# perf record -a -g -F 99 sleep 60
# perf script report stackcollapse > out.stacks-folded
#
# (perf script record stackcollapse works too).
#
# Written by Paolo Bonzini <pbonzini@redhat.com>
# Based on Brendan Gregg's stackcollapse-perf.pl script.
import os
import sys
from collections import defaultdict
from optparse import OptionParser, make_option
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from EventClass import *
# command line parsing
option_list = [
    # formatting options for the bottom entry of the stack
    make_option("--include-tid", dest="include_tid",
                action="store_true", default=False,
                help="include thread id in stack"),
    make_option("--include-pid", dest="include_pid",
                action="store_true", default=False,
                help="include process id in stack"),
    make_option("--no-comm", dest="include_comm",
                action="store_false", default=True,
                help="do not separate stacks according to comm"),
    make_option("--tidy-java", dest="tidy_java",
                action="store_true", default=False,
                help="beautify Java signatures"),
    make_option("--kernel", dest="annotate_kernel",
                action="store_true", default=False,
                help="annotate kernel functions with _[k]")
]
parser = OptionParser(option_list=option_list)
(opts, args) = parser.parse_args()
if len(args) != 0:
    parser.error("unexpected command line argument")
# tid/pid are rendered as part of the comm entry, so requesting them
# while suppressing comm is contradictory.
if opts.include_tid and not opts.include_comm:
    parser.error("requesting tid but not comm is invalid")
if opts.include_pid and not opts.include_comm:
    parser.error("requesting pid but not comm is invalid")
# event handlers
# Maps a folded stack string ("comm;func1;func2") to its sample count.
lines = defaultdict(lambda: 0)
def process_event(param_dict):
    """Fold one perf sample into the global `lines` counter.
    Builds a semicolon-separated stack string from the sample's call chain
    (reversed so the root/comm entry comes first) and increments its count.
    """
    def tidy_function_name(sym, dso):
        # Normalize one frame's symbol; `sym` may be None for unresolved frames.
        if sym is None:
            sym = '[unknown]'
        # Semicolons separate frames in the folded format, so they must not
        # appear inside a frame name.
        sym = sym.replace(';', ':')
        if opts.tidy_java:
            # the original stackcollapse-perf.pl script gives the
            # example of converting this:
            #    Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V
            # to this:
            #    org/mozilla/javascript/MemberBox:.init
            sym = sym.replace('<', '')
            sym = sym.replace('>', '')
            if sym[0] == 'L' and sym.find('/'):
                sym = sym[1:]
            try:
                sym = sym[:sym.index('(')]
            except ValueError:
                pass
        if opts.annotate_kernel and dso == '[kernel.kallsyms]':
            return sym + '_[k]'
        else:
            return sym
    stack = list()
    if 'callchain' in param_dict:
        # Full call chain available: fold every frame, defaulting missing
        # sym/dso entries so tidy_function_name always gets both arguments.
        for entry in param_dict['callchain']:
            entry.setdefault('sym', dict())
            entry['sym'].setdefault('name', None)
            entry.setdefault('dso', None)
            stack.append(tidy_function_name(entry['sym']['name'],
                                            entry['dso']))
    else:
        # No call chain in the sample: fall back to the sampled symbol alone.
        param_dict.setdefault('symbol', None)
        param_dict.setdefault('dso', None)
        stack.append(tidy_function_name(param_dict['symbol'],
                                        param_dict['dso']))
    if opts.include_comm:
        comm = param_dict["comm"].replace(' ', '_')
        sep = "-"
        if opts.include_pid:
            comm = comm + sep + str(param_dict['sample']['pid'])
            sep = "/"
        if opts.include_tid:
            comm = comm + sep + str(param_dict['sample']['tid'])
        stack.append(comm)
    # Innermost frame was appended first, so reverse for root-first output.
    stack_string = ';'.join(reversed(stack))
    lines[stack_string] = lines[stack_string] + 1
def trace_end():
    """Print every folded stack with its sample count, sorted by stack.

    Fixes in this revision: the old code bound `list = lines.keys()`
    (shadowing the builtin `list`) and called `.sort()` on it, which also
    breaks on Python 3 where dict.keys() is a view.  `sorted(lines)` is
    equivalent on both interpreters, and the parenthesized print form
    behaves identically under Python 2 and 3 for a single argument.
    """
    for stack in sorted(lines):
        print("%s %d" % (stack, lines[stack]))
| gpl-2.0 |
joanneko/goodbooks | goodbooks/lib/python2.7/site-packages/setuptools/tests/test_dist_info.py | 148 | 2261 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import pytest
import pkg_resources
from .textwrap import DALS
class TestDistInfo:
    """Tests that pkg_resources discovers and parses .dist-info style
    distribution metadata directories."""
    def test_distinfo(self):
        # setup_method created one versioned and one unversioned dist.
        dists = dict(
            (d.project_name, d)
            for d in pkg_resources.find_distributions(self.tmpdir)
        )
        assert len(dists) == 2, dists
        unversioned = dists['UnversionedDistribution']
        versioned = dists['VersionedDistribution']
        assert versioned.version == '2.718'  # from filename
        assert unversioned.version == '0.3'  # from METADATA
    @pytest.mark.importorskip('ast')
    def test_conditional_dependencies(self):
        # splort is unconditional; quux is only pulled in by the 'baz' extra.
        specs = 'splort==4', 'quux>=1.1'
        requires = list(map(pkg_resources.Requirement.parse, specs))
        for d in pkg_resources.find_distributions(self.tmpdir):
            assert d.requires() == requires[:1]
            assert d.requires(extras=('baz',)) == requires
            assert d.extras == ['baz']
    # Shared METADATA skeleton; {version} is either empty (version then
    # comes from the directory name) or a 'Version: x.y' header line.
    metadata_template = DALS("""
        Metadata-Version: 1.2
        Name: {name}
        {version}
        Requires-Dist: splort (==4)
        Provides-Extra: baz
        Requires-Dist: quux (>=1.1); extra == 'baz'
        """)
    def setup_method(self, method):
        self.tmpdir = tempfile.mkdtemp()
        dist_info_name = 'VersionedDistribution-2.718.dist-info'
        versioned = os.path.join(self.tmpdir, dist_info_name)
        os.mkdir(versioned)
        with open(os.path.join(versioned, 'METADATA'), 'w+') as metadata_file:
            # No Version header here: the replace() collapses the blank
            # line left behind by the empty {version} placeholder.
            metadata = self.metadata_template.format(
                name='VersionedDistribution',
                version='',
            ).replace('\n\n', '\n')
            metadata_file.write(metadata)
        dist_info_name = 'UnversionedDistribution.dist-info'
        unversioned = os.path.join(self.tmpdir, dist_info_name)
        os.mkdir(unversioned)
        with open(os.path.join(unversioned, 'METADATA'), 'w+') as metadata_file:
            metadata = self.metadata_template.format(
                name='UnversionedDistribution',
                version='Version: 0.3',
            )
            metadata_file.write(metadata)
    def teardown_method(self, method):
        shutil.rmtree(self.tmpdir)
| bsd-3-clause |
akfullfo/rainbarrel | rainbarrel/netjson.py | 1 | 7874 | #!/usr/bin/env python
# ________________________________________________________________________
#
# Copyright (C) 2015 Andrew Fullford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ________________________________________________________________________
#
import re
import socket
import json
import logging
__doc__ = """
Transfers python data types as netstring-encapsulated JSON text.
Netstarings have the form:
length:data,
where "length" is a base 10 size written in ascii digits, "data"
is any arbitrary byte-stream. The colon separates the length from the
data. The comma is present to lend limited format validation and
to make streams somewhat human-readable.
See https://en.wikipedia.org/wiki/Netstring.
"""
class Writer(object):
    """
    Queues python data items and transmits them as netstring-encapsulated
    JSON via non-blocking socket send() calls.
    queue() and send() both return the number of unsent queue items, which
    will be 0 when the queue is empty.  When operating under the control of
    select.poll or similar, the caller should enable output events for the
    socket when a data item is queued, and disable them when the send
    method returns zero.
    Any errors (lost connections, etc) are raised as exceptions.  Recovery
    will typically require this object to be discarded and the socket
    closed.
    """
    class Buffer(object):
        #  One fully framed netstring plus a count of bytes not yet sent.
        def __init__(self, data, indent=None):
            j = json.dumps(data, indent=indent)
            #  Netstring framing is "<length>:<payload>,".
            self.data = str(len(j)) + ':' + j + ','
            self.remaining = len(self.data)
    def __init__(self, sock, **params):
        """Wrap `sock`.  Optional params: log (a logging.Logger)."""
        self.log = params.get('log')
        if not self.log:
            self.log = logging.getLogger(__name__)
            self.log.addHandler(logging.NullHandler())
        #  Pretty-print the JSON when debug logging so traffic is readable.
        if self.log.isEnabledFor(logging.DEBUG):
            self.indent = 4
        else:
            self.indent = None
        self.sock = sock
        self.q = []
    def queue(self, data=None):
        """
        Queues a data object to be transmitted as a JSON-encoded netstring.
        Returns the length of the queue in data objects (not bytes).
        If "data" is None or not specified, the length of the queue
        is returned without adding to it.
        """
        if data is not None:
            self.q.append(self.Buffer(data, indent=self.indent))
            if self.log.isEnabledFor(logging.DEBUG):
                pending = 0
                for item in self.q:
                    pending += item.remaining
                self.log.debug("Queued %d bytes, pending items %d, pending bytes %d",
                        self.q[-1].remaining, len(self.q), pending)
        return len(self.q)
    def send(self):
        """
        Performs a single send() of as much of the head buffer as the
        socket will accept.  Returns the number of queue items that still
        hold unsent data (0 when the queue has drained).
        """
        #  Defensively discard any exhausted buffers left on the queue.
        while len(self.q) > 0 and self.q[0].remaining <= 0:
            self.log.error("Send queue held item with no remaining data")
            self.q.pop(0)
        if len(self.q) == 0:
            self.log.info("send() called with no data pending")
            return 0
        cnt = self.sock.send(self.q[0].data, socket.MSG_DONTWAIT)
        if cnt <= 0:
            #  BUG FIX: the message is now actually %-formatted; previously
            #  the count was passed as a spurious second Exception arg.
            raise Exception("Unexpected return %d from socket.send()" % cnt)
        #  BUG FIX: keep the bytes NOT yet sent.  The previous code kept the
        #  prefix that had just been sent (data[:cnt]), which retransmitted
        #  the head of the buffer whenever a send() was partial, corrupting
        #  the stream.
        self.q[0].data = self.q[0].data[cnt:]
        self.q[0].remaining -= cnt
        if self.q[0].remaining <= 0:
            self.log.debug("send() emptied a queue bucket")
            self.q.pop(0)
        if self.log.isEnabledFor(logging.DEBUG):
            pending = 0
            for item in self.q:
                pending += item.remaining
            self.log.debug("Pending items %d, pending bytes %d", len(self.q), pending)
        return len(self.q)
class Reader(object):
    """
    This class implements a generator that performs a recv() on the socket passed, and
    yields the next complete data type received.  An exception is raised if an error
    occurs including encapsulation and encoding errors.
    The "connection_lost" attribute will be True if the remote connection closed.
    An exception is raised if the connection is lost while data is still pending.
    The "maxsize" param is used to place an upper limit on the number of bytes that must
    be read before the ':' separator is encountered.  This allows early detection of
    faulty encapsulation.  The default allows sizes up to 1 TB.
    The behavior following an exception is undefined.  The caller should discard the
    object and close the remote connection.
    NOTE(review): recv() contains a yield, so it is a generator function; each
    call returns a fresh generator and no socket I/O happens until that
    generator is first iterated -- confirm callers iterate the result.
    """
    def __init__(self, sock, **params):
        self.log = params.get('log')
        if not self.log:
            # Default to a module logger that is silent unless the
            # application configures logging.
            self.log = logging.getLogger(__name__)
            self.log.addHandler(logging.NullHandler())
        self.sock = sock
        self.iosize = params.get('iosize', 4096)  # bytes requested per recv()
        self.connection_lost = False
        # Longest legal "<digits>:" prefix, derived from maxsize (1 TB default).
        self.max_colon_offset = len(str(params.get('maxsize', 1024 * 1024 * 1024 * 1024))) + 1
        self.pending = ''  # unconsumed bytes carried across recv() calls
        self.need = None   # payload+comma bytes still required, or None while parsing the header
        # This supports a small extension to the netstring protocol by allowing a couple
        # of white-space characters between the command and the size.  The upshot is you
        # can do testing with telnet by sending the data with a CR.
        #
        self.regex_header = re.compile(r'^(\s{0,2}\d+):')
    def recv(self):
        """Read once from the socket, then yield each complete decoded item."""
        data = self.sock.recv(self.iosize)
        # NOTE(review): the '' comparison assumes a Python 2 str-mode socket;
        # a Python 3 socket returns bytes -- verify before porting.
        if data == '':
            self.connection_lost = True
            if len(self.pending.lstrip()) > 0:
                raise Exception("Unexpected EOF with unprocessed data")
            return
        self.pending += data
        while True:
            self.log.debug("Pending: >>>%s<<<<", str(self.pending))
            if self.need is None:
                # Still parsing the "<length>:" header.
                if self.pending.find(':') < 0:
                    if len(self.pending) > self.max_colon_offset:
                        raise Exception("No colon encountered within length limit of %d" %
                                (self.max_colon_offset,))
                    self.log.debug("Colon not found with %d packet bytes, continuing", len(self.pending))
                    return
                m = self.regex_header.match(self.pending)
                if not m:
                    raise Exception("Bad length encountered in packet header")
                size_str = m.group(1)
                self.log.debug("Size str %d bytes: >>>%s<<<<", len(size_str), size_str)
                # We need to delay processing until all the data plus the trailing comma have been read.
                #
                self.need = int(size_str.strip()) + 1
                self.log.debug("Found length %d", self.need)
                # Now remove the size header, leaving only some fragment of data and possible trailing comma.
                #
                self.pending = self.pending[len(size_str) + 1:]
            if len(self.pending) >= self.need:
                # Full payload (plus trailing comma) available: slice and decode.
                data = self.pending[:self.need]
                self.pending = self.pending[self.need:]
                self.need = None
                if data[-1] != ',':
                    raise Exception("Trailing comma missing from data")
                yield json.loads(data[:-1])
            else:
                self.log.debug("Returning with %d additional data needed", self.need - len(self.pending))
                return
| apache-2.0 |
fzalkow/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
    """ Set X and Y appropriately and checks inputs

    If Y is None, it is set as a pointer to X (i.e. not a copy).
    If Y is given, this does not happen.
    All distance metrics should use this function first to assert that the
    given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats.  Finally, the function checks that the size
    of the second dimension of the two arrays is equal.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    X, Y, dtype = _return_float_dtype(X, Y)
    if Y is X or Y is None:
        # Validate once and alias: avoids a second conversion/copy when the
        # caller computes distances of X against itself.
        X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
    else:
        X = check_array(X, accept_sparse='csr', dtype=dtype)
        Y = check_array(Y, accept_sparse='csr', dtype=dtype)
    # Pairwise metrics require matching feature dimensions.
    if X.shape[1] != Y.shape[1]:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X.shape[1] == %d while Y.shape[1] == %d" % (
                             X.shape[1], Y.shape[1]))
    return X, Y
def check_paired_arrays(X, Y):
    """ Set X and Y appropriately and checks inputs for paired distances

    All paired distance metrics should use this function first to assert that
    the given parameters are correct and safe to use.

    Specifically, this function first ensures that both X and Y are arrays,
    then checks that they are at least two dimensional while ensuring that
    their elements are floats.  Finally, the function checks that the size
    of the dimensions of the two arrays are equal.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_a, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)

    Returns
    -------
    safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
        An array equal to X, guaranteed to be a numpy array.

    safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
        If Y was None, safe_Y will be a pointer to X.
    """
    X, Y = check_pairwise_arrays(X, Y)
    # Paired (row-wise) metrics additionally require identical shapes.
    if X.shape != Y.shape:
        raise ValueError("X and Y should be of same shape. They were "
                         "respectively %r and %r long." % (X.shape, Y.shape))
    return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
    """
    Considering the rows of X (and Y=X) as vectors, compute the
    distance matrix between each pair of vectors.

    For efficiency reasons, the euclidean distance between a pair of row
    vector x and y is computed as::

        dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))

    This formulation has two advantages over other ways of computing distances.
    First, it is computationally efficient when dealing with sparse data.
    Second, if x varies but y remains unchanged, then the right-most dot
    product `dot(y, y)` can be pre-computed.

    However, this is not the most precise way of doing this computation, and
    the distance matrix returned by this function may not be exactly
    symmetric as required by, e.g., ``scipy.spatial.distance`` functions.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples_1, n_features)

    Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)

    Y_norm_squared : array-like, shape (n_samples_2, ), optional
        Pre-computed dot-products of vectors in Y (e.g.,
        ``(Y**2).sum(axis=1)``)

    squared : boolean, optional
        Return squared Euclidean distances.

    Returns
    -------
    distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)

    Examples
    --------
    >>> from sklearn.metrics.pairwise import euclidean_distances
    >>> X = [[0, 1], [1, 1]]
    >>> # distance between rows of X
    >>> euclidean_distances(X, X)
    array([[ 0.,  1.],
           [ 1.,  0.]])
    >>> # get distance to origin
    >>> euclidean_distances(X, [[0, 0]])
    array([[ 1.        ],
           [ 1.41421356]])

    See also
    --------
    paired_distances : distances betweens pairs of elements of X and Y.
    """
    # should not need X_norm_squared because if you could precompute that as
    # well as Y, then you should just pre-compute the output and not even
    # call this function.
    X, Y = check_pairwise_arrays(X, Y)
    if Y_norm_squared is not None:
        YY = check_array(Y_norm_squared)
        if YY.shape != (1, Y.shape[0]):
            raise ValueError(
                "Incompatible dimensions for Y and Y_norm_squared")
    else:
        YY = row_norms(Y, squared=True)[np.newaxis, :]
    if X is Y:  # shortcut in the common case euclidean_distances(X, X)
        XX = YY.T
    else:
        XX = row_norms(X, squared=True)[:, np.newaxis]
    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, so the dominant cost is a
    # single (sparse-aware) matrix product; XX and YY broadcast over it.
    distances = safe_sparse_dot(X, Y.T, dense_output=True)
    distances *= -2
    distances += XX
    distances += YY
    # Rounding in the expansion above can leave tiny negative values; clamp
    # them so the sqrt below is well-defined.
    np.maximum(distances, 0, out=distances)
    if X is Y:
        # Ensure that distances between vectors and themselves are set to 0.0.
        # This may not be the case due to floating point rounding errors.
        distances.flat[::distances.shape[0] + 1] = 0.0
    return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
                                  batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    This function computes for each row in X, the index of the row of Y which
    is closest (according to the specified distance).  The minimal distances
    are also returned.

    This is mostly equivalent to calling:

        (pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
         pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))

    but uses much less memory, and is faster for large arrays.

    Parameters
    ----------
    X, Y : {array-like, sparse matrix}
        Arrays containing points.  Respective shapes (n_samples1, n_features)
        and (n_samples2, n_features)

    batch_size : integer
        To reduce memory consumption over the naive solution, data are
        processed in batches, comprising batch_size rows of X and
        batch_size rows of Y.  The default value is quite conservative, but
        can be changed for fine-tuning.  The larger the number, the larger the
        memory usage.

    metric : string or callable, default 'euclidean'
        metric to use for distance computation.  Any metric from scikit-learn
        or scipy.spatial.distance can be used.

        If metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded.  The
        callable should take two arrays as input and return one value
        indicating the distance between them.  This works for Scipy's
        metrics, but is less efficient than passing the metric name as a
        string.  Distance matrices are not supported.

        Valid values for metric are:

        - from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
          'manhattan']

        - from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
          'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
          'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
          'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
          'sqeuclidean', 'yule']

        See the documentation for scipy.spatial.distance for details on these
        metrics.

    metric_kwargs : dict, optional
        Keyword arguments to pass to specified metric function.

    axis : int, optional, default 1
        Axis along which the argmin and distances are to be computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    distances : numpy.ndarray
        distances[i] is the distance between the i-th row in X and the
        argmin[i]-th row in Y.

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin
    """
    dist_func = None
    if metric in PAIRWISE_DISTANCE_FUNCTIONS:
        dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif not callable(metric) and not isinstance(metric, str):
        raise ValueError("'metric' must be a string or a callable")
    X, Y = check_pairwise_arrays(X, Y)
    if metric_kwargs is None:
        metric_kwargs = {}
    if axis == 0:
        # The loop below works row-wise over X; swapping implements axis=0.
        X, Y = Y, X
    # Allocate output arrays
    indices = np.empty(X.shape[0], dtype=np.intp)
    values = np.empty(X.shape[0])
    values.fill(np.infty)
    for chunk_x in gen_batches(X.shape[0], batch_size):
        X_chunk = X[chunk_x, :]
        for chunk_y in gen_batches(Y.shape[0], batch_size):
            Y_chunk = Y[chunk_y, :]
            if dist_func is not None:
                if metric == 'euclidean':  # special case, for speed
                    # Squared euclidean via the dot-product expansion; the
                    # sqrt is deferred to a single pass at the end.
                    d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
                                              dense_output=True)
                    d_chunk *= -2
                    d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
                    d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
                    np.maximum(d_chunk, 0, d_chunk)
                else:
                    d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
            else:
                d_chunk = pairwise_distances(X_chunk, Y_chunk,
                                             metric=metric, **metric_kwargs)
            # Update indices and minimum values using chunk
            min_indices = d_chunk.argmin(axis=1)
            min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
                                 min_indices]
            # Only rows whose best-so-far is beaten by this Y chunk change;
            # min_indices are chunk-local, hence the + chunk_y.start offset.
            flags = values[chunk_x] > min_values
            indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
            values[chunk_x][flags] = min_values[flags]
    if metric == "euclidean" and not metric_kwargs.get("squared", False):
        # Distances were accumulated squared; take the root once at the end.
        np.sqrt(values, values)
    return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
                              batch_size=500, metric_kwargs=None):
    """Compute minimum distances between one point and a set of points.

    For each row in X, find the index of the closest row of Y according to
    ``metric``.  Mostly equivalent to
    ``pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)`` but far
    more memory-efficient, and faster, on large arrays.  This function
    works with dense 2D arrays only.

    Parameters
    ----------
    X : array-like, shape (n_samples1, n_features)
        Array containing points.

    Y : array-like, shape (n_samples2, n_features)
        Array containing points.

    batch_size : integer
        Rows of X and Y processed per chunk; larger values trade memory
        for speed.

    metric : string or callable
        Any scikit-learn or scipy.spatial.distance metric name, or a
        callable taking two arrays and returning one distance value.
        Distance matrices are not supported.  See
        scipy.spatial.distance for the available metric names.

    metric_kwargs : dict
        Keyword arguments passed to the specified metric function.

    axis : int, optional, default 1
        Axis along which the argmin is computed.

    Returns
    -------
    argmin : numpy.ndarray
        Y[argmin[i], :] is the row in Y that is closest to X[i, :].

    See also
    --------
    sklearn.metrics.pairwise_distances
    sklearn.metrics.pairwise_distances_argmin_min
    """
    # Delegate to the argmin+min implementation and keep only the indices.
    indices, _ = pairwise_distances_argmin_min(
        X, Y, axis=axis, metric=metric, batch_size=batch_size,
        metric_kwargs=metric_kwargs if metric_kwargs is not None else {})
    return indices
def manhattan_distances(X, Y=None, sum_over_features=True,
                        size_threshold=5e8):
    """ Compute the L1 distances between the vectors in X and Y.

    With sum_over_features equal to False it returns the componentwise
    distances.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples_X, n_features).

    Y : array_like, optional
        An array with shape (n_samples_Y, n_features).

    sum_over_features : bool, default=True
        If True the function returns the pairwise distance matrix
        else it returns the componentwise L1 pairwise-distances.
        Not supported for sparse matrix inputs.

    size_threshold : int, default=5e8
        Unused parameter.

    Returns
    -------
    D : array
        If sum_over_features is False shape is
        (n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (ie. absolute difference),
        else shape is (n_samples_X, n_samples_Y) and D contains
        the pairwise L1 distances.

    Examples
    --------
    >>> from sklearn.metrics.pairwise import manhattan_distances
    >>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
    array([[ 0.]])
    >>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
    array([[ 1.]])
    >>> manhattan_distances([[1, 2], [3, 4]],\
         [[1, 2], [0, 3]])#doctest:+ELLIPSIS
    array([[ 0.,  2.],
           [ 4.,  4.]])
    >>> import numpy as np
    >>> X = np.ones((1, 2))
    >>> y = 2 * np.ones((2, 2))
    >>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
    array([[ 1.,  1.],
           [ 1.,  1.]]...)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if issparse(X) or issparse(Y):
        if not sum_over_features:
            raise TypeError("sum_over_features=%r not supported"
                            " for sparse matrices" % sum_over_features)
        X = csr_matrix(X, copy=False)
        Y = csr_matrix(Y, copy=False)
        D = np.zeros((X.shape[0], Y.shape[0]))
        # Cython kernel fills the pairwise L1 sums into D in place.
        _sparse_manhattan(X.data, X.indices, X.indptr,
                          Y.data, Y.indices, Y.indptr,
                          X.shape[1], D)
        return D
    if sum_over_features:
        # Dense case: scipy's C implementation of the cityblock metric.
        return distance.cdist(X, Y, 'cityblock')
    # Componentwise distances via broadcasting; the result is flattened to
    # shape (n_samples_X * n_samples_Y, n_features).
    D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
    D = np.abs(D, D)
    return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
    """Compute the cosine distance between samples in X and Y.

    Cosine distance is defined as 1.0 minus the cosine similarity.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array_like, sparse matrix
        with shape (n_samples_X, n_features).
    Y : array_like, sparse matrix (optional)
        with shape (n_samples_Y, n_features).

    Returns
    -------
    distance matrix : array
        An array with shape (n_samples_X, n_samples_Y).

    See also
    --------
    sklearn.metrics.pairwise.cosine_similarity
    scipy.spatial.distance.cosine (dense matrices only)
    """
    # Equivalent to ``1.0 - cosine_similarity(X, Y)`` but computed in place
    # to avoid allocating a second (n_samples_X, n_samples_Y) matrix.
    sim = cosine_similarity(X, Y)
    np.subtract(1.0, sim, out=sim)
    return sim
# Paired distances
def paired_euclidean_distances(X, Y):
    """Compute the row-wise (paired) euclidean distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    # The L2 norm of each row of the difference is the paired distance.
    diff = X - Y
    return row_norms(diff)
def paired_manhattan_distances(X, Y):
    """Compute the row-wise (paired) L1 distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray (n_samples, )
    """
    X, Y = check_paired_arrays(X, Y)
    delta = X - Y
    if not issparse(delta):
        # Dense: sum the absolute componentwise differences per row.
        return np.abs(delta).sum(axis=-1)
    # Sparse: take absolute values on the stored entries only, then reduce
    # each row; squeeze the resulting (n_samples, 1) matrix to a 1-D array.
    delta.data = np.abs(delta.data)
    return np.squeeze(np.array(delta.sum(axis=1)))
def paired_cosine_distances(X, Y):
    """Compute the row-wise (paired) cosine distances between X and Y.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    Y : array-like, shape (n_samples, n_features)

    Returns
    -------
    distances : ndarray, shape (n_samples, )

    Notes
    -----
    The cosine distance is equivalent to half the squared euclidean
    distance if each sample is normalized to unit norm.
    """
    X, Y = check_paired_arrays(X, Y)
    # 0.5 * ||x/|x| - y/|y|||^2 == 1 - cos(x, y)
    unit_diff = normalize(X) - normalize(Y)
    return .5 * row_norms(unit_diff, squared=True)
# Mapping from accepted metric names to the paired-distance implementation.
# 'l2' is an alias of 'euclidean'; 'l1' and 'cityblock' alias 'manhattan'.
PAIRED_DISTANCES = {
    'cosine': paired_cosine_distances,
    'euclidean': paired_euclidean_distances,
    'l2': paired_euclidean_distances,
    'l1': paired_manhattan_distances,
    'manhattan': paired_manhattan_distances,
    'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
    """Computes the paired distances between X and Y.

    Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : ndarray (n_samples, n_features)
        Array 1 for distance computation.
    Y : ndarray (n_samples, n_features)
        Array 2 for distance computation.
    metric : string or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        specified in PAIRED_DISTANCES, including "euclidean",
        "manhattan", or "cosine".
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.

    Returns
    -------
    distances : ndarray (n_samples, )

    Examples
    --------
    >>> from sklearn.metrics.pairwise import paired_distances
    >>> X = [[0, 1], [1, 1]]
    >>> Y = [[0, 1], [2, 1]]
    >>> paired_distances(X, Y)
    array([ 0.,  1.])

    See also
    --------
    pairwise_distances : pairwise distances.
    """
    if metric in PAIRED_DISTANCES:
        func = PAIRED_DISTANCES[metric]
        # NOTE(review): extra **kwds are silently ignored here and in the
        # callable branch below -- the metric is always called as f(x, y).
        return func(X, Y)
    elif callable(metric):
        # Check the matrix first (it is usually done by the metric)
        X, Y = check_paired_arrays(X, Y)
        distances = np.zeros(len(X))
        for i in range(len(X)):
            distances[i] = metric(X[i], Y[i])
        return distances
    else:
        raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
    """Compute the linear kernel (plain dot product) between X and Y.

    Read more in the :ref:`User Guide <linear_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_1, n_features)
    Y : array of shape (n_samples_2, n_features)

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    # safe_sparse_dot handles dense/sparse operands transparently.
    gram = safe_sparse_dot(X, Y.T, dense_output=True)
    return gram
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
    """Compute the polynomial kernel between X and Y::

        K(X, Y) = (gamma <X, Y> + coef0)^degree

    Read more in the :ref:`User Guide <polynomial_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1

    Returns
    -------
    Gram matrix : array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = (1.0 / X.shape[1]) if gamma is None else gamma

    # (gamma * <X, Y> + coef0) ** degree, evaluated in place.
    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    np.multiply(kernel, gamma, out=kernel)
    np.add(kernel, coef0, out=kernel)
    np.power(kernel, degree, out=kernel)
    return kernel
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
    """Compute the sigmoid kernel between X and Y::

        K(X, Y) = tanh(gamma <X, Y> + coef0)

    Read more in the :ref:`User Guide <sigmoid_kernel>`.

    Parameters
    ----------
    X : ndarray of shape (n_samples_1, n_features)
    Y : ndarray of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features.
    coef0 : int, default 1

    Returns
    -------
    Gram matrix: array of shape (n_samples_1, n_samples_2)
    """
    X, Y = check_pairwise_arrays(X, Y)
    gamma = (1.0 / X.shape[1]) if gamma is None else gamma

    kernel = safe_sparse_dot(X, Y.T, dense_output=True)
    kernel *= gamma
    kernel += coef0
    # tanh is applied in place and the same array is returned.
    return np.tanh(kernel, kernel)
def rbf_kernel(X, Y=None, gamma=None):
    """Compute the rbf (gaussian) kernel between X and Y::

        K(x, y) = exp(-gamma ||x-y||^2)

    for each pair of rows x in X and y in Y.

    Read more in the :ref:`User Guide <rbf_kernel>`.

    Parameters
    ----------
    X : array of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float
        If None, defaults to 1.0 / n_features.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)
    """
    X, Y = check_pairwise_arrays(X, Y)
    if gamma is None:
        gamma = 1.0 / X.shape[1]

    # exp(-gamma * squared distance), computed in place on the distance
    # matrix to avoid a second allocation.
    dist_sq = euclidean_distances(X, Y, squared=True)
    dist_sq *= -gamma
    return np.exp(dist_sq, dist_sq)
def cosine_similarity(X, Y=None, dense_output=True):
    """Compute cosine similarity between samples in X and Y.

    Cosine similarity, or the cosine kernel, computes similarity as the
    normalized dot product of X and Y:

        K(X, Y) = <X, Y> / (||X||*||Y||)

    On L2-normalized data, this function is equivalent to linear_kernel.

    Read more in the :ref:`User Guide <cosine_similarity>`.

    Parameters
    ----------
    X : ndarray or sparse array, shape: (n_samples_X, n_features)
        Input data.
    Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
        Input data. If ``None``, the output will be the pairwise
        similarities between all samples in ``X``.
    dense_output : boolean (optional), default True
        Whether to return dense output even when the input is sparse. If
        ``False``, the output is sparse if both input arrays are sparse.

    Returns
    -------
    kernel matrix : array
        An array with shape (n_samples_X, n_samples_Y).
    """
    X, Y = check_pairwise_arrays(X, Y)

    X_norm = normalize(X, copy=True)
    # When Y was None, check_pairwise_arrays returns the same object for
    # both operands; reuse the normalized X instead of normalizing twice.
    Y_norm = X_norm if X is Y else normalize(Y, copy=True)
    return safe_sparse_dot(X_norm, Y_norm.T, dense_output=dense_output)
def additive_chi2_kernel(X, Y=None):
    """Computes the additive chi-squared kernel between observations in X and Y

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = -Sum [(x - y)^2 / (x + y)]

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Notes
    -----
    As the negative of a distance, this kernel is only conditionally positive
    definite.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    chi2_kernel : The exponentiated version of the kernel, which is usually
        preferable.
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to this kernel.
    """
    if issparse(X) or issparse(Y):
        raise ValueError("additive_chi2 does not support sparse matrices.")
    X, Y = check_pairwise_arrays(X, Y)
    # The kernel is undefined for negative inputs (x + y could be <= 0).
    if (X < 0).any():
        raise ValueError("X contains negative values.")
    if Y is not X and (Y < 0).any():
        raise ValueError("Y contains negative values.")

    # -sum((x - y)^2 / (x + y)) per pair, accumulated by the compiled helper.
    result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
    _chi2_kernel_fast(X, Y, result)
    return result
def chi2_kernel(X, Y=None, gamma=1.):
    """Computes the exponential chi-squared kernel X and Y.

    The chi-squared kernel is computed between each pair of rows in X and Y. X
    and Y have to be non-negative. This kernel is most commonly applied to
    histograms.

    The chi-squared kernel is given by::

        k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])

    It can be interpreted as a weighted difference per entry.

    Read more in the :ref:`User Guide <chi2_kernel>`.

    Parameters
    ----------
    X : array-like of shape (n_samples_X, n_features)
    Y : array of shape (n_samples_Y, n_features)
    gamma : float, default=1.
        Scaling parameter of the chi2 kernel.

    Returns
    -------
    kernel_matrix : array of shape (n_samples_X, n_samples_Y)

    References
    ----------
    * Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
      Local features and kernels for classification of texture and object
      categories: A comprehensive study
      International Journal of Computer Vision 2007
      http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf

    See also
    --------
    additive_chi2_kernel : The additive version of this kernel
    sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
        to the additive version of this kernel.
    """
    # additive_chi2_kernel returns non-positive values, so exp(gamma * K)
    # evaluates to exp(-gamma * Sum[...]) as documented above.
    K = additive_chi2_kernel(X, Y)
    K *= gamma
    # Exponentiate in place and return the same array.
    return np.exp(K, K)
# Helper functions - distance
# Maps metric names to the sklearn implementation used by pairwise_distances;
# names not listed here fall through to scipy.spatial.distance.
PAIRWISE_DISTANCE_FUNCTIONS = {
    # If updating this dictionary, update the doc in both distance_metrics()
    # and also in pairwise_distances()!
    'cityblock': manhattan_distances,
    'cosine': cosine_distances,
    'euclidean': euclidean_distances,
    'l2': euclidean_distances,
    'l1': manhattan_distances,
    'manhattan': manhattan_distances, }
def distance_metrics():
    """Valid metrics for pairwise_distances.

    This function simply returns the valid pairwise distance metrics.
    It exists to allow for a description of the mapping for
    each of the valid strings.

    The valid distance metrics, and the function they map to, are:

    ============     ====================================
    metric           Function
    ============     ====================================
    'cityblock'      metrics.pairwise.manhattan_distances
    'cosine'         metrics.pairwise.cosine_distances
    'euclidean'      metrics.pairwise.euclidean_distances
    'l1'             metrics.pairwise.manhattan_distances
    'l2'             metrics.pairwise.euclidean_distances
    'manhattan'      metrics.pairwise.manhattan_distances
    ============     ====================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    # The module-level registry is the single source of truth.
    return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
    """Break the pairwise matrix into n_jobs even column slices and compute
    them in parallel, stacking the partial results horizontally."""
    if n_jobs < 0:
        # Negative n_jobs means "all CPUs minus (-n_jobs - 1)".
        n_jobs = max(cpu_count() + 1 + n_jobs, 1)

    Y = X if Y is None else Y

    if n_jobs == 1:
        # Special case to avoid picklability checks in delayed
        return func(X, Y, **kwds)

    # TODO: in some cases, backend='threading' may be appropriate
    dispatch = delayed(func)
    columns = Parallel(n_jobs=n_jobs, verbose=0)(
        dispatch(X, Y[s], **kwds)
        for s in gen_even_slices(Y.shape[0], n_jobs))
    return np.hstack(columns)
def _pairwise_callable(X, Y, metric, **kwds):
    """Handle the callable-metric case for pairwise_{distances,kernels}."""
    X, Y = check_pairwise_arrays(X, Y)
    n_x, n_y = X.shape[0], Y.shape[0]

    if X is not Y:
        # General case: evaluate every (i, j) cell.
        out = np.empty((n_x, n_y), dtype='float')
        for i, j in itertools.product(range(n_x), range(n_y)):
            out[i, j] = metric(X[i], Y[j], **kwds)
        return out

    # Symmetric case: only evaluate the strict upper triangle, mirror it,
    # then fill the diagonal explicitly (nonzero diagonals are allowed for
    # both metrics and kernels).
    out = np.zeros((n_x, n_y), dtype='float')
    for i, j in itertools.combinations(range(n_x), 2):
        out[i, j] = metric(X[i], Y[j], **kwds)
    # NB: out += out.T would produce incorrect results here.
    out = out + out.T
    for i in range(n_x):
        row = X[i]
        out[i, i] = metric(row, row, **kwds)
    return out
# Metric names accepted by pairwise_distances: the sklearn-implemented
# metrics plus every metric scipy.spatial.distance.cdist/pdist understand.
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
                  'braycurtis', 'canberra', 'chebyshev', 'correlation',
                  'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
                  'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
                  'russellrao', 'seuclidean', 'sokalmichener',
                  'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
    """ Compute the distance matrix from a vector array X and optional Y.

    This method takes either a vector array or a distance matrix, and returns
    a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distances matrix, it is returned instead.

    This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    distance between the arrays from both X and Y.

    Valid values for metric are:

    - From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
      'manhattan']. These metrics support sparse matrix inputs.

    - From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
      'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
      'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
      'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
      See the documentation for scipy.spatial.distance for details on these
      metrics. These metrics do not support sparse matrix inputs.

    Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
    valid scipy.spatial.distance metrics), the scikit-learn implementation
    will be used, which is faster and has support for sparse matrices (except
    for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
    function.

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.
    Y : array [n_samples_b, n_features], optional
        An optional second feature array. Only allowed if metric != "precomputed".
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by scipy.spatial.distance.pdist for its metric parameter, or
        a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a distance matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.

    Returns
    -------
    D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A distance matrix D such that D_{i, j} is the distance between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then D_{i, j} is the distance between the ith array
        from X and the jth array from Y.
    """
    if (metric not in _VALID_METRICS and
            not callable(metric) and metric != "precomputed"):
        raise ValueError("Unknown metric %s. "
                         "Valid metrics are %s, or 'precomputed', or a "
                         "callable" % (metric, _VALID_METRICS))

    if metric == "precomputed":
        # X already holds the distances; return it untouched.
        return X
    elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
        func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        # Fall back to scipy, which only supports dense inputs.
        if issparse(X) or issparse(Y):
            raise TypeError("scipy distance metrics do not"
                            " support sparse matrices.")
        X, Y = check_pairwise_arrays(X, Y)
        if n_jobs == 1 and X is Y:
            # pdist computes only the condensed upper triangle, which is
            # cheaper than cdist for the symmetric single-job case.
            return distance.squareform(distance.pdist(X, metric=metric,
                                                      **kwds))
        func = partial(distance.cdist, metric=metric, **kwds)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# Helper functions - kernels
# Maps kernel names to their implementation, used by pairwise_kernels.
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
    'additive_chi2': additive_chi2_kernel,
    'chi2': chi2_kernel,
    'linear': linear_kernel,
    'polynomial': polynomial_kernel,
    'poly': polynomial_kernel,
    'rbf': rbf_kernel,
    'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity, }
def kernel_metrics():
    """ Valid metrics for pairwise_kernels

    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.

    The valid kernel metrics, and the function they map to, are:

    ===============   ========================================
    metric            Function
    ===============   ========================================
    'additive_chi2'   sklearn.pairwise.additive_chi2_kernel
    'chi2'            sklearn.pairwise.chi2_kernel
    'linear'          sklearn.pairwise.linear_kernel
    'poly'            sklearn.pairwise.polynomial_kernel
    'polynomial'      sklearn.pairwise.polynomial_kernel
    'rbf'             sklearn.pairwise.rbf_kernel
    'sigmoid'         sklearn.pairwise.sigmoid_kernel
    'cosine'          sklearn.pairwise.cosine_similarity
    ===============   ========================================

    Read more in the :ref:`User Guide <metrics>`.
    """
    return PAIRWISE_KERNEL_FUNCTIONS
# Keyword parameters each kernel accepts; used by pairwise_kernels when
# filter_params=True to drop keyword arguments a kernel cannot take.
# BUG FIX: 'chi2' previously mapped to (), although chi2_kernel accepts a
# ``gamma`` parameter, so filter_params=True silently discarded a
# user-supplied gamma for the chi2 kernel.
KERNEL_PARAMS = {
    "additive_chi2": (),
    "chi2": frozenset(["gamma"]),
    "cosine": (),
    # 'exp_chi2' is not a registered kernel name in
    # PAIRWISE_KERNEL_FUNCTIONS, but the entry is kept for backward
    # compatibility with callers that reference it directly.
    "exp_chi2": frozenset(["gamma"]),
    "linear": (),
    "poly": frozenset(["gamma", "degree", "coef0"]),
    "polynomial": frozenset(["gamma", "degree", "coef0"]),
    "rbf": frozenset(["gamma"]),
    "sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
                     n_jobs=1, **kwds):
    """Compute the kernel between arrays X and optional array Y.

    This method takes either a vector array or a kernel matrix, and returns
    a kernel matrix. If the input is a vector array, the kernels are
    computed. If the input is a kernel matrix, it is returned instead.

    This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
    array.

    If Y is given (default is None), then the returned matrix is the pairwise
    kernel between the arrays from both X and Y.

    Valid values for metric are::
        ['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']

    Read more in the :ref:`User Guide <metrics>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
             [n_samples_a, n_features] otherwise
        Array of pairwise kernels between samples, or a feature array.
    Y : array [n_samples_b, n_features]
        A second feature array only if X has shape [n_samples_a, n_features].
    metric : string, or callable
        The metric to use when calculating kernel between instances in a
        feature array. If metric is a string, it must be one of the metrics
        in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
        If metric is "precomputed", X is assumed to be a kernel matrix.
        Alternatively, if metric is a callable function, it is called on each
        pair of instances (rows) and the resulting value recorded. The callable
        should take two arrays from X as input and return a value indicating
        the distance between them.
    n_jobs : int
        The number of jobs to use for the computation. This works by breaking
        down the pairwise matrix into n_jobs even slices and computing them in
        parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    filter_params : boolean
        Whether to filter invalid parameters or not.
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the kernel function.

    Returns
    -------
    K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
        A kernel matrix K such that K_{i, j} is the kernel between the
        ith and jth vectors of the given matrix X, if Y is None.
        If Y is not None, then K_{i, j} is the kernel between the ith array
        from X and the jth array from Y.

    Notes
    -----
    If metric is 'precomputed', Y is ignored and X is returned.
    """
    if metric == "precomputed":
        # X already holds the kernel values; return it untouched.
        return X
    elif metric in PAIRWISE_KERNEL_FUNCTIONS:
        if filter_params:
            # Keep only the keyword arguments this kernel understands,
            # as declared in KERNEL_PARAMS.
            kwds = dict((k, kwds[k]) for k in kwds
                        if k in KERNEL_PARAMS[metric])
        func = PAIRWISE_KERNEL_FUNCTIONS[metric]
    elif callable(metric):
        func = partial(_pairwise_callable, metric=metric, **kwds)
    else:
        raise ValueError("Unknown kernel %r" % metric)

    return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
| bsd-3-clause |
dezelin/vbox | src/VBox/GuestHost/OpenGL/packer/pack_currenttypes.py | 16 | 2163 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
# This file is imported by several other Python scripts
# Table of glCurrent-state entry points used to generate pack/unpack code.
# For each function family the keys are:
#   'types'   - GL type suffixes the variants are generated for (see gltypes)
#   'sizes'   - component counts of the generated variants
#   'default' - default value for each component when not supplied
#   'members' - member/index expressions used to store each component
#   'array'   - (optional) size macro when the state is an indexed array
current_fns = {
    'Color': {
        'types': ['b','ub','s','us','i','ui','f','d'],
        'sizes': [3,4],
        'default': [0,0,0,1],
        'members': ['[0]', '[1]', '[2]', '[3]']
    },
    'SecondaryColor': {
        'types': ['b','ub','s','us','i','ui','f','d'],
        'sizes': [3],
        'default': [0,0,0],
        'members': ['[0]', '[1]', '[2]']
    },
    'Normal': {
        'types': ['b','s','i','f','d'],
        'sizes': [3],
        'default': [0,0,0],
        'members': ['[0]', '[1]', '[2]']
    },
    'TexCoord': {
        'types': ['s','i','f','d'],
        'sizes': [1,2,3,4],
        'default': [0,0,0,1],
        'members': ['[0]', '[1]', '[2]', '[3]'],
        # One set of coordinates per texture unit.
        'array': 'CR_MAX_TEXTURE_UNITS'
    },
    'EdgeFlag': {
        'types': ['l'],
        'sizes': [1],
        'default': [1],
        'members': ['[0]']
    },
    'Index': {
        'types': ['ub','s','i','f','d'],
        'sizes': [1],
        'default': [0],
        'members': ['[0]']
    },
    'VertexAttrib': {
        'types': ['s','f','d','b','i','ub','ui','us','Nub','Nus','Nui','Nb','Ns','Ni'],
        'sizes': [1,2,3,4],
        'default': [0,0,0,1],
        'members': ['x', 'y', 'z', 'w'],
        # One attribute slot per generic vertex attribute.
        'array': 'CR_MAX_VERTEX_ATTRIBS'
    },
    'FogCoord': {
        'types': ['f','d'],
        'sizes': [1],
        'default': [0],
        'members': []
    },
}
# glVertex itself, kept separate from current_fns because issuing a vertex
# completes a vertex rather than just updating current state.
current_vtx = {
    'Vertex': {
        'types': ['s','i','f','d'],
        'sizes': [2,3,4],
        'default': [0,0,0,1],
        'members': ['x', 'y', 'z', 'w']
    }
}
# Mapping from type suffix to the corresponding GL C type and its size in
# bytes. Suffixes beginning with 'N' denote normalized variants of the same
# underlying integer type (e.g. 'Nub' is a normalized GLubyte); 'l' is the
# boolean used by EdgeFlag.
gltypes = {
    'l': {
        'type': 'GLboolean',
        'size': 1
    },
    'b': {
        'type': 'GLbyte',
        'size': 1
    },
    'ub': {
        'type': 'GLubyte',
        'size': 1
    },
    's': {
        'type': 'GLshort',
        'size': 2
    },
    'us': {
        'type': 'GLushort',
        'size': 2
    },
    'i': {
        'type': 'GLint',
        'size': 4
    },
    'ui': {
        'type': 'GLuint',
        'size': 4
    },
    'f': {
        'type': 'GLfloat',
        'size': 4
    },
    'd': {
        'type': 'GLdouble',
        'size': 8
    },
    'Nb': {
        'type': 'GLbyte',
        'size': 1
    },
    'Nub': {
        'type': 'GLubyte',
        'size': 1
    },
    'Ns': {
        'type': 'GLshort',
        'size': 2
    },
    'Nus': {
        'type': 'GLushort',
        'size': 2
    },
    'Ni': {
        'type': 'GLint',
        'size': 4
    },
    'Nui': {
        'type': 'GLuint',
        'size': 4
    }
}
| gpl-2.0 |
darksidelemm/alltheFSKs | DePacketizer.py | 1 | 7781 | #!/usr/bin/env python
# DePacktizer.py - Message DePacketizer
# As per https://docs.google.com/document/d/1fwUtzFUhTzwjHrbfUayRG5sM_3TzdPlPgWjwXnY8fsU/edit
#
# Absorbs bits (numpy arrays, strings, whatever), and emits packets, if found.
#
# Copyright 2014 Mark Jessop <mark.jessop@adelaide.edu.au>
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
import struct, crc16, logging, sys
import numpy as np
class DePacketizer(object):
    """ Message DePacketizer Class

    Consumes a stream of bits (numpy arrays or byte strings) and emits
    complete, CRC-checked payloads through an optional ``callback``.

    Expected packet layout (see the spec linked in the module header)::

        <sync bytes> <16-bit flags> <payload> <CRC>

    The big-endian flags word stores the payload length in its low 10 bits;
    its MSB selects CRC32 (set) or CRC16 (clear).
    """

    def __init__(self, sync_bytes='\xAB\xCD', payload_length_cap=32, callback=False):
        # Byte string that marks the start of a packet.
        self.sync_bytes = sync_bytes
        # Length of the sync word in *bits*.
        self.sync_length = len(sync_bytes) * 8
        # Maximum accepted payload length (bytes); larger claimed lengths
        # are treated as corruption.
        self.payload_length_cap = payload_length_cap
        # Called with the payload for every CRC-valid packet found.
        self.callback = callback
        # "APPEND" while waiting for enough bits to attempt to extract a
        # full packet; "SHIFT" once the buffer is full (or a sync mismatch
        # was seen) and we just clock bits through it.
        self.buffer_state = "APPEND"
        self.state = "NEED_MORE_DATA"
        # Bit buffer: one np.uint8 (0 or 1) per entry.
        self.buffer = np.array([]).astype(np.uint8)

    def test_buffer(self):
        """Test the buffer for the sync word and, if present and enough bits
        are available, extract and CRC-check a complete packet."""
        header_bits = self.sync_length + 16  # sync word + flags word
        if len(self.buffer) <= header_bits:
            # Not enough bits to even check the sync header.
            self.buffer_state = "APPEND"
            return

        # np.ndarray.tobytes() replaces the removed tostring() alias.
        buffer_head = np.packbits(self.buffer[0:self.sync_length]).tobytes()
        if buffer_head != self.sync_bytes:
            # No sync header match. Continue clocking bits through.
            self.buffer_state = "SHIFT"
            return

        # Extract the packet flags (big-endian unsigned short).
        packet_flags = np.packbits(
            self.buffer[self.sync_length:self.sync_length + 16]).tobytes()
        packet_flags = struct.unpack(">H", packet_flags)[0]
        packet_length = packet_flags & 0x03FF  # low 10 bits: payload length

        if packet_length > self.payload_length_cap:
            # Claimed payload is bigger than our cap; assume the data is
            # corrupt and continue clocking through bits.
            logging.debug("Packet length bigger than cap.")
            return

        # BUG FIX: this used to read `packet_flags & 0x8000 == 1`, which is
        # never true (the masked value is 0 or 0x8000 because `&` binds
        # tighter than `==` only after the comparison), so the CRC32 flag
        # was silently ignored and every packet was treated as CRC16.
        crc_type = "CRC32" if (packet_flags & 0x8000) != 0 else "CRC16"
        crc_length = 4 if crc_type == "CRC32" else 2

        total_bits = header_bits + packet_length * 8 + crc_length * 8
        if len(self.buffer) < total_bits:
            # We need more bits; make sure new bits are appended so we do
            # not shift the sync header out of the buffer.
            self.buffer_state = "APPEND"
            return

        # Convert the candidate packet to a byte string.
        packet_string = np.packbits(self.buffer[0:total_bits]).tobytes()
        logging.debug("Possible Packet: %r", packet_string)

        # Extract and compute the CRC over flags + payload.
        if crc_type == "CRC16":
            calc_crc = crc16.crc16_buff(packet_string[len(self.sync_bytes):-2])
            packet_crc = struct.unpack(">H", packet_string[-2:])[0]
        else:
            # NOTE(review): CRC32 verification is not implemented; this
            # placeholder never equals a real CRC32, so CRC32-flagged
            # packets are always rejected.
            calc_crc = 0xFFFF
            packet_crc = struct.unpack(">L", packet_string[-4:])[0]
        logging.debug("Packet CRC: %s Calc CRC: %s", packet_crc, calc_crc)

        if packet_crc == calc_crc:
            # Woohoo! We have a packet: strip sync + flags from the front
            # and the CRC from the back to recover the payload.
            payload = packet_string[len(self.sync_bytes) + 2:-crc_length]
            logging.info("Found complete packet: %s", payload)
            if self.callback != False:
                self.callback(payload)
            # Clear the packet bits out of the buffer.
            self.buffer_state = "SHIFT"
            # TODO: actually drop the consumed bits instead of only
            # switching to SHIFT mode.
        else:
            # Packet failed CRC. Continue clocking through bits in case
            # this was a false positive on the sync word.
            logging.debug("CRC Check failed. False positive on sync?")
            self.buffer_state = "SHIFT"

    def process_bit(self, bit):
        """Clock a single bit (0 or 1) into the buffer and re-test it."""
        # This function only takes np.uint8's.
        if type(bit) != np.uint8:
            bit = np.uint8(bit)
        if bit != 0 and bit != 1:
            # Should never happen, but guard anyway.
            return

        if self.buffer_state == "SHIFT":
            # Buffer is full: rotate left and overwrite the last slot.
            self.buffer = np.roll(self.buffer, -1)
            self.buffer[-1] = bit
        elif self.buffer_state == "APPEND":
            self.buffer = np.append(self.buffer, bit)
            # Once the buffer reaches its maximum size, switch to SHIFT.
            if len(self.buffer) == (self.payload_length_cap * 8 + 64):
                logging.debug("Buffer full, now shifting data in.")
                self.buffer_state = "SHIFT"

        # Test the buffer for validity.
        self.test_buffer()

    def process_data(self, data):
        """Convert incoming data (a bit ndarray or a byte string) to bits
        and clock each bit through the depacketizer."""
        if type(data) == np.ndarray:
            data = data.astype(np.uint8)
        elif isinstance(data, (str, bytes)):
            # np.frombuffer replaces the deprecated np.fromstring; on
            # Python 2, str is the byte-string type so behavior is
            # unchanged.
            data = np.unpackbits(np.frombuffer(data, dtype=np.uint8))
        else:
            return
        logging.debug("Incoming Data: %s", data)
        for bit in data:
            self.process_bit(bit)
# Test script (Python 2): round-trips two packets through the depacketizer.
if __name__ == "__main__":
    # Set up logging to stdout instead of a file.
    root = logging.getLogger()
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG)
    root.addHandler(ch)
    root.setLevel(logging.DEBUG)

    # Callback function. This is where you'd pass packets on to Habitat or
    # whatever downstream consumer wants them.
    def print_payload(payload):
        print payload

    # Generate some packets, and intersperse random data between them.
    import Packetizer as p
    import random, string
    p = p.Packetizer()
    data = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(32))
    data = data + p.pack_message("testing")
    data2 = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(16))
    data2 = data2 + p.pack_message("testing again")

    # Pass data through the DePacketizer; payloads are printed via callback.
    dp = DePacketizer(callback=print_payload)
    dp.process_data(data)
    dp.process_data(data2)
| gpl-3.0 |
flibbertigibbet/ashlar | tests/settings_test.py | 2 | 2165 | """
Django settings for use when testing Ashlar
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Environment flags; the test settings always run in "develop" mode.
DEVELOP = True
PRODUCTION = False

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY NOTE: a hard-coded key is acceptable here because these settings
# are only used by the Ashlar test suite.
SECRET_KEY = 'testing!'

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.gis',
    'rest_framework',
    'rest_framework_gis',
    'django_filters',
    'django_extensions',
    'ashlar'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'ashlar.urls'

# PostGIS-backed database used by the tests.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'postgres',
        'HOST': 'db',  # Defined in docker-compose.yml
        'PORT': 5432,
        'USER': 'postgres',
        'PASSWORD': '',
        'CONN_MAX_AGE': 3600,  # in seconds
    }
}

# Internationalization defaults.
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static and uploaded-media locations.
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/tmp'

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.SessionAuthentication',),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 10,
}

ASHLAR = {
    # It is suggested to change this if you know that your data will be limited to
    # a certain part of the world, for example to a UTM Grid projection or a state
    # plane.
    'SRID': 4326,
}
| mit |
leoliujie/odoo | addons/hr_timesheet/wizard/hr_timesheet_sign_in_out.py | 340 | 8789 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_so_project(osv.osv_memory):
    """Transient wizard: sign an employee out of attendance and record the
    elapsed time as an analytic timesheet line on a project."""
    _name = 'hr.sign.out.project'
    _description = 'Sign Out By Project'
    _columns = {
        'account_id': fields.many2one('account.analytic.account', 'Project / Analytic Account', domain=[('type','=','normal')]),
        'info': fields.char('Work Description', required=True),
        'date_start': fields.datetime('Starting Date', readonly=True),
        'date': fields.datetime('Closing Date'),
        'analytic_amount': fields.float('Minimum Analytic Amount'),
        'name': fields.char('Employee\'s Name', required=True, readonly=True),
        'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
        'server_date': fields.datetime('Current Date', required=True, readonly=True),
        'emp_id': fields.many2one('hr.employee', 'Employee ID')
    }

    def _get_empid(self, cr, uid, context=None):
        # Resolve the employee linked to the current user and return the
        # wizard's default field values.
        # NOTE(review): returns None implicitly when no employee matches the
        # user -- _get_empid2 then raises KeyError; presumably view_init of the
        # sign-in wizard prevents that path. Confirm.
        emp_obj = self.pool.get('hr.employee')
        emp_ids = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if emp_ids:
            for employee in emp_obj.browse(cr, uid, emp_ids, context=context):
                return {'name': employee.name, 'state': employee.state, 'emp_id': emp_ids[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')}

    def _get_empid2(self, cr, uid, context=None):
        # Same defaults as _get_empid, plus the start date of the employee's
        # most recent attendance record (used to compute worked hours).
        res = self._get_empid(cr, uid, context=context)
        cr.execute('select name,action from hr_attendance where employee_id=%s order by name desc limit 1', (res['emp_id'],))
        res['server_date'] = time.strftime('%Y-%m-%d %H:%M:%S')
        date_start = cr.fetchone()
        if date_start:
            res['date_start'] = date_start[0]
        return res

    def default_get(self, cr, uid, fields_list, context=None):
        # Merge the employee-derived defaults into the standard ones.
        res = super(hr_so_project, self).default_get(cr, uid, fields_list, context=context)
        res.update(self._get_empid2(cr, uid, context=context))
        return res

    def _write(self, cr, uid, data, emp_id, context=None):
        # Create an analytic timesheet line for the elapsed time between
        # data['date_start'] and data['date'] (or "now" when no closing date).
        timesheet_obj = self.pool.get('hr.analytic.timesheet')
        emp_obj = self.pool.get('hr.employee')
        if context is None:
            context = {}
        # Elapsed time in hours.
        hour = (time.mktime(time.strptime(data['date'] or time.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')) -
                time.mktime(time.strptime(data['date_start'], '%Y-%m-%d %H:%M:%S'))) / 3600.0
        minimum = data['analytic_amount']
        if minimum:
            # Round the worked time up/down to the nearest multiple of the
            # configured minimum analytic amount.
            hour = round(round((hour + minimum / 2) / minimum) * minimum, 2)
        res = timesheet_obj.default_get(cr, uid, ['product_id','product_uom_id'], context=context)
        if not res['product_uom_id']:
            raise osv.except_osv(_('User Error!'), _('Please define cost unit for this employee.'))
        # Let the timesheet onchange compute the cost amount for the hours.
        up = timesheet_obj.on_change_unit_amount(cr, uid, False, res['product_id'], hour,False, res['product_uom_id'])['value']
        res['name'] = data['info']
        res['account_id'] = data['account_id'].id
        res['unit_amount'] = hour
        emp_journal = emp_obj.browse(cr, uid, emp_id, context=context).journal_id
        res['journal_id'] = emp_journal and emp_journal.id or False
        res.update(up)
        up = timesheet_obj.on_change_account_id(cr, uid, [], res['account_id']).get('value', {})
        res.update(up)
        return timesheet_obj.create(cr, uid, res, context=context)

    def sign_out_result_end(self, cr, uid, ids, context=None):
        # Final sign-out: record a 'sign_out' attendance action, then log the
        # timesheet line, and close the wizard window.
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_out', 'action_date':data.date})
            self._write(cr, uid, data, emp_id, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def sign_out_result(self, cr, uid, ids, context=None):
        # Sign out while staying available to sign back in.
        # NOTE(review): uses 'action':'action' here versus 'sign_out' in
        # sign_out_result_end -- looks like the generic attendance toggle;
        # confirm against hr.employee.attendance_action_change semantics.
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'action', 'action_date':data.date})
            self._write(cr, uid, data, emp_id, context=context)
        return {'type': 'ir.actions.act_window_close'}
class hr_si_project(osv.osv_memory):
    """Transient wizard: sign an employee in to attendance for a project."""
    _name = 'hr.sign.in.project'
    _description = 'Sign In By Project'
    _columns = {
        'name': fields.char('Employee\'s Name', readonly=True),
        'state': fields.related('emp_id', 'state', string='Current Status', type='selection', selection=[('present', 'Present'), ('absent', 'Absent')], required=True, readonly=True),
        'date': fields.datetime('Starting Date'),
        'server_date': fields.datetime('Current Date', readonly=True),
        'emp_id': fields.many2one('hr.employee', 'Employee ID')
    }

    def view_init(self, cr, uid, fields, context=None):
        """Check preconditions before the wizard opens.

        Raises a user error when the current user has no linked employee
        record, since every subsequent step depends on one existing.

        @param cr: the current row, from the database cursor
        @param uid: the current user's ID, for security checks
        @param fields: list of fields for default values
        @param context: a standard dictionary for contextual values
        """
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if not emp_id:
            raise osv.except_osv(_('User Error!'), _('Please define employee for your user.'))
        return False

    def check_state(self, cr, uid, ids, context=None):
        # Decide which wizard form (sign in or sign out) to show next, based
        # on the employee's most recent attendance action.
        obj_model = self.pool.get('ir.model.data')
        emp_id = self.default_get(cr, uid, ['emp_id'], context)['emp_id']
        # get the latest action (sign_in or out) for this employee
        cr.execute('select action from hr_attendance where employee_id=%s and action in (\'sign_in\',\'sign_out\') order by name desc limit 1', (emp_id,))
        res = (cr.fetchone() or ('sign_out',))[0]
        # Last action sign_out -> next step is signing in, and vice versa.
        in_out = (res == 'sign_out') and 'in' or 'out'
        #TODO: invert sign_in et sign_out
        model_data_ids = obj_model.search(cr,uid,[('model','=','ir.ui.view'),('name','=','view_hr_timesheet_sign_%s' % in_out)], context=context)
        resource_id = obj_model.read(cr, uid, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
        return {
            'name': _('Sign in / Sign out'),
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'hr.sign.%s.project' % in_out,
            'views': [(resource_id,'form')],
            'type': 'ir.actions.act_window',
            'target': 'new'
        }

    def sign_in_result(self, cr, uid, ids, context=None):
        # Record a 'sign_in' attendance action and close the wizard.
        emp_obj = self.pool.get('hr.employee')
        for data in self.browse(cr, uid, ids, context=context):
            emp_id = data.emp_id.id
            emp_obj.attendance_action_change(cr, uid, [emp_id], {'action':'sign_in', 'action_date':data.date})
        return {'type': 'ir.actions.act_window_close'}

    def default_get(self, cr, uid, fields_list, context=None):
        # Prefill the form from the employee record linked to the user.
        res = super(hr_si_project, self).default_get(cr, uid, fields_list, context=context)
        emp_obj = self.pool.get('hr.employee')
        emp_id = emp_obj.search(cr, uid, [('user_id', '=', uid)], context=context)
        if emp_id:
            for employee in emp_obj.browse(cr, uid, emp_id, context=context):
                res.update({'name': employee.name, 'state': employee.state, 'emp_id': emp_id[0], 'server_date':time.strftime('%Y-%m-%d %H:%M:%S')})
        return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mikegraham/dask | dask/dataframe/tests/test_io.py | 1 | 34901 | import gzip
import pandas as pd
import numpy as np
import pandas.util.testing as tm
import os
import dask
import pytest
from threading import Lock
import shutil
from time import sleep
import threading
import dask.array as da
import dask.dataframe as dd
from dask.dataframe.io import (from_array, from_bcolz, from_dask_array)
from dask.utils import filetext, filetexts, tmpfile, tmpdir
from dask.async import get_sync
from dask.dataframe.utils import eq
########
# CSVS #
########
# Sample CSV payload shared by many of the read_csv tests below.
text = """
name,amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
Alice,200
Frank,-200
Bob,600
Alice,400
Frank,200
Alice,300
Edith,600
""".strip()
def test_read_csv():
    """Reading a single CSV yields the expected columns and contents."""
    with filetext(text) as fn:
        ddf = dd.read_csv(fn, chunkbytes=30, lineterminator='\n')
        assert ['name', 'amount'] == list(ddf.columns)
        assert ddf._known_dtype
        computed = ddf.compute(get=dask.get)
        expected = pd.read_csv(fn, lineterminator='\n')
        # The dask index may differ from pandas', so compare ignoring it.
        assert eq(computed.reset_index(drop=True), expected)
def test_read_multiple_csv():
    """Globbed CSV paths load every matching file into a single frame."""
    # Use the filetexts helper (already imported, and used the same way in
    # test_multiple_read_csv_has_deterministic_name) instead of manually
    # writing files and cleaning up in try/finally -- less leak-prone and
    # consistent with the rest of this module.
    with filetexts({'_foo.1.csv': text, '_foo.2.csv': text}):
        df = dd.read_csv('_foo.*.csv', chunkbytes=30)
        assert df._known_dtype
        assert df.npartitions > 2
        # Two identical files should yield twice the rows of a single file.
        assert (len(dd.read_csv('_foo.*.csv').compute()) ==
                len(dd.read_csv('_foo.1.csv').compute()) * 2)
def normalize_text(s):
    """Trim the string and strip leading/trailing whitespace from each line."""
    lines = s.strip().split('\n')
    return '\n'.join(line.strip() for line in lines)
def test_consistent_dtypes():
text = normalize_text("""
name,amount
Alice,100.5
Bob,-200.5
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, chunkbytes=30)
assert isinstance(df.amount.sum().compute(), float)
assert df._known_dtype
# CSV fixture with a date column, used by the usecols/parse_dates tests.
datetime_csv_file = """
name,amount,when
Alice,100,2014-01-01
Bob,200,2014-01-01
Charlie,300,2014-01-01
Dan,400,2014-01-01
""".strip()
def test_read_csv_index():
with filetext(text) as fn:
f = dd.read_csv(fn, chunkbytes=20).set_index('amount')
assert f._known_dtype
result = f.compute(get=get_sync)
assert result.index.name == 'amount'
blocks = dd.DataFrame._get(f.dask, f._keys(), get=get_sync)
for i, block in enumerate(blocks):
if i < len(f.divisions) - 2:
assert (block.index < f.divisions[i + 1]).all()
if i > 0:
assert (block.index >= f.divisions[i]).all()
expected = pd.read_csv(fn).set_index('amount')
assert eq(result, expected)
def test_usecols():
with filetext(datetime_csv_file) as fn:
df = dd.read_csv(fn, chunkbytes=30, usecols=['when', 'amount'])
expected = pd.read_csv(fn, usecols=['when', 'amount'])
assert (df.compute().values == expected.values).all()
####################
# Arrays and BColz #
####################
def test_dummy_from_array():
x = np.array([[1, 2], [3, 4]], dtype=np.int64)
res = dd.io._dummy_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res[0].dtype == np.int64
assert res[1].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index([0, 1]))
x = np.array([[1., 2.], [3., 4.]], dtype=np.float64)
res = dd.io._dummy_from_array(x, columns=['a', 'b'])
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.float64
tm.assert_index_equal(res.columns, pd.Index(['a', 'b']))
msg = r"""Length mismatch: Expected axis has 2 elements, new values have 3 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
dd.io._dummy_from_array(x, columns=['a', 'b', 'c'])
np.random.seed(42)
x = np.random.rand(201, 2)
x = from_array(x, chunksize=50, columns=['a', 'b'])
assert len(x.divisions) == 6 # Should be 5 partitions and the end
def test_dummy_from_1darray():
x = np.array([1., 2., 3.], dtype=np.float64)
res = dd.io._dummy_from_array(x)
assert isinstance(res, pd.Series)
assert res.dtype == np.float64
x = np.array([1, 2, 3], dtype=np.object_)
res = dd.io._dummy_from_array(x, columns='x')
assert isinstance(res, pd.Series)
assert res.name == 'x'
assert res.dtype == np.object_
x = np.array([1, 2, 3], dtype=np.object_)
res = dd.io._dummy_from_array(x, columns=['x'])
assert isinstance(res, pd.DataFrame)
assert res['x'].dtype == np.object_
tm.assert_index_equal(res.columns, pd.Index(['x']))
msg = r"""Length mismatch: Expected axis has 1 elements, new values have 2 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
dd.io._dummy_from_array(x, columns=['a', 'b'])
def test_dummy_from_recarray():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', np.float64), ('b', np.int64)])
res = dd.io._dummy_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res['a'].dtype == np.float64
assert res['b'].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(['a', 'b']))
res = dd.io._dummy_from_array(x, columns=['x', 'y'])
assert isinstance(res, pd.DataFrame)
assert res['x'].dtype == np.float64
assert res['y'].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(['x', 'y']))
msg = r"""Length mismatch: Expected axis has 2 elements, new values have 3 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
dd.io._dummy_from_array(x, columns=['a', 'b', 'c'])
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert d._known_dtype
tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list('abc'))
assert isinstance(d, dd.DataFrame)
assert d._known_dtype
tm.assert_index_equal(d.columns, pd.Index(['a', 'b', 'c']))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
with pytest.raises(ValueError):
dd.from_array(np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i*10) for i in range(10)],
dtype=[('a', 'i4'), ('b', 'i4')])
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert d._known_dtype
assert list(d.columns) == ['a', 'b']
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip('bcolz')
def check():
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) ==
sorted(dd.from_bcolz(t, chunksize=2).dask))
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dd.from_bcolz(t, chunksize=3).dask))
threads = []
for i in range(5):
thread = threading.Thread(target=check)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_from_bcolz():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
d = dd.from_bcolz(t, chunksize=2)
assert d._known_dtype
assert d.npartitions == 2
assert str(d.dtypes['a']) == 'category'
assert list(d.x.compute(get=get_sync)) == [1, 2, 3]
assert list(d.a.compute(get=get_sync)) == ['a', 'b', 'a']
L = list(d.index.compute(get=get_sync))
assert L == [0, 1, 2]
d = dd.from_bcolz(t, chunksize=2, index='x')
L = list(d.index.compute(get=get_sync))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) ==
sorted(dd.from_bcolz(t, chunksize=2).dask))
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dd.from_bcolz(t, chunksize=3).dask))
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4., 'b'))
t.flush()
assert (sorted(dd.from_bcolz(t, chunksize=2).dask) !=
sorted(dsk))
def test_from_bcolz_no_lock():
bcolz = pytest.importorskip('bcolz')
locktype = type(Lock())
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'], chunklen=2)
a = dd.from_bcolz(t, chunksize=2)
b = dd.from_bcolz(t, chunksize=2, lock=True)
c = dd.from_bcolz(t, chunksize=2, lock=False)
eq(a, b)
eq(a, c)
assert not any(isinstance(item, locktype)
for v in c.dask.values()
for item in v)
def test_from_bcolz_filename():
bcolz = pytest.importorskip('bcolz')
with tmpfile('.bcolz') as fn:
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'],
rootdir=fn)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip('bcolz')
t = bcolz.ctable([[1, 2, 3], [1., 2., 3.], ['a', 'b', 'a']],
names=['x', 'y', 'a'])
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().columns) == ['x', 'y', 'a']
def test_skipinitialspace():
text = normalize_text("""
name, amount
Alice,100
Bob,-200
Charlie,300
Dennis,400
Edith,-500
Frank,600
""")
with filetext(text) as fn:
df = dd.read_csv(fn, skipinitialspace=True, chunkbytes=20)
assert 'amount' in df.columns
assert df.amount.max().compute() == 600
def test_consistent_dtypes_2():
    """dtype inference must reconcile partitions whose samples disagree.

    The 'name' column is all-string in the first file but starts with
    integer-looking values in the second, so per-partition inference alone
    would produce inconsistent dtypes.
    """
    text1 = normalize_text("""
    name,amount
    Alice,100
    Bob,-200
    Charlie,300
    """)
    text2 = normalize_text("""
    name,amount
    1,400
    2,-500
    Frank,600
    """)
    # filetexts creates the files and guarantees cleanup; the previous
    # version hand-rolled this with try/finally and had a stray dead
    # ``pass`` statement at the top of the finally block.
    with filetexts({'_foo.1.csv': text1, '_foo.2.csv': text2}):
        df = dd.read_csv('_foo.*.csv', chunkbytes=25)
        assert df.amount.max().compute() == 600
@pytest.mark.slow
def test_compression_multiple_files():
with tmpdir() as tdir:
f = gzip.open(os.path.join(tdir, 'a.csv.gz'), 'wb')
f.write(text.encode())
f.close()
f = gzip.open(os.path.join(tdir, 'b.csv.gz'), 'wb')
f.write(text.encode())
f.close()
df = dd.read_csv(os.path.join(tdir, '*.csv.gz'), compression='gzip')
assert len(df.compute()) == (len(text.split('\n')) - 1) * 2
def test_empty_csv_file():
    """A header-only CSV yields an empty frame with the right columns."""
    with filetext('a,b') as fn:
        ddf = dd.read_csv(fn, header=0)
        computed = ddf.compute()
        assert len(computed) == 0
        assert ['a', 'b'] == list(ddf.columns)
def test_from_pandas_dataframe():
a = list('aaaaaaabbbbbbbbccccccc')
df = pd.DataFrame(dict(a=a, b=np.random.randn(len(a))),
index=pd.date_range(start='20120101', periods=len(a)))
ddf = dd.from_pandas(df, 3)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
ddf = dd.from_pandas(df, chunksize=8)
msg = 'Exactly one of npartitions and chunksize must be specified.'
with tm.assertRaisesRegexp(ValueError, msg):
dd.from_pandas(df, npartitions=2, chunksize=2)
with tm.assertRaisesRegexp(ValueError, msg):
dd.from_pandas(df)
assert len(ddf.dask) == 3
assert len(ddf.divisions) == len(ddf.dask) + 1
assert type(ddf.divisions[0]) == type(df.index[0])
tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({'x': [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
a = dd.from_pandas(df, chunksize=i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
@pytest.mark.xfail(reason="")
def test_from_pandas_npartitions_is_accurate():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
for n in [1, 2, 4, 5]:
assert dd.from_pandas(df, npartitions=n).npartitions == n
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n),
index=pd.date_range(start='20120101', periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
ds = dd.from_pandas(s, chunksize=8)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert type(ds.divisions[0]) == type(s.index[0])
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
df = pd.DataFrame({'x': [1, 2, 3]}, index=[3, 1, 2])
ddf = dd.from_pandas(df, npartitions=2, sort=False)
assert not ddf.known_divisions
eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2, sort=False)
assert not ddf.known_divisions
eq(df, ddf)
def test_from_pandas_single_row():
df = pd.DataFrame({'x': [1]}, index=[1])
ddf = dd.from_pandas(df, npartitions=1)
assert ddf.divisions == (1, 1)
assert eq(ddf, df)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = from_dask_array(x, ['a', 'b', 'c'])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df.columns, pd.Index(['a', 'b', 'c']))
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(get=get_sync).values == x.compute(get=get_sync)).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=['a', 'b', 'c'])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df2.columns, df.columns)
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = from_dask_array(x, 'a')
assert isinstance(ser, dd.Series)
assert ser.name == 'a'
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(get=get_sync).values == x.compute(get=get_sync)).all()
ser = from_dask_array(x)
assert isinstance(ser, dd.Series)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert isinstance(ser2, dd.Series)
assert eq(ser, ser2)
def test_from_dask_array_compat_numpy_array():
x = da.ones((3, 3, 3), chunks=2)
msg = r"from_array does not input more than 2D array, got array with shape \(3, 3, 3\)"
with tm.assertRaisesRegexp(ValueError, msg):
from_dask_array(x) # dask
with tm.assertRaisesRegexp(ValueError, msg):
from_array(x.compute()) # numpy
x = da.ones((10, 3), chunks=(3, 3))
d1 = from_dask_array(x) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))
d2 = from_array(x.compute()) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))
msg = r"""Length mismatch: Expected axis has 3 elements, new values have 1 elements"""
with tm.assertRaisesRegexp(ValueError, msg):
from_dask_array(x, columns=['a']) # dask
with tm.assertRaisesRegexp(ValueError, msg):
from_array(x.compute(), columns=['a']) # numpy
d1 = from_dask_array(x, columns=['a', 'b', 'c']) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(['a', 'b', 'c']))
d2 = from_array(x.compute(), columns=['a', 'b', 'c']) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(['a', 'b', 'c']))
def test_from_dask_array_compat_numpy_array_1d():
x = da.ones(10, chunks=3)
d1 = from_dask_array(x) # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name is None
d2 = from_array(x.compute()) # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name is None
d1 = from_dask_array(x, columns='name') # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name == 'name'
d2 = from_array(x.compute(), columns='name') # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name == 'name'
# passing list via columns results in DataFrame
d1 = from_dask_array(x, columns=['name']) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(['name']))
d2 = from_array(x.compute(), columns=['name']) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(['name']))
def test_from_dask_array_struct_dtype():
x = np.array([(1, 'a'), (2, 'b')], dtype=[('a', 'i4'), ('b', 'object')])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
tm.assert_index_equal(df.columns, pd.Index(['a', 'b']))
assert eq(df, pd.DataFrame(x))
assert eq(dd.from_dask_array(y, columns=['b', 'a']),
pd.DataFrame(x, columns=['b', 'a']))
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_to_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
b = c.to_dask()
try:
tm.assert_frame_equal(df, c[:])
tm.assert_frame_equal(b.compute(), df)
finally:
c.drop()
c = a.to_castra(categories=['x'])
try:
assert c[:].dtypes['x'] == 'category'
finally:
c.drop()
c = a.to_castra(sorted_index_column='y')
try:
tm.assert_frame_equal(c[:], df.set_index('y'))
finally:
c.drop()
dsk, keys = a.to_castra(compute=False)
assert isinstance(dsk, dict)
assert isinstance(keys, list)
c, last = keys
assert last[1] == a.npartitions - 1
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_from_castra():
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
c = a.to_castra()
with_castra = dd.from_castra(c)
with_fn = dd.from_castra(c.path)
with_columns = dd.from_castra(c, 'x')
try:
tm.assert_frame_equal(df, with_castra.compute())
tm.assert_frame_equal(df, with_fn.compute())
tm.assert_series_equal(df.x, with_columns.compute())
finally:
# Calling c.drop() is a race condition on drop from `with_fn.__del__`
# and c.drop. Manually `del`ing gets around this.
del with_fn, c
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_from_castra_with_selection():
""" Optimizations fuse getitems with load_partitions
We used to use getitem for both column access and selections
"""
pytest.importorskip('castra')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = dd.from_castra(a.to_castra())
assert eq(b[b.y > 3].x, df[df.y > 3].x)
def test_to_hdf():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
with tmpfile('h5') as fn:
a.x.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_series_equal(df.x, out[:])
a = dd.from_pandas(df, 1)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data')
out = pd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out[:])
# saving to multiple datasets
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data*')
out = dd.read_hdf(fn, '/data*')
tm.assert_frame_equal(df, out.compute())
# saving to multiple files
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data')
out = dd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out.compute())
# saving to multiple datasets with custom name_function
a = dd.from_pandas(df, 2)
with tmpfile('h5') as fn:
a.to_hdf(fn, '/data_*', name_function=lambda i: 'a' * (i + 1))
out = dd.read_hdf(fn, '/data_*')
tm.assert_frame_equal(df, out.compute())
out = pd.read_hdf(fn, '/data_a')
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(fn, '/data_aa')
tm.assert_frame_equal(out, df.iloc[2:])
# saving to multiple files with custom name_function
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data', name_function=lambda i: 'a' * (i + 1))
out = dd.read_hdf(fn, '/data')
tm.assert_frame_equal(df, out.compute())
out = pd.read_hdf(os.path.join(dn, 'data_a.h5'), '/data')
tm.assert_frame_equal(out, df.iloc[:2])
out = pd.read_hdf(os.path.join(dn, 'data_aa.h5'), '/data')
tm.assert_frame_equal(out, df.iloc[2:])
# saving to different datasets in multiple files with custom name_function
a = dd.from_pandas(df, 2)
with tmpdir() as dn:
with pytest.raises(ValueError):
fn = os.path.join(dn, 'data_*.h5')
a.to_hdf(fn, '/data_*', name_function=lambda i: 'a' * (i + 1))
def test_read_hdf():
    """read_hdf rejects fixed-format stores and partitions table stores."""
    pytest.importorskip('tables')
    df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
                       'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])

    with tmpfile('h5') as fn:
        df.to_hdf(fn, '/data')
        # Fixed-format stores cannot be chunked; the error message should
        # point the user at re-saving with format='table'.  pytest.raises
        # replaces the try/assert False/except idiom, which reported a wrong
        # exception type only as a bare AssertionError.
        with pytest.raises(TypeError) as excinfo:
            dd.read_hdf(fn, 'data', chunksize=2)
        assert "format='table'" in str(excinfo.value)

    with tmpfile('h5') as fn:
        df.to_hdf(fn, '/data', format='table')
        a = dd.read_hdf(fn, '/data', chunksize=2)
        assert a.npartitions == 2
        assert a._known_dtype
        tm.assert_frame_equal(a.compute(), df)

        # start/stop slicing must match pandas' behavior.
        tm.assert_frame_equal(
            dd.read_hdf(fn, '/data', chunksize=2, start=1, stop=3).compute(),
            pd.read_hdf(fn, '/data', start=1, stop=3))

        # Reading the same store twice must produce identical graph keys.
        assert (sorted(dd.read_hdf(fn, '/data').dask) ==
                sorted(dd.read_hdf(fn, '/data').dask))
def test_to_csv():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn)
result = pd.read_csv(fn, index_col=0)
tm.assert_frame_equal(result, df)
@pytest.mark.xfail(reason="bloscpack BLOSC_MAX_BUFFERSIZE")
def test_to_csv_gzip():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
for npartitions in [1, 2]:
a = dd.from_pandas(df, npartitions)
with tmpfile('csv') as fn:
a.to_csv(fn, compression='gzip')
result = pd.read_csv(fn, index_col=0, compression='gzip')
tm.assert_frame_equal(result, df)
def test_to_csv_series():
s = pd.Series([1, 2, 3], index=[10, 20, 30], name='foo')
a = dd.from_pandas(s, 2)
with tmpfile('csv') as fn:
with tmpfile('csv') as fn2:
a.to_csv(fn)
s.to_csv(fn2)
with open(fn) as f:
adata = f.read()
with open(fn2) as f:
sdata = f.read()
assert adata == sdata
def test_read_csv_with_nrows():
with filetext(text) as fn:
f = dd.read_csv(fn, nrows=3)
assert list(f.columns) == ['name', 'amount']
assert f.npartitions == 1
assert eq(dd.read_csv(fn, nrows=3), pd.read_csv(fn, nrows=3))
def test_read_csv_raises_on_no_files():
    """read_csv on a non-matching path raises IOError naming the path."""
    fn = '.not.a.real.file.csv'
    # pytest.raises replaces the try/assert False/except idiom: a wrong
    # exception type now fails with a clear message instead of a bare
    # AssertionError.
    with pytest.raises(IOError) as excinfo:
        dd.read_csv(fn)
    assert fn in str(excinfo.value)
def test_read_csv_has_deterministic_name():
with filetext(text) as fn:
a = dd.read_csv(fn)
b = dd.read_csv(fn)
assert a._name == b._name
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
assert isinstance(a._name, str)
c = dd.read_csv(fn, skiprows=1, na_values=[0])
assert a._name != c._name
def test_multiple_read_csv_has_deterministic_name():
with filetexts({'_foo.1.csv': text, '_foo.2.csv': text}):
a = dd.read_csv('_foo.*.csv')
b = dd.read_csv('_foo.*.csv')
assert sorted(a.dask.keys(), key=str) == sorted(b.dask.keys(), key=str)
def test_csv_with_integer_names():
with filetext('alice,1\nbob,2') as fn:
df = dd.read_csv(fn, header=None)
assert list(df.columns) == [0, 1]
@pytest.mark.slow
def test_read_csv_of_modified_file_has_different_name():
with filetext(text) as fn:
sleep(1)
a = dd.read_csv(fn)
sleep(1)
with open(fn, 'a') as f:
f.write('\nGeorge,700')
os.fsync(f)
b = dd.read_csv(fn)
assert sorted(a.dask) != sorted(b.dask)
def test_to_bag():
pytest.importorskip('dask.bag')
a = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute(get=get_sync) == list(a.itertuples(False))
assert ddf.to_bag(True).compute(get=get_sync) == list(a.itertuples(True))
assert ddf.x.to_bag(True).compute(get=get_sync) == list(a.x.iteritems())
assert ddf.x.to_bag().compute(get=get_sync) == list(a.x)
@pytest.mark.xfail(reason='we might want permissive behavior here')
def test_report_dtype_correction_on_csvs():
text = 'numbers,names\n'
for i in range(1000):
text += '1,foo\n'
text += '1.5,bar\n'
with filetext(text) as fn:
with pytest.raises(ValueError) as e:
dd.read_csv(fn).compute(get=get_sync)
assert "'numbers': 'float64'" in str(e)
def test_hdf_globbing():
pytest.importorskip('tables')
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [1, 2, 3, 4]}, index=[1., 2., 3., 4.])
with tmpdir() as tdir:
df.to_hdf(os.path.join(tdir, 'one.h5'), '/foo/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/bar/data', format='table')
df.to_hdf(os.path.join(tdir, 'two.h5'), '/foo/data', format='table')
with dask.set_options(get=dask.get):
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2)
assert res.npartitions == 2
tm.assert_frame_equal(res.compute(), df)
res = dd.read_hdf(os.path.join(tdir, 'one.h5'), '/*/data',
chunksize=2, start=1, stop=3)
expected = pd.read_hdf(os.path.join(tdir, 'one.h5'), '/foo/data',
start=1, stop=3)
tm.assert_frame_equal(res.compute(), expected)
res = dd.read_hdf(os.path.join(tdir, 'two.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/foo/data', chunksize=2)
assert res.npartitions == 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 2))
res = dd.read_hdf(os.path.join(tdir, '*.h5'), '/*/data', chunksize=2)
assert res.npartitions == 2 + 2 + 2
tm.assert_frame_equal(res.compute(), pd.concat([df] * 3))
def test_index_col():
    """read_csv must reject index_col and point the user at set_index.

    Dask needs to control the index itself to define partition divisions,
    so index_col cannot be supported at read time.
    """
    with filetext(text) as fn:
        # Idiomatic pytest.raises instead of the fragile
        # try / assert False / except pattern (consistent with
        # test_report_dtype_correction_on_csvs above).
        with pytest.raises(ValueError) as e:
            dd.read_csv(fn, chunkbytes=30, index_col='name')
        assert 'set_index' in str(e)
# Daily OHLCV quotes (newest first, per the header row) consumed by the
# datetime-index read_csv / from_pandas tests below.
timeseries = """
Date,Open,High,Low,Close,Volume,Adj Close
2015-08-28,198.50,199.839996,197.919998,199.240005,143298900,199.240005
2015-08-27,197.020004,199.419998,195.210007,199.160004,266244700,199.160004
2015-08-26,192.080002,194.789993,188.369995,194.679993,328058100,194.679993
2015-08-25,195.429993,195.449997,186.919998,187.229996,353966700,187.229996
2015-08-24,197.630005,197.630005,182.399994,189.550003,478672400,189.550003
2015-08-21,201.729996,203.940002,197.520004,197.630005,328271500,197.630005
2015-08-20,206.509995,208.289993,203.899994,204.009995,185865600,204.009995
2015-08-19,209.089996,210.009995,207.350006,208.279999,167316300,208.279999
2015-08-18,210.259995,210.679993,209.699997,209.929993,70043800,209.929993
""".strip()
def test_read_csv_with_datetime_index_partitions_one():
    """A single-partition read_csv + set_index matches pandas exactly."""
    with filetext(timeseries) as fn:
        expected = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
                               parse_dates=['Date'])
        # A huge chunkbytes explicitly forces a single partition.
        got = dd.read_csv(fn, header=0, usecols=[0, 4],
                          parse_dates=['Date'],
                          chunkbytes=10000000).set_index('Date')
        eq(expected, got)
        # The file is so small that the default chunking is also one chunk.
        got = dd.read_csv(fn, header=0, usecols=[0, 4],
                          parse_dates=['Date']).set_index('Date')
        eq(expected, got)
def test_read_csv_with_datetime_index_partitions_n():
    """A multi-partition read_csv + set_index matches pandas exactly."""
    with filetext(timeseries) as fn:
        expected = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
                               parse_dates=['Date'])
        # A tiny chunkbytes forces several partitions out of the small file.
        got = dd.read_csv(fn, header=0, usecols=[0, 4],
                          parse_dates=['Date'],
                          chunkbytes=400).set_index('Date')
        eq(expected, got)
def test_from_pandas_with_datetime_index():
    """from_pandas round-trips a DatetimeIndex for both the npartitions=
    and the chunksize= styles of partitioning."""
    with filetext(timeseries) as fn:
        frame = pd.read_csv(fn, index_col=0, header=0, usecols=[0, 4],
                            parse_dates=['Date'])
        eq(frame, dd.from_pandas(frame, 2))
        eq(frame, dd.from_pandas(frame, chunksize=2))
@pytest.mark.parametrize('encoding', ['utf-16', 'utf-16-le', 'utf-16-be'])
def test_encoding_gh601(encoding):
    """Round-trip a frame through UTF-16 CSV variants (gh-601): dask's
    byte-oriented chunking must not corrupt multi-byte encodings."""
    base = pd.Series(range(0, 100))
    mod7 = base % 7
    source = pd.DataFrame({'a': base,
                           'b': mod7,
                           'c': mod7 * 3.3,
                           'd': mod7 / 1.9836})
    with tmpfile('.csv') as fn:
        source.to_csv(fn, encoding=encoding, index=False)
        via_pandas = pd.read_csv(fn, encoding=encoding)
        via_dask = dd.read_csv(fn, encoding=encoding, chunkbytes=1000)
        via_dask = via_dask.compute()
        # Partition-local indexes restart at 0; flatten before comparing.
        via_dask.index = range(len(via_dask.index))
        assert eq(via_dask, via_pandas)
def test_read_hdf_doesnt_segfault():
    """Regression test: computing len() of a chunked HDF read used to
    crash the tables backend."""
    pytest.importorskip('tables')
    nrows = 40
    with tmpfile('h5') as fn:
        frame = pd.DataFrame(np.random.randn(nrows, 3))
        with pd.HDFStore(fn, mode='w') as store:
            store.append('/x', frame)
        assert len(dd.read_hdf(fn, '/x', chunksize=2)) == nrows
def test_read_csv_header_issue_823():
    """Tab-separated files parse identically whether the separator is given
    as sep= or as delimiter= (gh-823)."""
    text = 'a\tb\tc-d\n1\t2\t3\n4\t5\t6'
    with filetext(text) as fn:
        for kwargs in ({'sep': '\t'}, {'delimiter': '\t'}):
            eq(dd.read_csv(fn, **kwargs), pd.read_csv(fn, **kwargs))
def test_none_usecols():
    """usecols=None (pandas' default) must pass through unchanged."""
    with filetext(text) as fn:
        eq(dd.read_csv(fn, usecols=None), pd.read_csv(fn, usecols=None))
# ID/date/time rows whose separate 'date' and 'time' columns are merged by
# parse_dates=[['date', 'time']] in test_parse_dates_multi_column below.
pdmc_text = """
ID,date,time
10,2003-11-04,180036
11,2003-11-05,125640
12,2003-11-01,2519
13,2003-10-22,142559
14,2003-10-24,163113
15,2003-10-20,170133
16,2003-11-11,160448
17,2003-11-03,171759
18,2003-11-07,190928
19,2003-10-21,84623
20,2003-10-25,192207
21,2003-11-13,180156
22,2003-11-15,131037
""".strip()
def test_parse_dates_multi_column():
    """parse_dates=[['date', 'time']] merges the two columns into a single
    datetime column, with the same columns and length pandas produces."""
    with filetext(pdmc_text) as fn:
        got = dd.read_csv(fn, parse_dates=[['date', 'time']])
        expected = pd.read_csv(fn, parse_dates=[['date', 'time']])
        assert (expected.columns == got.columns).all()
        assert len(expected) == len(got)
# Rows using the multi-character separator '###', for test_read_csv_sep.
sep_text = """
name###amount
alice###100
bob###200
charlie###300"""
def test_read_csv_sep():
    """Multi-character separators ('###') are honoured by dd.read_csv."""
    with filetext(sep_text) as fn:
        got = dd.read_csv(fn, sep="###")
        expected = pd.read_csv(fn, sep="###")
        assert (expected.columns == got.columns).all()
        assert len(expected) == len(got)
def test_to_hdf_kwargs():
    """Extra keyword arguments (here min_itemsize) are forwarded from
    Dask's to_hdf through to pandas' to_hdf."""
    pytest.importorskip('tables')  # consistent with the other HDF tests
    df = pd.DataFrame({'A': ['a', 'aaaa']})
    ddf = dd.from_pandas(df, npartitions=2)
    # Write into a managed temporary file instead of the working directory:
    # the original wrote 'tst.h5' into cwd and never removed it, leaking the
    # file and risking collisions with a previous run's contents.
    with tmpfile('.h5') as fn:
        ddf.to_hdf(fn, 'foo4', format='table', min_itemsize=4)
        df2 = pd.read_hdf(fn, 'foo4')
        tm.assert_frame_equal(df, df2)
def test_read_csv_slash_r():
    """With lineterminator='\\n', an embedded carriage return is data, not a
    row boundary, even when it falls near a block edge."""
    payload = b'0,my\n1,data\n' * 1000 + b'2,foo\rbar'
    with filetext(payload, mode='wb') as fn:
        # blocksize=200 guarantees many blocks, so the '\r' sits inside one.
        dd.read_csv(fn, header=None, sep=',', lineterminator='\n',
                    names=['a', 'b'], blocksize=200).compute(get=dask.get)
def test_read_csv_singleton_dtype():
    """A scalar dtype= (applied to every column) behaves as in pandas."""
    payload = b'a,b\n1,2\n3,4\n5,6'
    with filetext(payload, mode='wb') as fn:
        eq(pd.read_csv(fn, dtype=float), dd.read_csv(fn, dtype=float))
| bsd-3-clause |
SirNicolas/python-telegram-bot | docs/source/conf.py | 2 | 9334 | # -*- coding: utf-8 -*-
#
# Python Telegram Bot documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 10 22:25:07 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Telegram Bot'
copyright = u'2015, Leandro Toledo'
author = u'Leandro Toledo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.8'
# The full version, including alpha/beta/rc tags.
release = '2.8.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonTelegramBotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PythonTelegramBot.tex', u'Python Telegram Bot Documentation',
u'Leandro Toledo', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pythontelegrambot', u'Python Telegram Bot Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PythonTelegramBot', u'Python Telegram Bot Documentation',
author, 'PythonTelegramBot', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 |
EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/src/twisted/internet/test/_posixifaces.py | 47 | 4689 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
POSIX implementation of local network interface enumeration.
"""
from __future__ import division, absolute_import
import sys, socket
from socket import AF_INET, AF_INET6, inet_ntop
from ctypes import (
CDLL, POINTER, Structure, c_char_p, c_ushort, c_int,
c_uint32, c_uint8, c_void_p, c_ubyte, pointer, cast)
from ctypes.util import find_library
from twisted.python.compat import _PY3, nativeString
if _PY3:
    # Once #6070 is implemented, this can be replaced with the implementation
    # from that ticket:
    # NOTE: this deliberately shadows the builtin chr() within this module so
    # the byte assembly in _interfaces() behaves the same on Python 2 and 3.
    def chr(i):
        """
        Python 3 implementation of Python 2 chr(), i.e. convert an integer to
        corresponding byte.
        """
        return bytes([i])
# Handle to the platform C library, used to call getifaddrs(3)/freeifaddrs(3).
libc = CDLL(find_library("c"))

if sys.platform.startswith('freebsd') or sys.platform == 'darwin':
    # BSD-derived systems prefix every sockaddr with a one-byte length field
    # (sa_len) followed by a one-byte family field.
    _sockaddrCommon = [
        ("sin_len", c_uint8),
        ("sin_family", c_uint8),
    ]
else:
    # Other POSIX platforms (e.g. Linux) start with a 16-bit family field.
    _sockaddrCommon = [
        ("sin_family", c_ushort),
    ]
class in_addr(Structure):
    # Mirror of C's ``struct in_addr``: a packed 4-byte IPv4 address.
    _fields_ = [
        ("in_addr", c_ubyte * 4),
    ]
class in6_addr(Structure):
    # Mirror of C's ``struct in6_addr``: a packed 16-byte IPv6 address.
    _fields_ = [
        ("in_addr", c_ubyte * 16),
    ]
class sockaddr(Structure):
    # Generic sockaddr header: just the platform-common prefix plus a port.
    # Used to read the address family before casting to a concrete layout.
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
    ]
class sockaddr_in(Structure):
    # Mirror of C's ``struct sockaddr_in`` (IPv4 socket address).
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
        ("sin_addr", in_addr),
    ]
class sockaddr_in6(Structure):
    # Mirror of C's ``struct sockaddr_in6`` (IPv6 socket address with flow
    # information preceding the address, as in the system headers).
    _fields_ = _sockaddrCommon + [
        ("sin_port", c_ushort),
        ("sin_flowinfo", c_uint32),
        ("sin_addr", in6_addr),
    ]
class ifaddrs(Structure):
    # ``struct ifaddrs`` from getifaddrs(3).  Declared empty first because
    # the struct is self-referential (ifa_next points at another ifaddrs);
    # _fields_ is filled in after the pointer type exists.
    pass
ifaddrs_p = POINTER(ifaddrs)
ifaddrs._fields_ = [
    ('ifa_next', ifaddrs_p),
    ('ifa_name', c_char_p),
    ('ifa_flags', c_uint32),
    ('ifa_addr', POINTER(sockaddr)),
    ('ifa_netmask', POINTER(sockaddr)),
    ('ifa_dstaddr', POINTER(sockaddr)),
    ('ifa_data', c_void_p)]

# int getifaddrs(struct ifaddrs **ifap): allocates a linked list of interface
# addresses; returns a negative value on error.
getifaddrs = libc.getifaddrs
getifaddrs.argtypes = [POINTER(ifaddrs_p)]
getifaddrs.restype = c_int
# void freeifaddrs(struct ifaddrs *ifa): releases the list getifaddrs made.
freeifaddrs = libc.freeifaddrs
freeifaddrs.argtypes = [ifaddrs_p]
def _maybeCleanupScopeIndex(family, packed):
    """
    On FreeBSD, kill the embedded interface indices in link-local scoped
    addresses.

    @param family: The address family of the packed address - one of the
        I{socket.AF_*} constants.

    @param packed: The packed representation of the address (ie, the bytes of a
        I{in_addr} field).
    @type packed: L{bytes}

    @return: The packed address with any FreeBSD-specific extra bits cleared.
    @rtype: L{bytes}

    @see: U{https://twistedmatrix.com/trac/ticket/6843}
    @see: U{http://www.freebsd.org/doc/en/books/developers-handbook/ipv6.html#ipv6-scope-index}

    @note: Indications are that the need for this will be gone in FreeBSD >=10.
    """
    onFreeBSD = sys.platform.startswith('freebsd')
    isLinkLocal = packed[:2] == b"\xfe\x80"
    if not (onFreeBSD and isLinkLocal):
        return packed
    # Bytes 2-3 carry the kernel's embedded scope (interface) index; zero
    # them so the address matches its canonical textual form.
    return b"\xfe\x80\x00\x00" + packed[4:]
def _interfaces():
    """
    Call C{getifaddrs(3)} and return a list of tuples of interface name, address
    family, and human-readable address representing its results.

    @raise OSError: If the C{getifaddrs} call fails.
    """
    ifaddrs = ifaddrs_p()
    if getifaddrs(pointer(ifaddrs)) < 0:
        raise OSError()
    results = []
    try:
        # Walk the list with a separate cursor so the head pointer survives
        # for freeifaddrs() below.  Advancing ifaddrs itself (as the code
        # previously did) meant freeifaddrs() was handed a NULL pointer at
        # loop end, leaking the whole kernel-allocated list on every call.
        point = ifaddrs
        while point:
            if point[0].ifa_addr:
                # Read the family from the generic sockaddr header, then
                # re-cast to the concrete IPv4/IPv6 layout for the address.
                family = point[0].ifa_addr[0].sin_family
                if family == AF_INET:
                    addr = cast(point[0].ifa_addr, POINTER(sockaddr_in))
                elif family == AF_INET6:
                    addr = cast(point[0].ifa_addr, POINTER(sockaddr_in6))
                else:
                    # Other families (e.g. link-layer) are not reported.
                    addr = None

                if addr:
                    # chr() is the bytes-producing shim defined above.
                    packed = b''.join(map(chr, addr[0].sin_addr.in_addr[:]))
                    packed = _maybeCleanupScopeIndex(family, packed)
                    results.append((
                        point[0].ifa_name,
                        family,
                        inet_ntop(family, packed)))
            point = point[0].ifa_next
    finally:
        freeifaddrs(ifaddrs)
    return results
def posixGetLinkLocalIPv6Addresses():
    """
    Return a list of strings in colon-hex format representing all the link local
    IPv6 addresses available on the system, as reported by I{getifaddrs(3)}.
    """
    found = []
    for interface, family, address in _interfaces():
        if family != socket.AF_INET6:
            continue
        address = nativeString(address)
        if address.startswith('fe80:'):
            # Scope each link-local address to its interface: "fe80::1%eth0".
            found.append('%s%%%s' % (address, nativeString(interface)))
    return found
| mit |
jarryliu/queue-sim | plot/draw.py | 1 | 15784 | #!/usr/local/bin/python
from math import factorial, exp
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d, Axes3D #<-- Note the capitalization!
import sys
# Measured data: message intervals (us), the equivalent send rates, CPU
# utilisation, throughput and latency samples.
intList = [5000, 1000, 500, 100, 50, 10]
trate = [1000000.0/i for i in intList]
cpuList = [1.0, 4.2, 8.0, 26.9, 38.7, 52]
rate = [0.314, 1.45, 2.72, 9.42, 13.6, 21.1]
rate = [r*1000 for r in rate]
# Latency samples look like microseconds (e.g. 8.19e+03); dividing by 1e6
# converts them to seconds, which is what the ylim(0, 0.01) below expects.
latency = [8.1924e+03, 7.9385e+03, 7.8343e+03, 8.1685e+03, 8.6729e+03, 8.6729e+03 ]
latency = [l/1000000.0 for l in latency]
#plt.plot(trate, cpuList, 'r-.')
plt.figure(1)
plt.subplot(211)
plt.plot(rate, cpuList, 'bo-')
plt.ylabel("CPU utilization (%)")
plt.xlabel("Message rate (Kbps)")
plt.subplot(212)
plt.plot(rate, latency, 'ro-')
plt.ylim(0, 0.01)
# Fixed axis label: the plotted values are seconds (us / 1e6), not ms --
# the old "Latency (ms)" label contradicted both the conversion and ylim.
plt.ylabel("Latency (s)")
plt.xlabel("Message rate (Kbps)")
plt.show()
sys.exit()
#from mpl_toolkits.mplot3d import Axes3D
#from theory import getDelay, getLatency, totalDelay
# bucket = np.arange(1,21)
# bresult= [0.00409438016142, 0.0033155469912, 0.00267805247694, 0.00217196080862, 0.00179592654568,
# 0.00143718393687, 0.00116060379269, 0.000978849410248, 0.000755804749056, 0.000629652721451,
# 0.000509918882204, 0.000438399316067, 0.000338310877662, 0.000280665269416, 0.000244070153101,
# 0.000172161374231, 0.000149499687789, 0.000121459034788, 9.30199199637e-05, 7.75854592678e-05]
#
# dlist = []
# for i in bucket:
# dlist.append(getDelay(0.9,i))
# plt.plot(bucket, dlist, "-")
# plt.plot(bucket, np.array(bresult)*1000, 'o')
#
#
# legendList = ['theory', 'simulation']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('bucket size')
# plt.ylabel('average latency (ms)')
# plt.show()
#
#
# rate = range(1000, 0, -100)
# rresult = [0.000644522106328, 0.000720025905961, 0.000833121678584, 0.000895596093789, 0.00101505313479, 0.00128537828299, 0.0015555967225, 0.00209048499208, 0.00313702591988, 0.00616596723663]
#
# d = getDelay(0.9,10)
# dlist = [d/(0.1*(10-i)) for i in xrange(10)]
# plt.plot(rate, dlist, "-")
# plt.plot(rate, np.array(rresult)*1000, 'o')
#
#
# legendList = ['theory', 'simulation']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('bucket rate')
# plt.ylabel('average latency (ms)')
# plt.show()
# interval = [0.1, 0.2, 0.5, 1, 2, 5, 10, 20]
# possion_result = [0.00131450177183, 0.0015070228446, 0.0016599388821, 0.00161004213216, 0.0015961046498, 0.00146764593642, 0.00144323696861, 0.00140161336144]
# b_result = np.array([0.000590173923748, 0.00223829675234, 0.00349507988276, 0.00554642015014,
# 0.00793513324288, 0.0117633777557, 0.0131939118183, 0.0152916625152,
# 0.0164328270268, 0.0222740491034, 0.0260078343715, 0.026809945385])*1000
#
# s_result = np.array([0.00945765245304, 0.00211677915805, 0.00153174938914, 0.00129779523745,
# 0.00117139743497, 0.00108493653043, 0.00106551896397, 0.00105197218411,
# 0.00104446798347, 0.00100978968546, 0.00100655731514, 0.00100732780158])*1000
# b_result = np.array([0.000556862018053, 0.00226279268004, 0.00373865173411, 0.00554710361537,
# 0.00823055300791, 0.0117136387434, 0.0128881523441, 0.0166177605538,
# 0.016524255912, 0.0221778073856, 0.0257723768586, 0.0267681413876])*1000
#
# s_result = np.array([0.0092905418664, 0.0021032834536, 0.00152273155381, 0.00129437599152,
# 0.00116818969581, 0.00108350271543, 0.00106527594669, 0.00105236611835,
# 0.0010370405632086788, 0.00101056378729, 0.00100803562565, 0.00100450341295])*1000
######### best result
# b_result = np.array([0, 0, 0, 1.24239805608e-06,
# 1.34584248141e-05, 4.84002550078e-05, 0.000117872470448, 0.000214928715841,
# 0.000351449322535, 0.000594727983716, 0.000975557026088, 0.00151676371671])*1000
#
# s_result = np.array([0.00980780382356, 0.00251265470871, 0.00181477766449, 0.00156341771023,
# 0.00142817810789, 0.00134093139615, 0.00128743022846, 0.00124448951586,
# 0.00121615276775, 0.00118856757796, 0.00116722571315, 0.00115158808519])*1000
# rate = 2000
# bucketSize = 200
# w_result = b_result + s_result
#
# x = range(2,14)
# b_theory = np.array([getLatency(rate/i, 0.9, bucketSize/i) for i in x])
# s_theory = np.array([1.0/(1000 - 1800.0/i) for i in x])*1000
# print b_theory
# print s_theory
# plt.plot(x, b_result, '*')
# plt.plot(x,b_theory)
# plt.plot(x, s_result, '.')
# plt.plot(x, s_theory)
# plt.plot(x, w_result, 'o')
# plt.plot(x, b_theory + s_theory)
#
# legendList = ['token_bucket_sim', 'token_bucket_theory', 'server_sim', 'server_theory', 'latency_sim', 'latency_theory']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('number of servers')
# plt.ylabel('average latency (ms)')
# plt.show()
######### draw theory
# b_result = np.array([0, 0, 0, 1.24239805608e-06,
# 1.34584248141e-05, 4.84002550078e-05, 0.000117872470448, 0.000214928715841,
# 0.000351449322535, 0.000594727983716, 0.000975557026088, 0.00151676371671])*1000
#
# s_result = np.array([0.00980780382356, 0.00251265470871, 0.00181477766449, 0.00156341771023,
# 0.00142817810789, 0.00134093139615, 0.00128743022846, 0.00124448951586,
# 0.00121615276775, 0.00118856757796, 0.00116722571315, 0.00115158808519])*1000
#
# util = 0.9
# rate = 2000
# prate = 1000
# bucketSize = 200
# start = 2
# x = range(start,len(b_result)+2)
#
# b_theory = []
# s_theory = []
# for i in x:
# b, s, t = totalDelay(rate, bucketSize, rate*util, prate, i)
# b_theory.append(b)
# s_theory.append(s)
# print b, s, t
# # b_theory = np.array([getLatency(rate/i, util, bucketSize/i) for i in x])
# # s_theory = np.array([1/(prate - start*prate*0.9*1.0/i) for i in x])
# #
#
# w_result = b_result + s_result
# plt.plot(x, b_result, '*')
# plt.plot(x,np.array(b_theory)*1000)
# plt.plot(x, s_result, '.')
# plt.plot(x, np.array(s_theory)*1000)
# plt.plot(x, w_result, 'o')
# plt.plot(x, np.array(b_theory)*1000 + np.array(s_theory)*1000)
#
# legendList = ['token_bucket_sim', 'token_bucket_theory', 'server_sim', 'server_theory', 'latency_sim', 'latency_theory']
# plt.legend(legendList, loc='upper right')
# plt.xlabel('number of servers')
# plt.ylabel('average latency (ms)')
# plt.show()
######### drop load increase
# r = 500
# b = 20
# mu = 500
# opt_n = 1
# x = []
# nList = []
# bList = []
# sList = []
# tList = []
#
# for i in xrange(49):
# lam = (i+1)*10
# x.append(lam)
# for j in xrange(4):
# tb, ts, t = totalDelay(r, b, lam, mu, j+1)
# if len(bList) < j+1:
# bList.append([])
# sList.append([])
# tList.append([])
# bList[j].append(tb)
# sList[j].append(ts)
# tList[j].append(t)
#
# print bList
# print sList
# print tList
# print nList
# #plt.plot(x, b_result, '*')
# plt.plot(x,np.array(tList[0])*1000)
# #plt.plot(x, s_result, '.')
# plt.plot(x, np.array(tList[1])*1000)
# #plt.plot(x, w_result, 'o')
# plt.plot(x, np.array(tList[2])*1000)
# plt.plot(x, np.array(tList[3])*1000)
#
# legendList = ['1 server', '2 servers', '3 servers', '4 servers']
# plt.legend(legendList, loc='upper left')
# plt.xlabel('arrival rate')
# plt.ylabel('average latency (ms)')
# plt.ylim(0, 15)
# plt.show()
############################
# lam = 50.0
# r = 500
# b = [2 , 4, 8, 16, 32, 128]
# bLegend = ["token bucket b="+str(i) for i in b]
# #mu = 500
# opt_n = 1
# x = []
# bList = []
# sList = []
#
# for i in xrange(100):
# mu = lam + (i+1)*0.5
# x.append(lam/mu)
# for j in xrange(len(b)):
# tb, ts, t = totalDelay(mu, b[j], lam, mu, 1)
# if len(bList) < j+1:
# bList.append([])
# bList[j].append(tb*1000)
# sList.append(lam/mu/(mu - lam)*1000)
#
# plt.plot(x,sList)
# for j in xrange(len(b)):
# plt.plot(x, bList[j])
# legendList = ["queuing time"] + bLegend
# plt.legend(legendList, loc='upper left')
# plt.xlabel('utilization')
# plt.ylabel('average latency (ms)')
# plt.ylim(0, 400)
# plt.show()
### increase server
# lam = 500.0
# r = 500
# b = [2 , 4, 8, 16, 32, 64]
# bLegend = ["token bucket b="+str(i) for i in b]
# #mu = 500
# opt_n = 1
# x = []
# bList = []
# sList = []
# ratioA = 3
# ratioB = 4
# ratio = ratioA*1.0/ratioB
#
# for i in xrange(100):
# mu = lam + (i+1)*5.0
# x.append(lam/mu)
# for j in xrange(len(b)):
# tb1, ts1, t1 = totalDelay(mu*ratioA, b[j]*ratioA, lam*ratioA, mu, ratioA)
# tb2, ts2, t2 = totalDelay(mu*ratioA, b[j]*ratioA, lam*ratioA, mu, ratioB)
# if len(bList) < j+1:
# bList.append([])
# bList[j].append((tb2-tb1)*1000)
# print (tb2-tb1)*1000
# sList.append((lam/mu/(mu - lam) - ratio*lam/mu/(mu - lam*ratio))*1000)
#
# print x
# plt.plot(x,sList)
# for j in xrange(len(b)):
# plt.plot(x, bList[j])
# legendList = ["queuing time"] + bLegend
# plt.legend(legendList, loc='upper left')
# plt.xlabel('utilization')
# plt.ylabel('change in latency (ms) when increase a server')
# plt.ylim(0, 40)
# plt.show()
####### plot
# r = 1000.0
# b = 20
# lamList = [950, 955, 960, 965, 970, 975, 980, 985, 990, 995]
# mu = 1000.0
# bList = []
# sList = []
# for lam in lamList:
# lam *= 1.0
# tb, ts, t = totalDelay(r, b, lam, mu, 1)
# bList.append(tb)
# sList.append(ts)
#
#
# plt.plot(lamList, bList)
# plt.plot(lamList, sList)
# legendList = ["token time", "server time"]
# plt.legend(legendList,loc="upper left")
# plt.ylim(-0.01, 0.3)
# plt.xlim(945, 1000)
# plt.show()
##### draw
bList = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]
rList = [1050.0, 1100.0, 1150.0, 1200.0, 1250.0, 1300.0, 1350.0, 1400.0, 1450.0, 1500.0]
tbList = [[0.0086655012164963772, 0.0069462937026606858, 0.0061119878316403089,
0.004497763222681749, 0.003876938261844539, 0.003390546964274696,
0.0027159118226675167, 0.0024347461060429659, 0.0019866744028636291,
0.0017593567570707523, 0.0013200001731956527, 0.00099414911505809364],
[0.0037627201922375147, 0.0025625427437552606, 0.0017829949842417827,
0.0012298620493933936, 0.00082696692817260838, 0.00058308794201233677,
0.00040019521369635792, 0.00026437935404872629, 0.00018632066836472602,
0.00012402267729263054, 7.0943063294622068e-05, 5.6494559620774469e-05],
[0.0022095713301978964, 0.0013096853587949539, 0.00072713937256503071,
0.00042894764353715976, 0.00024653261679521114, 0.00013736124194561419,
8.0361412315634121e-05, 4.4430293019744773e-05, 2.9406724790343939e-05,
1.1795146423416464e-05, 8.5039484345559113e-06, 5.9312839614103951e-06],
[0.0014611881929298993, 0.00072593918371486915, 0.00035256953645628077,
0.00017954436487361305, 8.4489334837231018e-05, 4.1318479906370685e-05,
2.1974399513942762e-05, 1.0698779145992274e-05, 4.4373112312226795e-06,
3.028488661354572e-06, 1.2635151914837706e-06, 5.3085700028457263e-07],
[0.0010424890384005199, 0.00044756733360717705, 0.00018240195443261018,
7.9531375689721311e-05, 3.580796894964474e-05, 1.3407249793421535e-05,
5.0436495208507264e-06, 2.3280473410579587e-06, 1.0953525661858181e-06,
4.1576557217018718e-07, 7.1502696550169271e-08, 3.0380263923825622e-08],
[0.0007794387364076905, 0.00028877273908869651, 0.00010307204425758113,
3.7338351682952915e-05, 1.4652217675302338e-05, 4.8590325874305792e-06,
2.377105866288889e-06, 7.8939033892313559e-07, 1.2753345779798336e-07,
1.5646413789522741e-07, 5.0947068469440634e-09, 2.7730064349452735e-08],
[0.00060260250620125587, 0.00019080787103471121, 5.7977087554087513e-05,
1.8072280771463227e-05, 6.3648178513343291e-06, 1.9166132329105377e-06,
6.420684786416018e-07, 2.2753073556394841e-07, 5.0865510844266733e-08,
9.5950108645411091e-09, 1.7806682401669604e-09, 4.703149038959964e-10],
[0.00047860166509120731, 0.00013136002069690563, 3.6838882146894813e-05,
9.6232019150645455e-06, 3.0314451320358898e-06, 1.0411160334375608e-06,
1.7512695237192022e-07, 6.5362352172974166e-08, 5.7878174140796546e-09,
3.1298298001729565e-10, 0.0, 0.0],
[0.0003868298345192014, 9.373483983780517e-05, 2.4924532266800483e-05,
5.2633050377738303e-06, 1.3950417193645079e-06, 2.6167633881354963e-07,
8.4777204153101606e-08, 1.3302193317463208e-08, 1.5399734173206525e-08,
0.0, 0.0, 0.0],
[0.00031714521472453683, 6.7876044345209876e-05, 1.5430425620576841e-05,
2.8363864016281357e-06, 7.2926797369432278e-07, 1.1011910837496543e-07,
7.7841931393777485e-09, 1.3981584637986088e-08, 0.0,
7.8820269800417015e-11, 0.0, 0.0]]
tsList = [[0.006679677862655292, 0.0068770308868411735, 0.0074732966659507918,
0.0077348077227535148, 0.0078105416045624043, 0.008147963937665325,
0.0084921141776806743, 0.008752305338601777, 0.0088621115063590317,
0.0090566327780958918, 0.0093905065648900807, 0.0094743977123601664],
[0.0076441088658002893, 0.0081786353435035122, 0.0087498405194113942,
0.0090029671774246641, 0.0092297928778259427, 0.0093696536701099262,
0.0096617572741030684, 0.0096833025293018727, 0.010003607981588163,
0.0098724565038900442, 0.0098445482952155567, 0.0098002479005328443],
[0.0086956475242956251, 0.0090666151686463504, 0.0093720184341949571,
0.0094531775361485718, 0.0098398963059626848, 0.0098212355945071234,
0.010018041874352037, 0.0098921459096796907, 0.0098956955424670603,
0.010029270355956356, 0.010064604268816387, 0.01001023740313349],
[0.0090904504424792146, 0.009225412608549784, 0.0096016456056585951,
0.0099027595356318918, 0.010039369303912821, 0.0097721368289364549,
0.010042447619923751, 0.010045292325722056, 0.010007482265982762,
0.0099870953803561369, 0.010184912443106139, 0.0098858368161917395],
[0.0093137423055012838, 0.0095619384206493182, 0.0096557424523883665,
0.0099299592347444968, 0.010063250392674448, 0.010127057969903762,
0.0098904826150556166, 0.010036861438288495, 0.0099961991171080636,
0.0099088390440836595, 0.0096536991934565494, 0.010030348539790601],
[0.0093682707003560229, 0.010176501383261838, 0.0098165119959769571,
0.0097205414379321099, 0.010006320447941785, 0.0099182604435972422,
0.010001961172821086, 0.0098252164378607853, 0.0099495692669901714,
0.010102707098157179, 0.010090222760704749, 0.0099789223025760522],
[0.0096336326797271388, 0.0097238686533284747, 0.0098371166194679786,
0.0097904040711137234, 0.0099297341641229348, 0.010001390250069974,
0.0099266848307628282, 0.0098179879154293419, 0.0098578389481048211,
0.0098189810593029593, 0.010100181267139989, 0.0099267782464376418],
[0.0095568714523684116, 0.009885090780846607, 0.0097968008289410768,
0.0097222136568735906, 0.0099612086330636024, 0.010063981692023737,
0.010186485114693453, 0.010036024516736682, 0.0099838449228713509,
0.010130933882378523, 0.010193518255552692, 0.0099776912059497298],
[0.0098415813407483066, 0.0097824395111458257, 0.009936011172877738,
0.010052051864369575, 0.010126848886467584, 0.010142662759735766,
0.010290573689306257, 0.0099869683348446474, 0.0098433343622829003,
0.0098570165778807204, 0.010013374979903155, 0.010064330226453103],
[0.0097614194737020623, 0.009815994410360249, 0.0099672335642590013,
0.0099349179582449675, 0.0098621461642761175, 0.010137879445556835,
0.009970959157126022, 0.010194055612801445, 0.0099125417813472286,
0.0098741304370536624, 0.0099527508964485801, 0.009803767794647502]]
X = np.array(rList)
Y = np.array(bList)
# meshgrid called with the arguments swapped so both grids come out shaped
# (len(rList), len(bList)) == (10, 12), matching tbList/tsList above:
# X varies along rows (bucket rate), Y along columns (bucket size).
Y, X = np.meshgrid(Y,X)
#
# fig = plt.figure()
#
# ax = Axes3D(fig) #<-- Note the difference from your original code...
#
# cset = ax.contour(X, Y, np.array(tbList))
# ax.clabel(cset, fontsize=9, inline=1)
# plt.show()
# #
# Three wireframe surfaces over the (bucket rate, bucket size) grid:
# token-bucket delay, server delay, and their sum.  X was built from rList
# (rates) and Y from bList (sizes), so the axis labels below reflect that;
# the previous labels had the two axes swapped.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, np.array(tbList))
ax.set_xlabel('token bucket rate')
ax.set_ylabel('token bucket size')
plt.show()

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, np.array(tsList))
ax.set_xlabel('token bucket rate')
ax.set_ylabel('token bucket size')
plt.show()

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_wireframe(X, Y, np.array(tbList)+np.array(tsList))
ax.set_xlabel('token bucket rate')
ax.set_ylabel('token bucket size')
plt.show()
| mit |
mcdaniel67/sympy | sympy/geometry/tests/test_util.py | 2 | 1572 | from __future__ import division
from sympy import Symbol, sqrt, Derivative
from sympy.geometry import Point, Polygon, Segment, convex_hull, intersection, centroid
from sympy.geometry.util import idiff
from sympy.solvers.solvers import solve
from sympy.utilities.pytest import raises
# Real-valued symbols shared by the geometry tests below.
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
t = Symbol('t', real=True)
def test_idiff():
    # the use of idiff in ellipse also provides coverage
    circ = x**2 + y**2 - 4
    # Third implicit derivative of the circle: -3*x*(x**2 + y**2)/y**5.
    ans = -3*x*(x**2 + y**2)/y**5
    assert ans == idiff(circ, y, x, 3).simplify()
    # Passing the dependent variable as a list must behave identically.
    assert ans == idiff(circ, [y], x, 3).simplify()
    # Equality is deliberately checked in both orientations.
    assert idiff(circ, y, x, 3).simplify() == ans
    # Cross-check against the explicit branch y = sqrt(4 - x**2).
    explicit = 12*x/sqrt(-x**2 + 4)**5
    assert ans.subs(y, solve(circ, y)[0]).equals(explicit)
    assert True in [sol.diff(x, 3).equals(explicit) for sol in solve(circ, y)]
    # Multiple dependent variables: both y and t depend on x.
    assert idiff(x + t + y, [y, t], x) == -Derivative(t, x) - 1
def test_util():
    """Smoke-test leftover helpers in sympy.geometry.util."""
    origin = Point(0, 0)
    # A single entity has nothing to intersect with.
    assert intersection(origin) == []
    # Non-geometry arguments must be rejected.
    raises(ValueError, lambda: intersection(origin, 3))
    raises(ValueError, lambda: convex_hull(origin, 3))
def test_util_centroid():
    """Centroid of polygons, segments, and bare points."""
    tri = Polygon((0, 0), (10, 0), (10, 10))
    shifted = tri.translate(0, 20)
    assert centroid(tri, shifted) == Point(20, 40)/3

    seg_flat = Segment((0, 0), (2, 0))
    seg_diag = Segment((0, 0), (2, 2))
    assert centroid(seg_flat, seg_diag) == Point(1, -sqrt(2) + 2)

    # Points weight equally, including duplicates.
    assert centroid(Point(0, 0), Point(2, 0)) == Point(2, 0)/2
    assert centroid(Point(0, 0), Point(0, 0), Point(2, 0)) == Point(2, 0)/3
| bsd-3-clause |
jmhodges/letsencrypt | acme/acme/other.py | 14 | 2140 | """Other ACME objects."""
import functools
import logging
import os
from acme import jose
logger = logging.getLogger(__name__)
class Signature(jose.JSONObjectWithFields):
    """ACME signature.

    :ivar .JWASignature alg: Signature algorithm.
    :ivar bytes sig: Signature.
    :ivar bytes nonce: Nonce.
    :ivar .JWK jwk: JWK.

    """
    NONCE_SIZE = 16
    """Minimum size of nonce in bytes."""

    alg = jose.Field('alg', decoder=jose.JWASignature.from_json)
    sig = jose.Field('sig', encoder=jose.encode_b64jose,
                     decoder=jose.decode_b64jose)
    nonce = jose.Field(
        'nonce', encoder=jose.encode_b64jose, decoder=functools.partial(
            jose.decode_b64jose, size=NONCE_SIZE, minimum=True))
    jwk = jose.Field('jwk', decoder=jose.JWK.from_json)

    @classmethod
    def from_msg(cls, msg, key, nonce=None, nonce_size=None, alg=jose.RS256):
        """Create signature with nonce prepended to the message.

        :param bytes msg: Message to be signed.

        :param key: Key used for signing.
        :type key: `cryptography.hazmat.primitives.assymetric.rsa.RSAPrivateKey`
            (optionally wrapped in `.ComparableRSAKey`).

        :param bytes nonce: Nonce to be used. If None, nonce of
            ``nonce_size`` will be randomly generated.
        :param int nonce_size: Size of the automatically generated nonce.
            Defaults to :const:`NONCE_SIZE`.

        :param .JWASignature alg:

        """
        nonce_size = cls.NONCE_SIZE if nonce_size is None else nonce_size
        nonce = os.urandom(nonce_size) if nonce is None else nonce

        msg_with_nonce = nonce + msg
        # Sign exactly the bytes that are logged below; the original code
        # recomputed ``nonce + msg`` here, duplicating the expression.
        sig = alg.sign(key, msg_with_nonce)
        logger.debug('%r signed as %r', msg_with_nonce, sig)

        return cls(alg=alg, sig=sig, nonce=nonce,
                   jwk=alg.kty(key=key.public_key()))

    def verify(self, msg):
        """Verify the signature.

        :param bytes msg: Message that was used in signing.

        """
        # self.alg is not Field, but JWA | pylint: disable=no-member
        return self.alg.verify(self.jwk.key, self.nonce + msg, self.sig)
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/idna/intranges.py | 293 | 1749 | """
Given a list of integers, made up of (hopefully) a small number of long runs
of consecutive integers, compute a representation of the form
((start1, end1), (start2, end2) ...). Then answer the question "was x present
in the original list?" in time O(log(# runs)).
"""
import bisect
def intranges_from_list(list_):
"""Represent a list of integers as a sequence of ranges:
((start_0, end_0), (start_1, end_1), ...), such that the original
integers are exactly those x such that start_i <= x < end_i for some i.
Ranges are encoded as single integers (start << 32 | end), not as tuples.
"""
sorted_list = sorted(list_)
ranges = []
last_write = -1
for i in range(len(sorted_list)):
if i+1 < len(sorted_list):
if sorted_list[i] == sorted_list[i+1]-1:
continue
current_range = sorted_list[last_write+1:i+1]
ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
last_write = i
return tuple(ranges)
def _encode_range(start, end):
return (start << 32) | end
def _decode_range(r):
return (r >> 32), (r & ((1 << 32) - 1))
def intranges_contain(int_, ranges):
    """Determine if `int_` falls into one of the ranges in `ranges`."""
    # Encode int_ with end=0 so it sorts before any real range that
    # starts at the same value (equivalent to _encode_range(int_, 0)).
    probe = int_ << 32
    pos = bisect.bisect_left(ranges, probe)
    # We could be immediately ahead of a range (start, end)
    # with start < int_ <= end.
    if pos > 0:
        prev = ranges[pos - 1]
        lo, hi = prev >> 32, prev & ((1 << 32) - 1)
        if lo <= int_ < hi:
            return True
    # Or immediately behind a range that starts exactly at int_.
    if pos < len(ranges):
        if (ranges[pos] >> 32) == int_:
            return True
    return False
| gpl-3.0 |
scls19fr/cw | generator.py | 1 | 7267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sched, time
import datetime
import morse_talk as mtalk
from morse_talk.utils import wpm_to_duration, WORD
from abc import ABCMeta, abstractmethod
# Bit values used in the binary Morse representation (key down / key up).
TRUE = 1
FALSE = 0
def _get_list_of_nb_of_same_bit(s_bin, on_value, off_value):
"""
Calculate number of consecutive elements with same bit value
>>> lst_bin = [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1]
>>> _get_list_of_nb_of_same_bit(lst_bin, 1, 0)
[1, 1, 1, 1, 1, 3, 3, 1, 3, 1, 3, 3, 1, 1, 1, 1, 1]
"""
bit_prev = on_value
lst_bits_nb = []
count = 0
for i, bit in enumerate(s_bin):
if bit == bit_prev:
count += 1
else:
lst_bits_nb.append(count)
count = 1
bit_prev = bit
lst_bits_nb.append(1)
return lst_bits_nb
def _generate_events(lst_nb, element_duration, offset=1, on_value=TRUE, off_value=FALSE):
    """
    Yield (bit, duration, delay) tuples for the scheduler.

    The bit alternates starting from ``offset`` parity; a trailing run of
    length ``on_value`` (1) is appended, matching the original behaviour.
    """
    elapsed_elements = 0
    for index, nb_elements in enumerate(lst_nb + [on_value]):
        state = (index + offset) % 2
        yield state, nb_elements * element_duration, elapsed_elements * element_duration
        elapsed_elements += nb_elements
def _get_element_duration(element_duration, wpm):
"""
Returns element duration when element_duration and/or code speed is given
>>> _get_element_duration(0.2, None)
0.2
>>> _get_element_duration(None, 15)
0.08
>>> _get_element_duration(None, None)
1
"""
if element_duration is None and wpm is None:
return 1
elif element_duration is not None and wpm is None:
return element_duration
elif element_duration is None and wpm is not None:
return wpm_to_duration(wpm, output='float', word=WORD) / 1000.0
else:
raise NotImplementedError("Can't set both element_duration and wpm")
class MorseCodeGenerator(object):
    """Schedule ON/OFF keying events for a text message encoded as Morse.

    Callbacks registered with set_callback_on/set_callback_off are invoked
    as ``callback(duration, *args, **kwargs)`` at the scheduled times.
    """
    def __init__(self):
        # User-supplied callbacks for key-down / key-up transitions.
        self._on_callback = None
        self._on_callback_args = []
        self._on_callback_kwargs = {}
        self._off_callback = None
        self._off_callback_args = []
        self._off_callback_kwargs = {}
        # Bit values expected in the binary-encoded message.
        self._on_value = TRUE
        self._off_value = FALSE
        self._s = None # scheduler
    def _init_scheduler(self, s, lst_bin, element_duration):
        """
        Initialize scheduler
        """
        # Collapse the bit stream into run lengths, then enter one event
        # per run at its cumulative start time.
        lst_nb = _get_list_of_nb_of_same_bit(lst_bin, self._on_value, self._off_value)
        priority = 1
        for bit, duration, total_delay in _generate_events(lst_nb, element_duration):
            if bit == self._off_value:
                if self._off_callback is not None:
                    s.enter(total_delay, priority, self._off_callback, [duration])
            elif bit == self._on_value:
                if self._on_callback is not None:
                    s.enter(total_delay, priority, self._on_callback, [duration])
            else:
                raise NotImplementedError("'%s' is not a bit" % bit)
            #priority += 1
    def _schedule(self, message, element_duration):
        """
        Schedule on / off events according message
        """
        lst_bin = mtalk.encoding._encode_binary(message)
        self._s = sched.scheduler(time.time, time.sleep)
        self._init_scheduler(self._s, lst_bin, element_duration=element_duration)
    def _send(self):
        # Blocks until every scheduled event has fired.
        self._s.run()
    def send(self, message, element_duration=None):
        """Encode *message* to Morse and fire the callbacks in real time."""
        self._schedule(message, element_duration)
        self._send()
    def _wrap_callback(self, callback, args, kwargs):
        # Bind extra positional/keyword arguments so every callback is
        # invoked uniformly as callback(duration, *args, **kwargs).
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        def callback_wrapper(duration):
            return callback(duration, *args, **kwargs)
        return callback_wrapper
    def set_callback_on(self, callback, args=None, kwargs=None):
        self._on_callback = self._wrap_callback(callback, args, kwargs)
    def set_callback_off(self, callback, args=None, kwargs=None):
        self._off_callback = self._wrap_callback(callback, args, kwargs)
class SampleGeneratorApp(object):
    """
    An abstract class for morse generator application
    """
    # NOTE(review): ``__metaclass__`` only takes effect on Python 2; on
    # Python 3 the abstractmethods below are not enforced — confirm the
    # intended interpreter.
    __metaclass__ = ABCMeta
    def __init__(self, message, element_duration):
        # Message to transmit and the duration of a single Morse element.
        self.message = message
        self._generator = MorseCodeGenerator()
        self.element_duration = element_duration
        #self._generator.set_callback_on(on, [self.t0])
        #self._generator.set_callback_off(off, [self.t0])
    @abstractmethod
    def on_ON(self, duration):
        # Called when the key goes down for *duration* seconds.
        pass
    @abstractmethod
    def on_OFF(self, duration):
        # Called when the key goes up for *duration* seconds.
        pass
    def run(self):
        # Record the start time and bind it into both callbacks so
        # subclasses can report elapsed offsets.
        self.t0 = datetime.datetime.utcnow()
        self._generator.set_callback_on(self.on_ON, [self.t0])
        self._generator.set_callback_off(self.on_OFF, [self.t0])
        self._generator.send(self.message, element_duration=self.element_duration)
class PrintableSampleGeneratorApp(SampleGeneratorApp):
    """
    Print 1 or 0 on the console for each keying transition of a text
    message encoded to Morse code.
    """
    def __init__(self, *args, **kwargs):
        super(PrintableSampleGeneratorApp, self).__init__(*args, **kwargs)

    def _report(self, duration, t0, state):
        # Shared "<elapsed> <state> <duration>" line for both transitions.
        elapsed = datetime.datetime.utcnow() - t0
        print("%s %s %s" % (elapsed, state, duration))

    def on_ON(self, duration, t0):
        self._report(duration, t0, TRUE)

    def on_OFF(self, duration, t0):
        self._report(duration, t0, FALSE)
class ListenableSampleGeneratorApp(SampleGeneratorApp):
    """
    Emit a sound at a given frequency for a text message encoded to
    Morse code.  Not implemented yet: constructing it always raises.
    """
    def __init__(self, *args, **kwargs):
        super(ListenableSampleGeneratorApp, self).__init__(*args, **kwargs)
        print("sound enabled")
        print("")
        raise NotImplementedError("ToDo")

    def _report(self, duration, t0, state):
        # Console trace identical to the printable app's output.
        elapsed = datetime.datetime.utcnow() - t0
        print("%s %s %s" % (elapsed, state, duration))

    def on_ON(self, duration, t0):
        self._report(duration, t0, TRUE)

    def on_OFF(self, duration, t0):
        self._report(duration, t0, FALSE)
class LedSampleGeneratorApp(SampleGeneratorApp):
    """
    Switch a LED on and off for a text message encoded to Morse code.

    A computer with GPIO is required (or a computer connected to a board
    with GPIO such as Arduino boards).  Pingo is also required:
    http://www.pingo.io/
    https://github.com/pingo-io/pingo-py
    """
    def __init__(self, *args, **kwargs):
        # The pingo LED object arrives as the 'led' keyword argument.
        self._led = kwargs.pop('led')
        super(LedSampleGeneratorApp, self).__init__(*args, **kwargs)

    def _report(self, duration, t0, state):
        elapsed = datetime.datetime.utcnow() - t0
        print("%s %s %s" % (elapsed, state, duration))

    def on_ON(self, duration, t0):
        self._report(duration, t0, TRUE)
        self._led.on()

    def on_OFF(self, duration, t0):
        self._report(duration, t0, FALSE)
        self._led.off()
def main():
    # Run the doctests embedded in this module.
    import doctest
    doctest.testmod()

if __name__ == '__main__':
    main()
| gpl-3.0 |
mbr0wn/gnuradio | gr-analog/python/analog/qa_ctcss_squelch.py | 5 | 2389 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, analog, blocks
class test_ctcss_squelch(gr_unittest.TestCase):

    def setUp(self):
        # Fresh flowgraph for every test case.
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_ctcss_squelch_001(self):
        # Test set/gets
        rate = 1
        rate2 = 2
        freq = 100
        level = 0.5
        length = 1
        ramp = 1
        ramp2 = 2
        gate = True
        gate2 = False
        op = analog.ctcss_squelch_ff(rate, freq, level,
                                     length, ramp, gate)
        # Setters must be reflected by the matching getters.
        op.set_ramp(ramp2)
        r = op.ramp()
        self.assertEqual(ramp2, r)
        op.set_gate(gate2)
        g = op.gate()
        self.assertEqual(gate2, g)

    def test_ctcss_squelch_002(self):
        # Test runtime, gate=True
        rate = 1
        freq = 100
        level = 0.0
        length = 1
        ramp = 1
        gate = True
        src_data = [float(x) / 10.0 for x in range(1, 40)]
        # NOTE(review): expected_result ALIASES src_data, so zeroing
        # expected_result[0] also zeroes the first *input* sample before
        # the source block is built below.  Confirm this is intentional;
        # a copy (src_data[:]) may have been meant.
        expected_result = src_data
        expected_result[0] = 0
        src = blocks.vector_source_f(src_data)
        op = analog.ctcss_squelch_ff(rate, freq, level,
                                     length, ramp, gate)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 4)

    def test_ctcss_squelch_003(self):
        # Test runtime, gate=False
        rate = 1
        freq = 100
        level = 0.5
        length = 1
        ramp = 1
        gate = False
        src_data = [float(x) / 10.0 for x in range(1, 40)]
        src = blocks.vector_source_f(src_data)
        op = analog.ctcss_squelch_ff(rate, freq, level,
                                     length, ramp, gate)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()
        # The graph has already run here, so mutating the aliased
        # src_data only affects the expected values (first 5 samples
        # squelched to zero).
        expected_result = src_data
        expected_result[0:5] = [0, 0, 0, 0, 0]
        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 4)
if __name__ == '__main__':
    # Run the QA suite through GNU Radio's unittest wrapper.
    gr_unittest.run(test_ctcss_squelch)
| gpl-3.0 |
3manuek/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
# Tiny text corpus used by the FeatureUnion / vectorizer tests below.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
class IncorrectT(object):
    """Mock object WITHOUT a ``fit`` method, for parameter-dispatch tests."""

    def __init__(self, a=None, b=None):
        # Two free-form parameters mirrored as attributes.
        self.a = a
        self.b = b
class T(IncorrectT):
    """Mock estimator exposing fit/get_params/set_params."""

    def fit(self, X, y):
        # Fitting is a no-op; return self like scikit-learn estimators.
        return self

    def get_params(self, deep=False):
        return dict(a=self.a, b=self.b)

    def set_params(self, **params):
        # Only 'a' is honoured, mirroring the original mock.
        self.a = params['a']
        return self
class TransfT(T):
    """Mock transformer whose transform is the identity."""

    def transform(self, X, y=None):
        # Pass the input through unchanged.
        return X
class FitParamT(object):
    """Mock classifier whose prediction echoes the last fit flag."""

    def __init__(self):
        # Records whether the last call to fit() was flagged successful.
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        # X and y are ignored; only the keyword flag matters.
        self.successful = should_succeed

    def predict(self, X):
        return self.successful
def test_pipeline_init():
    # Test the various init parameters of the pipeline.
    assert_raises(TypeError, Pipeline)
    # Check that we can't instantiate pipelines with objects without fit
    # method
    pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
    # Smoke test with only an estimator
    clf = T()
    pipe = Pipeline([('svc', clf)])
    # Deep params = shallow params plus the step object and its nested params.
    assert_equal(pipe.get_params(deep=True),
                 dict(svc__a=None, svc__b=None, svc=clf,
                      **pipe.get_params(deep=False)
                      ))
    # Check that params are set
    pipe.set_params(svc__a=0.1)
    assert_equal(clf.a, 0.1)
    assert_equal(clf.b, None)
    # Smoke test the repr:
    repr(pipe)
    # Test with two objects
    clf = SVC()
    filter1 = SelectKBest(f_classif)
    pipe = Pipeline([('anova', filter1), ('svc', clf)])
    # Check that we can't use the same stage name twice
    assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
    # Check that params are set
    pipe.set_params(svc__C=0.1)
    assert_equal(clf.C, 0.1)
    # Smoke test the repr:
    repr(pipe)
    # Check that params are not set when naming them wrong
    assert_raises(ValueError, pipe.set_params, anova__C=0.1)
    # Test clone
    pipe2 = clone(pipe)
    assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
    # Check that apart from estimators, the parameters are the same
    params = pipe.get_params(deep=True)
    params2 = pipe2.get_params(deep=True)
    # Strip the shallow (non-nested) keys from both dicts first.
    for x in pipe.get_params(deep=False):
        params.pop(x)
    for x in pipe2.get_params(deep=False):
        params2.pop(x)
    # Remove estimators that where copied
    params.pop('svc')
    params.pop('anova')
    params2.pop('svc')
    params2.pop('anova')
    assert_equal(params, params2)
def test_pipeline_methods_anova():
    # Exercise every prediction method of an anova + logistic pipeline.
    iris = load_iris()
    X, y = iris.data, iris.target
    pipe = Pipeline([('anova', SelectKBest(f_classif, k=2)),
                     ('logistic', LogisticRegression())])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_fit_params():
    # Fit parameters must be routed to the right pipeline step.
    pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
    pipe.fit(X=None, y=None, clf__should_succeed=True)
    # The classifier saw the routed flag...
    assert_true(pipe.predict(None))
    # ...while the transformer's own parameters were left untouched.
    transf = pipe.named_steps['transf']
    assert_true(transf.a is None)
    assert_true(transf.b is None)
def test_pipeline_raise_set_params_error():
    # Setting an unknown parameter must raise a helpful ValueError,
    # both at the top level and for nested estimators.
    pipe = Pipeline([('cls', LinearRegression())])
    error_msg = ('Invalid parameter %s for estimator %s. '
                 'Check the list of available parameters '
                 'with `estimator.get_params().keys()`.')
    assert_raise_message(ValueError,
                         error_msg % ('fake', 'Pipeline'),
                         pipe.set_params,
                         fake='nope')
    # The nested case embeds the full pipeline repr in the message.
    assert_raise_message(ValueError,
                         error_msg % ("fake", pipe),
                         pipe.set_params,
                         fake__estimator='nope')
def test_pipeline_methods_pca_svm():
    # Exercise every prediction method of a pca + svm pipeline.
    iris = load_iris()
    X, y = iris.data, iris.target
    pipe = Pipeline([('pca', PCA(n_components='mle', whiten=True)),
                     ('svc', SVC(probability=True, random_state=0))])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
    # Check output shapes of every prediction method for two
    # different preprocessing front-ends.
    iris = load_iris()
    X, y = iris.data, iris.target
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    clf = SVC(probability=True, random_state=0)

    for step in [StandardScaler(),
                 RandomizedPCA(n_components=2, whiten=True)]:
        pipe = Pipeline([('preprocess', step), ('svc', clf)])
        pipe.fit(X, y)

        # check shapes of various prediction functions
        assert_equal(pipe.predict(X).shape, (n_samples,))
        assert_equal(pipe.predict_proba(X).shape, (n_samples, n_classes))
        assert_equal(pipe.predict_log_proba(X).shape,
                     (n_samples, n_classes))
        assert_equal(pipe.decision_function(X).shape,
                     (n_samples, n_classes))
        pipe.score(X, y)
def test_fit_predict_on_pipeline():
    # fit_predict on a pipeline must match applying the transform and
    # clustering steps one after the other.
    iris = load_iris()
    scaler = StandardScaler()
    km = KMeans(random_state=0)

    # Reference result: run the two steps by hand.
    reference = km.fit_predict(scaler.fit_transform(iris.data))

    # Same computation through the pipeline in one call.
    pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
    assert_array_almost_equal(pipe.fit_predict(iris.data), reference)
def test_fit_predict_on_pipeline_without_fit_predict():
    # A pipeline only exposes fit_predict when its final step does.
    pipe = Pipeline([('scaler', StandardScaler()), ('pca', PCA())])
    assert_raises_regex(AttributeError,
                        "'PCA' object has no attribute 'fit_predict'",
                        getattr, pipe, 'fit_predict')
def test_feature_union():
    # basic sanity check for feature union
    iris = load_iris()
    X = iris.data
    X -= X.mean(axis=0)
    y = iris.target
    svd = TruncatedSVD(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    fs = FeatureUnion([("svd", svd), ("select", select)])
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # 2 SVD components + 1 selected feature.
    assert_equal(X_transformed.shape, (X.shape[0], 3))
    # check if it does the expected thing
    assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    # test if it also works for sparse input
    # We use a different svd object to control the random_state stream
    fs = FeatureUnion([("svd", svd), ("select", select)])
    X_sp = sparse.csr_matrix(X)
    X_sp_transformed = fs.fit_transform(X_sp, y)
    assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
    # test setting parameters
    fs.set_params(select__k=2)
    assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
    X_transformed = fs.fit_transform(X, y)
    # 4 identity features + 2 SVD components + 2 selected features.
    assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
    # make_union names transformers after their lowercased class names.
    pca = PCA()
    mock = TransfT()
    union = make_union(pca, mock)
    names, transformers = zip(*union.transformer_list)
    assert_equal(names, ("pca", "transft"))
    assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
    # A pipeline ending in a transformer supports transform,
    # fit_transform and inverse_transform.
    X = load_iris().data
    pca = PCA(n_components=2)
    pipeline = Pipeline([('pca', pca)])

    # All three ways of transforming must agree.
    X_trans = pipeline.fit(X).transform(X)
    assert_array_almost_equal(X_trans, pipeline.fit_transform(X))
    assert_array_almost_equal(X_trans, pca.fit_transform(X))

    # Round-tripping through the pipeline matches the raw PCA.
    assert_array_almost_equal(pipeline.inverse_transform(X_trans),
                              pca.inverse_transform(X_trans))
def test_pipeline_fit_transform():
    # fit_transform must work even when the step lacks a native
    # fit_transform (Pipeline falls back to fit().transform()).
    iris = load_iris()
    transft = TransfT()
    pipeline = Pipeline([('mock', transft)])
    via_pipeline = pipeline.fit_transform(iris.data, iris.target)
    via_steps = transft.fit(iris.data, iris.target).transform(iris.data)
    assert_array_almost_equal(via_pipeline, via_steps)
def test_make_pipeline():
    # Steps are auto-named from their class, with numeric suffixes for
    # duplicated classes.
    t1, t2 = TransfT(), TransfT()

    pipe = make_pipeline(t1, t2)
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transft-1")
    assert_equal(pipe.steps[1][0], "transft-2")

    pipe = make_pipeline(t1, t2, FitParamT())
    assert_true(isinstance(pipe, Pipeline))
    assert_equal(pipe.steps[0][0], "transft-1")
    assert_equal(pipe.steps[1][0], "transft-2")
    assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
    # test feature union with transformer weights
    iris = load_iris()
    X = iris.data
    y = iris.target
    pca = RandomizedPCA(n_components=2, random_state=0)
    select = SelectKBest(k=1)
    # test using fit followed by transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    fs.fit(X, y)
    X_transformed = fs.transform(X)
    # test using fit_transform
    fs = FeatureUnion([("pca", pca), ("select", select)],
                      transformer_weights={"pca": 10})
    X_fit_transformed = fs.fit_transform(X, y)
    # test it works with transformers missing fit_transform
    fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
                      transformer_weights={"mock": 10})
    X_fit_transformed_wo_method = fs.fit_transform(X, y)
    # check against expected result
    # We use a different pca object to control the random_state stream
    # (PCA columns must equal 10x the unweighted projection).
    assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
    assert_array_equal(X_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    assert_array_almost_equal(X_fit_transformed[:, :-1],
                              10 * pca.fit_transform(X))
    assert_array_equal(X_fit_transformed[:, -1],
                       select.fit_transform(X, y).ravel())
    # 4 identity features + 2 PCA components + 1 selected feature.
    assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
    # test that n_jobs work for FeatureUnion
    X = JUNK_FOOD_DOCS
    # Serial reference union and two parallel clones.
    fs = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ])
    fs_parallel = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)
    fs_parallel2 = FeatureUnion([
        ("words", CountVectorizer(analyzer='word')),
        ("chars", CountVectorizer(analyzer='char')),
    ], n_jobs=2)
    fs.fit(X)
    X_transformed = fs.transform(X)
    assert_equal(X_transformed.shape[0], len(X))
    # Parallel fit + transform must match the serial result exactly.
    fs_parallel.fit(X)
    X_transformed_parallel = fs_parallel.transform(X)
    assert_equal(X_transformed.shape, X_transformed_parallel.shape)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel.toarray()
    )
    # fit_transform should behave the same
    X_transformed_parallel2 = fs_parallel2.fit_transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )
    # transformers should stay fit after fit_transform
    X_transformed_parallel2 = fs_parallel2.transform(X)
    assert_array_equal(
        X_transformed.toarray(),
        X_transformed_parallel2.toarray()
    )
def test_feature_union_feature_names():
    # Feature names must be prefixed with the transformer's name.
    union = FeatureUnion([
        ("chars", CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))),
        ("words", CountVectorizer(analyzer="word")),
    ])
    union.fit(JUNK_FOOD_DOCS)
    feature_names = union.get_feature_names()
    for name in feature_names:
        assert_true("chars__" in name or "words__" in name)
    assert_equal(len(feature_names), 35)
def test_classes_property():
    # classes_ exists only on classifiers, and only after fitting.
    iris = load_iris()
    X, y = iris.data, iris.target

    reg = make_pipeline(SelectKBest(k=1), LinearRegression())
    reg.fit(X, y)
    assert_raises(AttributeError, getattr, reg, "classes_")

    clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
    # An unfitted classifier does not expose classes_ either.
    assert_raises(AttributeError, getattr, clf, "classes_")
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
phisiart/shadowsocks | shadowsocks/asyncdns.py | 655 | 17416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
from shadowsocks import common, lru_cache, eventloop, shell
# How often (seconds) to sweep expired entries from the DNS cache.
CACHE_SWEEP_INTERVAL = 30

# One DNS label: 1-63 alphanumeric/hyphen chars, no leading/trailing hyphen.
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)

common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# DNS record types and classes (RFC 1035; AAAA from RFC 3596).
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def build_address(address):
    """Encode a hostname into DNS wire format (length-prefixed labels).

    Returns None when any label exceeds the 63-octet limit of RFC 1035.
    """
    labels = address.strip(b'.').split(b'.')
    parts = []
    for label in labels:
        length = len(label)
        if length > 63:
            # RFC 1035: a single label may not exceed 63 octets.
            return None
        parts.append(common.chr(length))
        parts.append(label)
    # Root label terminates the name.
    parts.append(b'\0')
    return b''.join(parts)
def build_request(address, qtype):
    """Build a DNS query packet for *address* with record type *qtype*."""
    # Random 16-bit transaction ID followed by a fixed header:
    # RD flag set, one question, no answer/authority/additional records.
    request_id = os.urandom(2)
    header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
    question = build_address(address) + struct.pack('!HH', qtype, QCLASS_IN)
    return request_id + header + question
def parse_ip(addrtype, data, length, offset):
    """Decode the RDATA of a record according to its type.

    A/AAAA records become dotted/colon text addresses, CNAME/NS records
    are parsed as (possibly compressed) names, anything else is returned
    as the raw bytes.
    """
    chunk = data[offset:offset + length]
    if addrtype == QTYPE_A:
        return socket.inet_ntop(socket.AF_INET, chunk)
    if addrtype == QTYPE_AAAA:
        return socket.inet_ntop(socket.AF_INET6, chunk)
    if addrtype in (QTYPE_CNAME, QTYPE_NS):
        return parse_name(data, offset)[1]
    return chunk
def parse_name(data, offset):
    """Parse a (possibly compressed) domain name starting at *offset*.

    Returns ``(bytes_consumed, name)`` where name is the dot-joined labels.
    """
    p = offset
    labels = []
    l = common.ord(data[p])
    while l > 0:
        if (l & (128 + 64)) == (128 + 64):
            # pointer
            # Compression pointer (RFC 1035 4.1.4): top two bits set, the
            # remaining 14 bits give the offset of the rest of the name.
            pointer = struct.unpack('!H', data[p:p + 2])[0]
            pointer &= 0x3FFF
            r = parse_name(data, pointer)
            labels.append(r[1])
            p += 2
            # pointer is the end
            return p - offset, b'.'.join(labels)
        else:
            # Plain label: one length octet followed by that many bytes.
            labels.append(data[p + 1:p + 1 + l])
            p += 1 + l
            l = common.ord(data[p])
    # +1 accounts for the terminating zero-length (root) label.
    return p - offset + 1, b'.'.join(labels)
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
    """Parse one resource record (or question entry) at *offset*.

    Returns ``(bytes_consumed, record_tuple)``.
    """
    nlen, name = parse_name(data, offset)
    if not question:
        # TYPE (2) + CLASS (2) + TTL (4, signed) + RDLENGTH (2) = 10 bytes.
        record_type, record_class, record_ttl, record_rdlength = struct.unpack(
            '!HHiH', data[offset + nlen:offset + nlen + 10]
        )
        ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
        return nlen + 10 + record_rdlength, \
            (name, ip, record_type, record_class, record_ttl)
    else:
        # Question entries carry only TYPE and CLASS.
        # NOTE(review): this tuple has 6 elements vs 5 in the branch
        # above — confirm callers only index the shared leading fields.
        record_type, record_class = struct.unpack(
            '!HH', data[offset + nlen:offset + nlen + 4]
        )
        return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
    """Unpack the 12-byte DNS header.

    Returns (id, qr, tc, ra, rcode, qdcount, ancount, nscount, arcount)
    with the flag fields still masked (non-boolean), or None when *data*
    is too short.
    """
    if len(data) < 12:
        return None
    (res_id, flags1, flags2,
     res_qdcount, res_ancount, res_nscount, res_arcount) = struct.unpack(
        '!HBBHHHH', data[:12])
    res_qr = flags1 & 128       # response flag
    res_tc = flags1 & 2         # truncated flag
    res_ra = flags2 & 128       # recursion available
    res_rcode = flags2 & 15     # response code
    return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
            res_ancount, res_nscount, res_arcount)
def parse_response(data):
    """Parse a raw DNS response packet into a DNSResponse, or None on error."""
    try:
        if len(data) >= 12:
            header = parse_header(data)
            if not header:
                return None
            res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
                res_ancount, res_nscount, res_arcount = header
            qds = []
            ans = []
            offset = 12
            # Questions first, then answers; authority and additional
            # sections are walked only to advance the offset.
            for i in range(0, res_qdcount):
                l, r = parse_record(data, offset, True)
                offset += l
                if r:
                    qds.append(r)
            for i in range(0, res_ancount):
                l, r = parse_record(data, offset)
                offset += l
                if r:
                    ans.append(r)
            for i in range(0, res_nscount):
                l, r = parse_record(data, offset)
                offset += l
            for i in range(0, res_arcount):
                l, r = parse_record(data, offset)
                offset += l
            response = DNSResponse()
            if qds:
                # Hostname taken from the first question entry.
                response.hostname = qds[0][0]
            # NOTE(review): questions are filled from field an[1], which
            # is always None for question records — confirm intended.
            for an in qds:
                response.questions.append((an[1], an[2], an[3]))
            for an in ans:
                response.answers.append((an[1], an[2], an[3]))
            return response
    except Exception as e:
        # Malformed packets are logged and reported as unparseable.
        shell.print_exception(e)
        return None
def is_valid_hostname(hostname):
    """Return True if *hostname* (bytes) is a syntactically valid DNS name.

    Accepts an optional trailing dot (fully-qualified form).  Each label
    must match VALID_HOSTNAME and the whole name may not exceed 255 bytes.
    """
    if len(hostname) > 255:
        return False
    # The original compared hostname[-1] == b'.', which on Python 3
    # compares an int to bytes and never matches (and raises IndexError
    # on empty input); endswith() is correct on both Python 2 and 3.
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
    return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
class DNSResponse(object):
    """Container for a parsed DNS response."""

    def __init__(self):
        self.hostname = None
        # Both lists hold (addr, type, class) tuples.
        self.questions = []
        self.answers = []

    def __str__(self):
        return '%s: %s' % (self.hostname, str(self.answers))
# Per-hostname resolution progress: an A (IPv4) query is sent first; if it
# yields no usable answer, the resolver retries with an AAAA (IPv6) query.
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
    """Asynchronous DNS resolver driven by the shared event loop.

    Resolution order: literal IPs, /etc/hosts entries, an in-process LRU
    cache, then UDP queries (A first, AAAA as fallback) against the
    configured nameservers.
    """

    def __init__(self, server_list=None):
        self._loop = None
        self._hosts = {}                 # hostname -> ip, from the hosts file
        self._hostname_status = {}       # hostname -> STATUS_IPV4 / STATUS_IPV6
        self._hostname_to_cb = {}        # hostname -> list of pending callbacks
        self._cb_to_hostname = {}        # reverse map, for remove_callback()
        self._cache = lru_cache.LRUCache(timeout=300)
        self._sock = None
        if server_list is None:
            self._servers = None
            self._parse_resolv()
        else:
            self._servers = server_list
        self._parse_hosts()
        # TODO monitor hosts change and reload hosts
        # TODO parse /etc/gai.conf and follow its rules

    def _parse_resolv(self):
        """Collect IPv4 nameservers from /etc/resolv.conf (default: Google DNS)."""
        self._servers = []
        try:
            with open('/etc/resolv.conf', 'rb') as f:
                content = f.readlines()
                for line in content:
                    line = line.strip()
                    if line:
                        if line.startswith(b'nameserver'):
                            parts = line.split()
                            if len(parts) >= 2:
                                server = parts[1]
                                # Only IPv4 servers are usable: the query
                                # socket is created with AF_INET.
                                if common.is_ip(server) == socket.AF_INET:
                                    if type(server) != str:
                                        server = server.decode('utf8')
                                    self._servers.append(server)
        except IOError:
            pass
        if not self._servers:
            self._servers = ['8.8.4.4', '8.8.8.8']

    def _parse_hosts(self):
        """Load static hostname -> ip entries from the platform hosts file."""
        etc_path = '/etc/hosts'
        if 'WINDIR' in os.environ:
            etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
        try:
            with open(etc_path, 'rb') as f:
                for line in f.readlines():
                    line = line.strip()
                    parts = line.split()
                    if len(parts) >= 2:
                        ip = parts[0]
                        if common.is_ip(ip):
                            # One ip may map from several hostnames on a line.
                            for i in range(1, len(parts)):
                                hostname = parts[i]
                                if hostname:
                                    self._hosts[hostname] = ip
        except IOError:
            self._hosts['localhost'] = '127.0.0.1'

    def add_to_loop(self, loop):
        """Register the resolver's UDP socket and periodic sweep with *loop*."""
        if self._loop:
            raise Exception('already add to loop')
        self._loop = loop
        # TODO when dns server is IPv6
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                   socket.SOL_UDP)
        self._sock.setblocking(False)
        loop.add(self._sock, eventloop.POLL_IN, self)
        loop.add_periodic(self.handle_periodic)

    def _call_callback(self, hostname, ip, error=None):
        """Fire and clear every pending callback registered for *hostname*."""
        callbacks = self._hostname_to_cb.get(hostname, [])
        for callback in callbacks:
            if callback in self._cb_to_hostname:
                del self._cb_to_hostname[callback]
            if ip or error:
                callback((hostname, ip), error)
            else:
                # No ip and no explicit error: resolution simply failed.
                callback((hostname, None),
                         Exception('unknown hostname %s' % hostname))
        if hostname in self._hostname_to_cb:
            del self._hostname_to_cb[hostname]
        if hostname in self._hostname_status:
            del self._hostname_status[hostname]

    def _handle_data(self, data):
        """Process one DNS response packet from the wire."""
        response = parse_response(data)
        if response and response.hostname:
            hostname = response.hostname
            ip = None
            # Take the first A/AAAA answer in the IN class.
            for answer in response.answers:
                if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
                        answer[2] == QCLASS_IN:
                    ip = answer[0]
                    break
            if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
                    == STATUS_IPV4:
                # A query came back empty: fall back to an AAAA query.
                self._hostname_status[hostname] = STATUS_IPV6
                self._send_req(hostname, QTYPE_AAAA)
            else:
                if ip:
                    self._cache[hostname] = ip
                    self._call_callback(hostname, ip)
                elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
                    # AAAA fallback also failed: report the failure once.
                    for question in response.questions:
                        if question[1] == QTYPE_AAAA:
                            self._call_callback(hostname, None)
                            break

    def handle_event(self, sock, fd, event):
        """Event-loop hook: read responses; recreate the socket on error."""
        if sock != self._sock:
            return
        if event & eventloop.POLL_ERR:
            logging.error('dns socket err')
            self._loop.remove(self._sock)
            self._sock.close()
            # TODO when dns server is IPv6
            self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                                       socket.SOL_UDP)
            self._sock.setblocking(False)
            self._loop.add(self._sock, eventloop.POLL_IN, self)
        else:
            data, addr = sock.recvfrom(1024)
            # Drop packets that did not come from one of our nameservers.
            if addr[0] not in self._servers:
                logging.warn('received a packet other than our dns')
                return
            self._handle_data(data)

    def handle_periodic(self):
        """Periodic hook: evict expired cache entries."""
        self._cache.sweep()

    def remove_callback(self, callback):
        """Detach *callback* from any in-flight resolution (e.g. peer closed)."""
        hostname = self._cb_to_hostname.get(callback)
        if hostname:
            del self._cb_to_hostname[callback]
            arr = self._hostname_to_cb.get(hostname, None)
            if arr:
                arr.remove(callback)
                if not arr:
                    # Last waiter gone: drop all tracking for the hostname.
                    del self._hostname_to_cb[hostname]
                    if hostname in self._hostname_status:
                        del self._hostname_status[hostname]

    def _send_req(self, hostname, qtype):
        """Send one query of *qtype* for *hostname* to every nameserver."""
        req = build_request(hostname, qtype)
        for server in self._servers:
            logging.debug('resolving %s with type %d using server %s',
                          hostname, qtype, server)
            self._sock.sendto(req, (server, 53))

    def resolve(self, hostname, callback):
        """Resolve *hostname*, invoking callback((hostname, ip), error).

        Short-circuits on literal IPs, hosts-file entries and cache hits;
        otherwise sends an A query and queues the callback. Concurrent
        requests for the same hostname share a single query.
        """
        if type(hostname) != bytes:
            hostname = hostname.encode('utf8')
        if not hostname:
            callback(None, Exception('empty hostname'))
        elif common.is_ip(hostname):
            callback((hostname, hostname), None)
        elif hostname in self._hosts:
            logging.debug('hit hosts: %s', hostname)
            ip = self._hosts[hostname]
            callback((hostname, ip), None)
        elif hostname in self._cache:
            logging.debug('hit cache: %s', hostname)
            ip = self._cache[hostname]
            callback((hostname, ip), None)
        else:
            if not is_valid_hostname(hostname):
                callback(None, Exception('invalid hostname: %s' % hostname))
                return
            arr = self._hostname_to_cb.get(hostname, None)
            if not arr:
                # First request for this hostname: start with an A query.
                self._hostname_status[hostname] = STATUS_IPV4
                self._send_req(hostname, QTYPE_A)
                self._hostname_to_cb[hostname] = [callback]
                self._cb_to_hostname[callback] = hostname
            else:
                arr.append(callback)
                # TODO send again only if waited too long
                self._send_req(hostname, QTYPE_A)

    def close(self):
        """Unregister from the loop and release the UDP socket."""
        if self._sock:
            if self._loop:
                self._loop.remove_periodic(self.handle_periodic)
                self._loop.remove(self._sock)
            self._sock.close()
            self._sock = None
def test():
    # Manual smoke test (needs network access): resolve a batch of
    # hostnames — including invalid and over-long ones — and stop the
    # event loop after all 9 callbacks have fired.
    dns_resolver = DNSResolver()
    loop = eventloop.EventLoop()
    dns_resolver.add_to_loop(loop)

    global counter
    counter = 0

    def make_callback():
        global counter

        def callback(result, error):
            global counter
            # TODO: what can we assert?
            print(result, error)
            counter += 1
            if counter == 9:
                dns_resolver.close()
                loop.stop()
        a_callback = callback
        return a_callback

    # Each call must produce a distinct closure: the resolver keys its
    # reverse map on the callback object itself.
    assert(make_callback() != make_callback())

    dns_resolver.resolve(b'google.com', make_callback())
    dns_resolver.resolve('google.com', make_callback())
    dns_resolver.resolve('example.com', make_callback())
    dns_resolver.resolve('ipv6.google.com', make_callback())
    dns_resolver.resolve('www.facebook.com', make_callback())
    dns_resolver.resolve('ns2.google.com', make_callback())
    dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
    # 105 chars: single label exceeds VALID_HOSTNAME's limit.
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())
    # >255 chars: rejected by is_valid_hostname's overall-length check.
    dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
                         'long.hostname', make_callback())

    loop.run()


if __name__ == '__main__':
    test()
| apache-2.0 |
Catch-up-TV-and-More/plugin.video.catchuptvandmore | resources/lib/web_utils.py | 1 | 2074 | # -*- coding: utf-8 -*-
# Copyright: (c) JUL1EN094, SPM, SylvainCecchetto
# Copyright: (c) 2016, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
import json
from random import randint
from codequick import Script
import urlquick
# Pool of desktop browser user-agent strings sent with HTTP requests.
# get_ua() always returns the first entry; get_random_ua() picks one at random.
# NOTE(review): the first entry concatenates to "...537.36(KHTML..." with no
# space before "(KHTML" — looks like a missing space; confirm before fixing,
# since the exact string is what servers see.
user_agents = [
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36'
    '(KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.75.14'
    ' (KHTML, like Gecko) Version/7.0.3 Safari/7046A194A',
    'Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_1) AppleWebKit/602.2.14'
    ' (KHTML, like Gecko) Version/10.0.1 Safari/602.2.14',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) '
    'AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/55.0.2883.87 Safari/537.36'
]
def get_ua():
    """Return the default user agent (first entry of 'user_agents').

    Returns:
        str: User agent
    """
    default_agent = user_agents[0]
    return default_agent
def get_random_ua():
    """Return a user agent picked uniformly at random from 'user_agents'.

    Returns:
        str: Random user agent
    """
    # random.choice is the idiomatic, off-by-one-proof way to pick an
    # element; imported locally because the module only imports randint.
    from random import choice
    return choice(user_agents)
# code adapted from weather.weatherbit.io - Thanks Ronie
def geoip():
    """Get country code based on IP address

    Returns:
        str: Country code (e.g. FR), or None when the lookup fails
    """
    # better service - https://geoftv-a.akamaihd.net/ws/edgescape.json
    try:
        # max_age=-1 disables urlquick's response cache: geo lookup must be live.
        resp = urlquick.get('https://geoftv-a.akamaihd.net/ws/edgescape.json', max_age=-1)
        data = json.loads(resp.text)
        # NOTE: 'reponse' (sic) is the actual key spelling in this API's
        # JSON payload — do not "fix" it.
        if 'reponse' in data:
            return data['reponse']['geo_info']['country_code']
    except Exception:
        # Best-effort lookup: any failure (network, JSON, schema) falls
        # through to the user-visible warning below.
        pass
    Script.notify(Script.get_info('name'), Script.localize(30724), icon=Script.NOTIFY_WARNING)
    Script.log('Failed to get country code based on IP address', lvl=Script.WARNING)
    return None
| gpl-2.0 |
andrewyoung1991/abjad | abjad/tools/instrumenttools/ContrabassClarinet.py | 2 | 5003 | # -*- encoding: utf-8 -*-
from abjad.tools import indicatortools
from abjad.tools import markuptools
from abjad.tools import pitchtools
from abjad.tools.instrumenttools.Instrument import Instrument
class ContrabassClarinet(Instrument):
    r'''A contrabass clarinet.

    ::

        >>> staff = Staff("c'4 d'4 e'4 fs'4")
        >>> contrabass_clarinet = instrumenttools.ContrabassClarinet()
        >>> attach(contrabass_clarinet, staff)
        >>> show(staff) # doctest: +SKIP

    ..  doctest::

        >>> print(format(staff))
        \new Staff {
            \set Staff.instrumentName = \markup { Contrabass clarinet }
            \set Staff.shortInstrumentName = \markup { Cbass. cl. }
            c'4
            d'4
            e'4
            fs'4
        }

    '''

    ### CLASS VARIABLES ###

    # No per-instance state beyond what Instrument already defines.
    __slots__ = ()

    ### INITIALIZER ###

    def __init__(
        self,
        instrument_name='contrabass clarinet',
        short_instrument_name='cbass. cl.',
        instrument_name_markup=None,
        short_instrument_name_markup=None,
        allowable_clefs=('treble', 'bass'),
        pitch_range='[Bb0, G4]',
        sounding_pitch_of_written_middle_c='Bb1',
        ):
        # Delegate all attribute handling to the Instrument base class;
        # this subclass only supplies contrabass-clarinet defaults.
        Instrument.__init__(
            self,
            instrument_name=instrument_name,
            short_instrument_name=short_instrument_name,
            instrument_name_markup=instrument_name_markup,
            short_instrument_name_markup=short_instrument_name_markup,
            allowable_clefs=allowable_clefs,
            pitch_range=pitch_range,
            sounding_pitch_of_written_middle_c=\
                sounding_pitch_of_written_middle_c,
            )
        # Performer names feed Abjad's instrument-lookup machinery.
        self._performer_names.extend([
            'wind player',
            'reed player',
            'single reed player',
            'clarinettist',
            'clarinetist',
            ])

    ### PUBLIC PROPERTIES ###

    @property
    def allowable_clefs(self):
        r'''Gets contrabass clarinet's allowable clefs.

        ..  container:: example

            ::

                >>> contrabass_clarinet.allowable_clefs
                ClefInventory([Clef(name='treble'), Clef(name='bass')])

            ::

                >>> show(contrabass_clarinet.allowable_clefs) # doctest: +SKIP

        Returns clef inventory.
        '''
        return Instrument.allowable_clefs.fget(self)

    @property
    def instrument_name(self):
        r'''Gets contrabass clarinet's name.

        ..  container:: example

            ::

                >>> contrabass_clarinet.instrument_name
                'contrabass clarinet'

        Returns string.
        '''
        return Instrument.instrument_name.fget(self)

    @property
    def instrument_name_markup(self):
        r'''Gets contrabass clarinet's instrument name markup.

        ..  container:: example

            ::

                >>> contrabass_clarinet.instrument_name_markup
                Markup(contents=('Contrabass clarinet',))

            ::

                >>> show(contrabass_clarinet.instrument_name_markup) # doctest: +SKIP

        Returns markup.
        '''
        return Instrument.instrument_name_markup.fget(self)

    @property
    def pitch_range(self):
        r'''Gets contrabass clarinet's range.

        ..  container:: example

            ::

                >>> contrabass_clarinet.pitch_range
                PitchRange(range_string='[Bb0, G4]')

            ::

                >>> show(contrabass_clarinet.pitch_range) # doctest: +SKIP

        Returns pitch range.
        '''
        return Instrument.pitch_range.fget(self)

    @property
    def short_instrument_name(self):
        r'''Gets contrabass clarinet's short instrument name.

        ..  container:: example

            ::

                >>> contrabass_clarinet.short_instrument_name
                'cbass. cl.'

        Returns string.
        '''
        return Instrument.short_instrument_name.fget(self)

    @property
    def short_instrument_name_markup(self):
        r'''Gets contrabass clarinet's short instrument name markup.

        ..  container:: example

            ::

                >>> contrabass_clarinet.short_instrument_name_markup
                Markup(contents=('Cbass. cl.',))

            ::

                >>> show(contrabass_clarinet.short_instrument_name_markup) # doctest: +SKIP

        Returns markup.
        '''
        return Instrument.short_instrument_name_markup.fget(self)

    @property
    def sounding_pitch_of_written_middle_c(self):
        r'''Gets sounding pitch of contrabass_clarinet's written middle C.

        ..  container:: example

            ::

                >>> contrabass_clarinet.sounding_pitch_of_written_middle_c
                NamedPitch('bf,,')

            ::

                >>> show(contrabass_clarinet.sounding_pitch_of_written_middle_c) # doctest: +SKIP

        Returns named pitch.
        '''
        return Instrument.sounding_pitch_of_written_middle_c.fget(self)
| gpl-3.0 |
maxalbert/ansible | lib/ansible/playbook/block.py | 13 | 11985 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_tasks
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
class Block(Base, Become, Conditional, Taggable):
    '''
    A playbook block: a list of tasks (``block``) with optional error
    handling (``rescue``) and always-run (``always``) task lists, plus
    links up the parent chain (enclosing block, task include, role, play)
    used to inherit attribute values.
    '''

    _block = FieldAttribute(isa='list', default=[])
    _rescue = FieldAttribute(isa='list', default=[])
    _always = FieldAttribute(isa='list', default=[])

    # for future consideration? this would be functionally
    # similar to the 'else' clause for exceptions
    #_otherwise = FieldAttribute(isa='list')

    def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False):
        # Parent references are kept outside the attribute system so they
        # are not serialized with the block's own data.
        self._play = play
        self._role = role
        self._task_include = task_include
        self._parent_block = parent_block
        self._use_handlers = use_handlers
        self._dep_chain = []

        super(Block, self).__init__()

    def get_vars(self):
        '''
        Blocks do not store variables directly, however they may be a member
        of a role or task include which does, so return those if present.
        '''
        all_vars = self.vars.copy()

        if self._role:
            all_vars.update(self._role.get_vars(self._dep_chain))
        if self._parent_block:
            all_vars.update(self._parent_block.get_vars())
        if self._task_include:
            all_vars.update(self._task_include.get_vars())

        return all_vars

    @staticmethod
    def load(data, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
        '''
        Construct a Block and populate it from the parsed YAML datastructure.
        '''
        b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers)
        return b.load_data(data, variable_manager=variable_manager, loader=loader)

    def preprocess_data(self, ds):
        '''
        If a simple task is given, an implicit block for that single task
        is created, which goes in the main portion of the block
        '''
        is_block = False
        for attr in ('block', 'rescue', 'always'):
            if attr in ds:
                is_block = True
                break
        if not is_block:
            if isinstance(ds, list):
                return super(Block, self).preprocess_data(dict(block=ds))
            else:
                return super(Block, self).preprocess_data(dict(block=[ds]))

        return super(Block, self).preprocess_data(ds)

    def _load_block(self, attr, ds):
        # Loader for the 'block' field attribute.
        return load_list_of_tasks(
            ds,
            play=self._play,
            block=self,
            role=self._role,
            task_include=self._task_include,
            variable_manager=self._variable_manager,
            loader=self._loader,
            use_handlers=self._use_handlers,
        )

    def _load_rescue(self, attr, ds):
        # Loader for the 'rescue' field attribute.
        return load_list_of_tasks(
            ds,
            play=self._play,
            block=self,
            role=self._role,
            task_include=self._task_include,
            variable_manager=self._variable_manager,
            loader=self._loader,
            use_handlers=self._use_handlers,
        )

    def _load_always(self, attr, ds):
        # Loader for the 'always' field attribute.
        return load_list_of_tasks(
            ds,
            play=self._play,
            block=self,
            role=self._role,
            task_include=self._task_include,
            variable_manager=self._variable_manager,
            loader=self._loader,
            use_handlers=self._use_handlers,
        )

    # not currently used
    #def _load_otherwise(self, attr, ds):
    #    return load_list_of_tasks(
    #        ds,
    #        play=self._play,
    #        block=self,
    #        role=self._role,
    #        task_include=self._task_include,
    #        variable_manager=self._variable_manager,
    #        loader=self._loader,
    #        use_handlers=self._use_handlers,
    #    )

    def copy(self, exclude_parent=False):
        '''
        Deep-ish copy: task lists are duplicated and re-parented onto the
        new block; the role is shared, the task include is copied.
        '''
        def _dupe_task_list(task_list, new_block):
            new_task_list = []
            for task in task_list:
                if isinstance(task, Block):
                    new_task = task.copy(exclude_parent=True)
                    new_task._parent_block = new_block
                else:
                    new_task = task.copy(exclude_block=True)
                    new_task._block = new_block
                new_task_list.append(new_task)
            return new_task_list

        new_me = super(Block, self).copy()
        new_me._play = self._play
        new_me._use_handlers = self._use_handlers
        new_me._dep_chain = self._dep_chain[:]

        new_me.block = _dupe_task_list(self.block or [], new_me)
        new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
        new_me.always = _dupe_task_list(self.always or [], new_me)

        new_me._parent_block = None
        if self._parent_block and not exclude_parent:
            new_me._parent_block = self._parent_block.copy()

        new_me._role = None
        if self._role:
            new_me._role = self._role

        new_me._task_include = None
        if self._task_include:
            new_me._task_include = self._task_include.copy()

        return new_me

    def serialize(self):
        '''
        Override of the default serialize method, since when we're serializing
        a task we don't want to include the attribute list of tasks.
        '''
        data = dict()
        for attr in self._get_base_attributes():
            if attr not in ('block', 'rescue', 'always'):
                data[attr] = getattr(self, attr)

        data['dep_chain'] = self._dep_chain

        if self._role is not None:
            data['role'] = self._role.serialize()
        if self._task_include is not None:
            data['task_include'] = self._task_include.serialize()

        return data

    def deserialize(self, data):
        '''
        Override of the default deserialize method, to match the above overridden
        serialize method
        '''
        from ansible.playbook.task import Task

        # we don't want the full set of attributes (the task lists), as that
        # would lead to a serialize/deserialize loop
        for attr in self._get_base_attributes():
            if attr in data and attr not in ('block', 'rescue', 'always'):
                setattr(self, attr, data.get(attr))

        self._dep_chain = data.get('dep_chain', [])

        # if there was a serialized role, unpack it too
        role_data = data.get('role')
        if role_data:
            r = Role()
            r.deserialize(role_data)
            self._role = r

        # if there was a serialized task include, unpack it too
        ti_data = data.get('task_include')
        if ti_data:
            ti = Task()
            ti.deserialize(ti_data)
            self._task_include = ti

    def evaluate_conditional(self, templar, all_vars):
        '''
        A block's conditional holds only if every dependency, the task
        include, and the parent block (or role) also evaluate true.
        '''
        if len(self._dep_chain):
            for dep in self._dep_chain:
                if not dep.evaluate_conditional(templar, all_vars):
                    return False
        if self._task_include is not None:
            if not self._task_include.evaluate_conditional(templar, all_vars):
                return False
        if self._parent_block is not None:
            if not self._parent_block.evaluate_conditional(templar, all_vars):
                return False
        elif self._role is not None:
            if not self._role.evaluate_conditional(templar, all_vars):
                return False
        return super(Block, self).evaluate_conditional(templar, all_vars)

    def set_loader(self, loader):
        '''
        Propagate the DataLoader to this block and its whole parent chain.
        '''
        self._loader = loader
        if self._parent_block:
            self._parent_block.set_loader(loader)
        elif self._role:
            self._role.set_loader(loader)

        if self._task_include:
            self._task_include.set_loader(loader)

        for dep in self._dep_chain:
            dep.set_loader(loader)

    def _get_parent_attribute(self, attr, extend=False):
        '''
        Generic logic to get the attribute or parent attribute for a block value.
        Lookup order: own value, parent block, task include, role, dependency
        chain (reversed), play. With extend=True values are merged instead of
        the first non-None winning.
        '''
        value = self._attributes[attr]
        if self._parent_block and (value is None or extend):
            parent_value = getattr(self._parent_block, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value
        if self._task_include and (value is None or extend):
            parent_value = getattr(self._task_include, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value
        if self._role and (value is None or extend):
            parent_value = getattr(self._role, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value

            if len(self._dep_chain) and (not value or extend):
                reverse_dep_chain = self._dep_chain[:]
                reverse_dep_chain.reverse()
                for dep in reverse_dep_chain:
                    dep_value = getattr(dep, attr)
                    if extend:
                        value = self._extend_value(value, dep_value)
                    else:
                        value = dep_value

                    if value is not None and not extend:
                        break
        if self._play and (value is None or extend):
            parent_value = getattr(self._play, attr)
            if extend:
                value = self._extend_value(value, parent_value)
            else:
                value = parent_value

        return value

    def _get_attr_environment(self):
        '''
        Override for the 'environment' getattr fetcher, used from Base.
        '''
        # FIX: the previous implementation first read
        # self._attributes['tags'] (a copy/paste slip from the tags fetcher,
        # and its docstring said 'tags' too) and then unconditionally
        # overwrote the value — those lines were dead code. Only the parent
        # lookup matters.
        return self._get_parent_attribute('environment', extend=True)

    def filter_tagged_tasks(self, play_context, all_vars):
        '''
        Creates a new block, with task lists filtered based on the tags contained
        within the play_context object.
        '''
        def evaluate_and_append_task(target):
            tmp_list = []
            for task in target:
                # meta/include tasks always pass through the tag filter
                if task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
                    tmp_list.append(task)
            return tmp_list

        new_block = self.copy()
        new_block.block = evaluate_and_append_task(self.block)
        new_block.rescue = evaluate_and_append_task(self.rescue)
        new_block.always = evaluate_and_append_task(self.always)

        return new_block

    def has_tasks(self):
        '''
        Return True when any of the three task lists is non-empty.
        '''
        return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
| gpl-3.0 |
fuhongliang/erpnext | erpnext/manufacturing/doctype/bom_replace_tool/bom_replace_tool.py | 59 | 1375 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, flt
from frappe import _
from frappe.model.document import Document
class BOMReplaceTool(Document):
    """Replace every reference to one BOM with another across BOM Items."""

    def replace_bom(self):
        """Validate, rewrite child BOM rows, then refresh all parent BOMs."""
        self.validate_bom()
        self.update_new_bom()
        refreshed = []
        for parent_bom in self.get_parent_boms():
            bom_doc = frappe.get_doc("BOM", parent_bom)
            refreshed = bom_doc.update_cost_and_exploded_items(refreshed)
        frappe.msgprint(_("BOM replaced"))

    def validate_bom(self):
        """Reject a no-op replacement of a BOM with itself."""
        if cstr(self.current_bom) == cstr(self.new_bom):
            frappe.throw(_("Current BOM and New BOM can not be same"))

    def update_new_bom(self):
        """Repoint child BOM Items at the new BOM, repricing at the old
        BOM's unit cost."""
        rows = frappe.db.sql("""select total_cost/quantity
            from `tabBOM` where name = %s""", self.current_bom)
        unit_cost = flt(rows[0][0]) if rows else 0
        frappe.db.sql("""update `tabBOM Item` set bom_no=%s,
            rate=%s, amount=qty*%s where bom_no = %s and docstatus < 2""",
            (self.new_bom, unit_cost, unit_cost, self.current_bom))

    def get_parent_boms(self):
        """Distinct parents of active BOM Items referencing the new BOM."""
        rows = frappe.db.sql("""select distinct parent
            from `tabBOM Item` where ifnull(bom_no, '') = %s and docstatus < 2""",
            self.new_bom)
        return [row[0] for row in rows]
| agpl-3.0 |
bcantoni/ccm | ccmlib/cmds/command.py | 2 | 3493 |
from __future__ import absolute_import
import sys
from optparse import BadOptionError, Option, OptionParser
from six import print_
from ccmlib import common
from ccmlib.cluster_factory import ClusterFactory
# This is fairly fragile, but handy for now
class ForgivingParser(OptionParser):
def __init__(self, usage=None, option_list=None, option_class=Option, version=None, conflict_handler="error", description=None, formatter=None, add_help_option=True, prog=None, epilog=None):
OptionParser.__init__(self, usage, option_list, option_class, version, conflict_handler, description, formatter, add_help_option, prog, epilog)
self.ignored = []
def _process_short_opts(self, rargs, values):
opt = rargs[0]
try:
OptionParser._process_short_opts(self, rargs, values)
except BadOptionError:
self.ignored.append(opt)
self.eat_args(rargs)
def _process_long_opt(self, rargs, values):
opt = rargs[0]
try:
OptionParser._process_long_opt(self, rargs, values)
except BadOptionError:
self.ignored.append(opt)
self.eat_args(rargs)
def eat_args(self, rargs):
while len(rargs) > 0 and rargs[0][0] != '-':
self.ignored.append(rargs.pop(0))
def get_ignored(self):
return self.ignored
class Cmd(object):
    """Base class for ccm CLI subcommands.

    Subclasses override get_parser()/run()/description(); validate() is
    the shared argument-handling entry point. Note: failures call the
    builtin exit(1) after printing to stderr.
    """

    def get_parser(self):
        # Overridden by subclasses to return their OptionParser.
        pass

    def validate(self, parser, options, args, cluster_name=False, node_name=False, load_cluster=False, load_node=True):
        """Populate self.options/args/path (and optionally name, cluster,
        node) from parsed CLI input; exits the process on bad input."""
        self.options = options
        self.args = args
        if options.config_dir is None:
            self.path = common.get_default_path()
        else:
            self.path = options.config_dir
        if cluster_name:
            if len(args) == 0:
                print_('Missing cluster name', file=sys.stderr)
                parser.print_help()
                exit(1)
            self.name = args[0]
        if node_name:
            if len(args) == 0:
                print_('Missing node name', file=sys.stderr)
                parser.print_help()
                exit(1)
            self.name = args[0]
        if load_cluster:
            self.cluster = self._load_current_cluster()
            if node_name and load_node:
                try:
                    self.node = self.cluster.nodes[self.name]
                except KeyError:
                    print_('Unknown node %s in cluster %s' % (self.name, self.cluster.name), file=sys.stderr)
                    exit(1)

    def run(self):
        # Overridden by subclasses with the command's actual behavior.
        pass

    def _get_default_parser(self, usage, description, ignore_unknown_options=False):
        """Build the parser shared by all commands; a ForgivingParser is
        used when unknown options must be passed through untouched."""
        if ignore_unknown_options:
            parser = ForgivingParser(usage=usage, description=description)
        else:
            parser = OptionParser(usage=usage, description=description)
        parser.add_option('--config-dir', type="string", dest="config_dir",
                          help="Directory for the cluster files [default to {0}]".format(common.get_default_path_display_name()))
        return parser

    def description(self):
        # Short help text shown in command listings; overridden by subclasses.
        return ""

    def _load_current_cluster(self):
        """Load the currently active cluster, exiting with a message when
        none is selected or loading fails."""
        name = common.current_cluster_name(self.path)
        if name is None:
            print_('No currently active cluster (use ccm cluster switch)')
            exit(1)
        try:
            return ClusterFactory.load(self.path, name)
        except common.LoadError as e:
            print_(str(e))
            exit(1)
| apache-2.0 |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/factorization/python/ops/gen_clustering_ops.py | 1 | 12595 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: gen_clustering_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
# MACHINE GENERATED op wrapper — edits should normally go to the op's C++
# definition, not here.
@tf_export('kmc2_chain_initialization')
def kmc2_chain_initialization(distances, seed, name=None):
  r"""Returns the index of a data point that should be added to the seed set.

  Entries in distances are assumed to be squared distances of candidate points to
  the already sampled centers in the seed set. The op constructs one Markov chain
  of the k-MC^2 algorithm and returns the index of one candidate point to be added
  as an additional cluster center.

  Args:
    distances: A `Tensor` of type `float32`.
      Vector with squared distances to the closest previously sampled
      cluster center for each candidate point.
    seed: A `Tensor` of type `int64`.
      Scalar. Seed for initializing the random number generator.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`. Scalar with the index of the sampled point.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "KMC2ChainInitialization", distances=distances, seed=seed, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "KMC2ChainInitialization", name, _ctx._post_execution_callbacks,
          distances, seed)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs: fall back to the slow eager path.
      return kmc2_chain_initialization_eager_fallback(
          distances, seed, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def kmc2_chain_initialization_eager_fallback(distances, seed, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function kmc2_chain_initialization
  """
  _ctx = ctx if ctx else _context.context()
  # Convert inputs to EagerTensors of the op's declared dtypes.
  distances = _ops.convert_to_tensor(distances, _dtypes.float32)
  seed = _ops.convert_to_tensor(seed, _dtypes.int64)
  _inputs_flat = [distances, seed]
  _attrs = None
  _result = _execute.execute(b"KMC2ChainInitialization", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient(
      "KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
  _result, = _result
  return _result

# Shape inference is done in C++; no Python shape function is registered.
_ops.RegisterShape("KMC2ChainInitialization")(None)
# MACHINE GENERATED op wrapper — edits should normally go to the op's C++
# definition, not here.
@tf_export('kmeans_plus_plus_initialization')
def kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample, name=None):
  r"""Selects num_to_sample rows of input using the KMeans++ criterion.

  Rows of points are assumed to be input points. One row is selected at random.
  Subsequent rows are sampled with probability proportional to the squared L2
  distance from the nearest row selected thus far till num_to_sample rows have
  been sampled.

  Args:
    points: A `Tensor` of type `float32`.
      Matrix of shape (n, d). Rows are assumed to be input points.
    num_to_sample: A `Tensor` of type `int64`.
      Scalar. The number of rows to sample. This value must not be
      larger than n.
    seed: A `Tensor` of type `int64`.
      Scalar. Seed for initializing the random number generator.
    num_retries_per_sample: A `Tensor` of type `int64`.
      Scalar. For each row that is sampled, this parameter
      specifies the number of additional points to draw from the current
      distribution before selecting the best. If a negative value is specified, a
      heuristic is used to sample O(log(num_to_sample)) additional points.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
    Matrix of shape (num_to_sample, d). The sampled rows.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add the op to the current graph and record the gradient.
    _, _, _op = _op_def_lib._apply_op_helper(
        "KmeansPlusPlusInitialization", points=points,
        num_to_sample=num_to_sample, seed=seed,
        num_retries_per_sample=num_retries_per_sample, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
          _ctx._context_handle, _ctx._eager_context.device_name,
          "KmeansPlusPlusInitialization", name, _ctx._post_execution_callbacks,
          points, num_to_sample, seed, num_retries_per_sample)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs: fall back to the slow eager path.
      return kmeans_plus_plus_initialization_eager_fallback(
          points, num_to_sample, seed, num_retries_per_sample, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def kmeans_plus_plus_initialization_eager_fallback(points, num_to_sample, seed, num_retries_per_sample, name=None, ctx=None):
  r"""Slow-path eager fallback for kmeans_plus_plus_initialization.

  Converts the inputs to tensors explicitly and dispatches through the
  generic execute machinery instead of the C fast path.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce every input to the dtype declared in the op definition.
  converted = [
      _ops.convert_to_tensor(points, _dtypes.float32),
      _ops.convert_to_tensor(num_to_sample, _dtypes.int64),
      _ops.convert_to_tensor(seed, _dtypes.int64),
      _ops.convert_to_tensor(num_retries_per_sample, _dtypes.int64),
  ]
  outputs = _execute.execute(b"KmeansPlusPlusInitialization", 1,
                             inputs=converted, attrs=None, ctx=eager_ctx,
                             name=name)
  _execute.record_gradient(
      "KmeansPlusPlusInitialization", converted, None, outputs, name)
  sampled, = outputs
  return sampled
# Shape inference for this generated op is implemented in C++; register None.
_ops.RegisterShape("KmeansPlusPlusInitialization")(None)

# NearestNeighbors returns two tensors; expose them as a named tuple.
_nearest_neighbors_outputs = ["nearest_center_indices",
                              "nearest_center_distances"]

_NearestNeighborsOutput = _collections.namedtuple(
    "NearestNeighbors", _nearest_neighbors_outputs)
@tf_export('nearest_neighbors')
def nearest_neighbors(points, centers, k, name=None):
  r"""Selects the k nearest centers for each point.

  Rows of points are assumed to be input points. Rows of centers are assumed to be
  the list of candidate centers. For each point, the k centers that have least L2
  distance to it are computed.

  Args:
    points: A `Tensor` of type `float32`.
      Matrix of shape (n, d). Rows are assumed to be input points.
    centers: A `Tensor` of type `float32`.
      Matrix of shape (m, d). Rows are assumed to be centers.
    k: A `Tensor` of type `int64`.
      Scalar. Number of nearest centers to return for each point. If k is larger
      than m, then only m centers are returned.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (nearest_center_indices, nearest_center_distances).

    nearest_center_indices: A `Tensor` of type `int64`. Matrix of shape (n, min(m, k)). Each row contains the
      indices of the centers closest to the corresponding point, ordered by
      increasing distance.
    nearest_center_distances: A `Tensor` of type `float32`. Matrix of shape (n, min(m, k)). Each row contains the
      squared L2 distance to the corresponding center in nearest_center_indices.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an op node and record it for gradient bookkeeping.
    _, _, _op = _op_def_lib._apply_op_helper(
        "NearestNeighbors", points=points, centers=centers, k=k, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
        "NearestNeighbors", _inputs_flat, _attrs, _result, name)
    _result = _NearestNeighborsOutput._make(_result)
    return _result
  else:
    # Eager mode: try the C fast path first; fall back to the Python slow
    # path when the fast path signals it cannot handle these inputs.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "NearestNeighbors", name, _ctx._post_execution_callbacks, points,
        centers, k)
      _result = _NearestNeighborsOutput._make(_result)
      return _result
    except _core._FallbackException:
      return nearest_neighbors_eager_fallback(
          points, centers, k, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Convert C-level status errors into Python exceptions, tagging the
      # message with the op name when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
def nearest_neighbors_eager_fallback(points, centers, k, name=None, ctx=None):
  r"""Slow-path eager fallback for nearest_neighbors.

  Converts the inputs to tensors explicitly and dispatches through the
  generic execute machinery instead of the C fast path.
  """
  eager_ctx = ctx if ctx else _context.context()
  # Coerce every input to the dtype declared in the op definition.
  converted = [
      _ops.convert_to_tensor(points, _dtypes.float32),
      _ops.convert_to_tensor(centers, _dtypes.float32),
      _ops.convert_to_tensor(k, _dtypes.int64),
  ]
  outputs = _execute.execute(b"NearestNeighbors", 2, inputs=converted,
                             attrs=None, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "NearestNeighbors", converted, None, outputs, name)
  return _NearestNeighborsOutput._make(outputs)
_ops.RegisterShape("NearestNeighbors")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList proto, register it, and return an OpDefLibrary."""
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
# op {
# name: "KMC2ChainInitialization"
# input_arg {
# name: "distances"
# type: DT_FLOAT
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# output_arg {
# name: "index"
# type: DT_INT64
# }
# }
# op {
# name: "KmeansPlusPlusInitialization"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "num_to_sample"
# type: DT_INT64
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# input_arg {
# name: "num_retries_per_sample"
# type: DT_INT64
# }
# output_arg {
# name: "samples"
# type: DT_FLOAT
# }
# }
# op {
# name: "NearestNeighbors"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "centers"
# type: DT_FLOAT
# }
# input_arg {
# name: "k"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_indices"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_distances"
# type: DT_FLOAT
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n=\n\027KMC2ChainInitialization\022\r\n\tdistances\030\001\022\010\n\004seed\030\t\032\t\n\005index\030\t\np\n\034KmeansPlusPlusInitialization\022\n\n\006points\030\001\022\021\n\rnum_to_sample\030\t\022\010\n\004seed\030\t\022\032\n\026num_retries_per_sample\030\t\032\013\n\007samples\030\001\nl\n\020NearestNeighbors\022\n\n\006points\030\001\022\013\n\007centers\030\001\022\005\n\001k\030\t\032\032\n\026nearest_center_indices\030\t\032\034\n\030nearest_center_distances\030\001")
| mit |
tempbottle/ironpython3 | Src/StdLib/Lib/test/test_select.py | 84 | 2742 | import errno
import os
import select
import sys
import unittest
from test import support
@unittest.skipIf((sys.platform[:3]=='win'),
                 "can't easily test on this system")
class SelectTestCase(unittest.TestCase):
    """Tests for select.select(): argument validation, EBADF behavior,
    identity of the returned lists, and basic readiness polling."""

    class Nope:
        # Has no fileno() at all: select() must reject it with TypeError.
        pass

    class Almost:
        # fileno() exists but returns a non-int: also rejected.
        def fileno(self):
            return 'fileno'

    def test_error_conditions(self):
        self.assertRaises(TypeError, select.select, 1, 2, 3)
        self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
        self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
        self.assertRaises(TypeError, select.select, [], [], [], "not a number")
        self.assertRaises(ValueError, select.select, [], [], [], -1)

    # Issue #12367: http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/155606
    @unittest.skipIf(sys.platform.startswith('freebsd'),
                     'skip because of a FreeBSD bug: kern/155606')
    def test_errno(self):
        # Selecting on an already-closed fd must fail with EBADF.
        with open(__file__, 'rb') as fp:
            fd = fp.fileno()
            fp.close()
            try:
                select.select([fd], [], [], 0)
            except OSError as err:
                self.assertEqual(err.errno, errno.EBADF)
            else:
                self.fail("exception not raised")

    def test_returned_list_identity(self):
        # See issue #8329: the three returned lists must be distinct objects.
        r, w, x = select.select([], [], [], 1)
        self.assertIsNot(r, w)
        self.assertIsNot(r, x)
        self.assertIsNot(w, x)

    def test_select(self):
        # Poll a shell pipe with increasing timeouts until EOF is seen.
        cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
        p = os.popen(cmd, 'r')
        for tout in (0, 1, 2, 4, 8, 16) + (None,)*10:
            if support.verbose:
                print('timeout =', tout)
            rfd, wfd, xfd = select.select([p], [], [], tout)
            if (rfd, wfd, xfd) == ([], [], []):
                continue
            if (rfd, wfd, xfd) == ([p], [], []):
                line = p.readline()
                if support.verbose:
                    print(repr(line))
                if not line:
                    if support.verbose:
                        print('EOF')
                    break
                continue
            # BUG FIX: TestCase.fail() takes a single message argument; the
            # previous call passed extra positional arguments and would have
            # raised TypeError instead of reporting the failure.
            self.fail('Unexpected return values from select(): %r, %r, %r'
                      % (rfd, wfd, xfd))
        p.close()

    # Issue 16230: Crash on select resized list
    def test_select_mutated(self):
        a = []

        class F:
            def fileno(self):
                # Shrink the list while select() is still collecting filenos.
                del a[-1]
                return sys.__stdout__.fileno()

        a[:] = [F()] * 10
        self.assertEqual(select.select([], a, []), ([], a[:5], []))
def test_main():
    # Run the full SelectTestCase suite, then reap any child processes
    # spawned via os.popen in test_select.
    support.run_unittest(SelectTestCase)
    support.reap_children()

if __name__ == "__main__":
    test_main()
| apache-2.0 |
peri-source/peri | scripts/does_matter/pixel-integration.py | 1 | 3809 | import pickle
import numpy as np
import scipy as sp
import scipy.ndimage as nd
import scipy.interpolate as intr
import common
from peri import const, runner
from peri.test import init
def pxint(radius=8, factor=8, dx=np.array([0,0,0])):
    """Build a coarse-grained single-particle image plus a matching state.

    Renders a particle at `factor` times the goal resolution, block-averages
    the model image down by `factor` in y/x (z stays at full resolution via
    the zscale), and returns a fresh goal-resolution state together with the
    coarse-grained image.

    NOTE(review): this is Python 2 code; the index construction below relies
    on integer division (`e/f`) -- confirm before porting to Python 3.
    """
    # the factor of coarse-graining, goal particle size, and larger size
    f = factor

    goalsize = radius
    goalpsf = np.array([2.0, 1.0, 3.0])

    bigsize = goalsize * f
    bigpsf = goalpsf * np.array([f,f,1])

    s0 = init.create_single_particle_state(
        imsize=np.array((4*goalsize, 4*bigsize, 4*bigsize)),
        radius=bigsize, psfargs={'params': bigpsf, 'error': 1e-6},
        stateargs={'zscale': 1.0*f})
    # Center the particle on the fine grid, then apply the sub-pixel shift dx
    # (scaled by f in-plane so it survives the coarse-graining).
    s0.obj.pos += np.array([0,1,1]) * (f-1.0)/2.0
    s0.obj.pos += np.array([1,f,f]) * dx
    s0.reset()

    # coarse-grained image
    sl = np.s_[s0.pad:-s0.pad,s0.pad:-s0.pad,s0.pad:-s0.pad]
    m = s0.get_model_image()[sl]

    # indices for coarse-graining: each fine pixel is labeled by the coarse
    # cell it belongs to, so nd.mean can average per cell
    e = m.shape[1]
    i = np.linspace(0, e/f, e, endpoint=False).astype('int')
    j = np.linspace(0, e/f, e/f, endpoint=False).astype('int')
    z,y,x = np.meshgrid(*(j,i,i), indexing='ij')
    ind = x + e*y + e*e*z

    # finally, c-g'ed image
    cg = nd.mean(m, labels=ind, index=np.unique(ind)).reshape(e/f, e/f, e/f)

    # place that into a new image at the expected parameters
    s = init.create_single_particle_state(imsize=4*goalsize, sigma=0.05,
        radius=goalsize, psfargs={'params': goalpsf, 'error': 1e-6})
    s.obj.pos += dx
    s.reset()

    # measure the true inferred parameters
    return s, cg
def dorun(SNR=20, sweeps=20, burn=8, noise_samples=10):
    """
    we want to display the errors introduced by pixelation so we plot:
        * zero noise, cg image, fit
        * SNR 20, cg image, fit
        * CRB for both

    a = dorun(noise_samples=30, sweeps=24, burn=12, SNR=20)
    """
    # NOTE(review): `crb` and `sample` are not defined in this module view;
    # presumably star-imported via `common` or defined elsewhere -- verify.
    radii = np.linspace(2,10,8, endpoint=False)
    crbs, vals, errs = [], [], []

    for radius in radii:
        print 'radius', radius
        s,im = pxint(radius=radius, factor=4)
        # NOTE(review): goodstate is captured but never used below -- verify
        # whether it was intended for comparison against sampled values.
        goodstate = s.state.copy()

        common.set_image(s, im, 1.0/SNR)
        tcrb = crb(s)
        tval, terr = sample(s, im, 1.0/SNR, N=noise_samples, sweeps=sweeps, burn=burn)
        crbs.append(tcrb)
        vals.append(tval)
        errs.append(terr)

    return np.array(crbs), np.array(vals), np.array(errs), radii
def doplot(prefix='/media/scratch/peri/does_matter/pixint', snrs=[20,200,2000]):
    """Plot position CRB and sampled position error versus particle radius,
    one curve pair per SNR, loading results pickled by dorun().

    NOTE(review): `COLORS`, `ax` and `pl` are not defined in this module
    view -- presumably provided by `common` or by module-level plotting
    setup; verify before running standalone. The mutable default `snrs`
    is never mutated here, so it is harmless.
    """
    s,im = pxint(radius=8, factor=8, dx=np.array([0,0,0]))
    nn = np.s_[:,:,im.shape[2]/2]
    diff = (im - s.get_model_image()[s.inner])
    # NOTE(review): image0/image1 are computed but unused below -- verify.
    image0, image1 = im[nn], diff[nn]

    def interp(t, c):
        # Quadratic interpolation onto a dense grid for smooth curves.
        x = np.linspace(t[0], t[-1], 1000)
        f = intr.interp1d(t, c, kind='quadratic')
        return x, f(x)

    for i,(c,snr) in enumerate(zip(COLORS, snrs)):
        fn = prefix+'-snr'+str(snr)+'.pkl'
        crb, val, err, radii = pickle.load(open(fn))

        # Mean error over noise samples; column 0 is the position component.
        d = lambda x: x.mean(axis=1)[:,0]

        # Spell out "SNR" only on the first legend entry to keep it compact.
        if i == 0:
            label0 = r"$\rm{SNR} = %i$ CRB" % snr
            label1 = r"$\rm{SNR} = %i$ Error" % snr
        else:
            label0 = r"$%i$, CRB" % snr
            label1 = r"$%i$, Error" % snr

        ax.plot(*interp(radii, crb[:,1]), ls='-', c=c, lw=3, label=label0)
        ax.plot(radii, d(err), 'o', ls='--', lw=0, c=c, ms=12, label=label1)
        #if i == 1:
        #    x,y = interp(radii, crb[:,1])
        #    pl.fill_between(x, y/2-y/2/7, y/2+y/2/7, color='k', alpha=0.2)

    ax.semilogy()
    ax.set_xlim(radii[0], radii[-1])
    ax.set_ylim(1e-5, 1e0)
    ax.set_xlabel(r"Particle radius (px)")
    ax.set_ylabel(r"Position CRB, Error (px)")
    ax.legend(loc='best', numpoints=1, ncol=3, prop={'size': 16})
    ax.grid(False, which='both', axis='both')
    ax.set_title("Pixel integration")
| mit |
muffinresearch/addons-server | apps/reviews/tests/test_views.py | 10 | 22021 | # -*- coding: utf-8 -*-
import json
from nose.tools import eq_
from pyquery import PyQuery as pq
import mock
import amo.tests
from amo import helpers
from access.models import Group, GroupUser
from addons.models import Addon, AddonUser
from devhub.models import ActivityLog
from reviews.models import Review, ReviewFlag
from users.models import UserProfile
class ReviewTest(amo.tests.TestCase):
    """Shared fixtures and login helpers for the review view tests."""

    fixtures = ['reviews/dev-reply.json', 'base/admin']

    def setUp(self):
        super(ReviewTest, self).setUp()
        self.addon = Addon.objects.get(id=1865)

    def _login_as(self, email):
        # Every fixture account shares the same test password.
        self.client.login(username=email, password='password')

    def login_dev(self):
        self._login_as('trev@adblockplus.org')

    def login_admin(self):
        self._login_as('jbalogh@mozilla.com')

    def make_it_my_review(self, review_id=218468):
        """Reassign the given review to the admin fixture user."""
        review = Review.objects.get(id=review_id)
        review.user = UserProfile.objects.get(username='jbalogh')
        review.save()
class TestViews(ReviewTest):
    """Read-only review views: detail page, listing, RSS feed."""

    def test_dev_reply(self):
        # 218468 is the developer-reply review from the dev-reply fixture.
        url = helpers.url('addons.reviews.detail', self.addon.slug, 218468)
        r = self.client.get(url)
        eq_(r.status_code, 200)

    def test_dev_no_rss(self):
        # A single-review detail page must not advertise an RSS feed.
        url = helpers.url('addons.reviews.detail', self.addon.slug, 218468)
        r = self.client.get(url)
        doc = pq(r.content)
        eq_(doc('link[title=RSS]').length, 0)

    def test_404_user_page(self):
        url = helpers.url('addons.reviews.user', self.addon.slug, 233452342)
        r = self.client.get(url)
        eq_(r.status_code, 404)

    def test_feed(self):
        url = helpers.url('addons.reviews.list.rss', self.addon.slug)
        r = self.client.get(url)
        eq_(r.status_code, 200)

    def test_abuse_form(self):
        # The report-review form is embedded on both list and detail pages.
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        self.assertTemplateUsed(r, 'reviews/report_review.html')
        r = self.client.get(helpers.url('addons.reviews.detail',
                                        self.addon.slug, 218468))
        self.assertTemplateUsed(r, 'reviews/report_review.html')

    def test_edit_review_form(self):
        # The edit-review form is embedded on both list and detail pages.
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        self.assertTemplateUsed(r, 'reviews/edit_review.html')
        r = self.client.get(helpers.url('addons.reviews.detail',
                                        self.addon.slug, 218468))
        self.assertTemplateUsed(r, 'reviews/edit_review.html')

    def test_list(self):
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        eq_(r.status_code, 200)
        doc = pq(r.content)
        reviews = doc('#reviews .item')
        eq_(reviews.length, Review.objects.count())
        eq_(Review.objects.count(), 2)
        eq_(doc('.secondary .average-rating').length, 1)
        eq_(doc('.secondary .no-rating').length, 0)

        # 218207 is the original rated review (not a reply).
        r = Review.objects.get(id=218207)
        item = reviews.filter('#review-218207')
        eq_(r.reply_to_id, None)
        eq_(item.hasClass('reply'), False)
        eq_(item.length, 1)
        eq_(item.attr('data-rating'), str(r.rating))

        # 218468 is the developer reply: no rating, marked as a reply.
        r = Review.objects.get(id=218468)
        item = reviews.filter('#review-218468')
        eq_(item.length, 1)
        eq_(r.reply_to_id, 218207)
        eq_(item.hasClass('reply'), True)
        eq_(r.rating, None)
        eq_(item.attr('data-rating'), '')

    def test_list_rss(self):
        # The listing page advertises its RSS feed.
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        doc = pq(r.content)
        eq_(doc('link[title=RSS]').length, 1)

    def test_empty_list(self):
        # With no reviews: no items, a first-review prompt, and no average.
        Review.objects.all().delete()
        eq_(Review.objects.count(), 0)
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        eq_(r.status_code, 200)
        doc = pq(r.content)
        eq_(doc('#reviews .item').length, 0)
        eq_(doc('#add-first-review').length, 1)
        eq_(doc('.secondary .average-rating').length, 0)
        eq_(doc('.secondary .no-rating').length, 1)

    def test_list_item_actions(self):
        # Admins may delete/flag others' reviews but edit their own.
        self.login_admin()
        self.make_it_my_review()
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        reviews = pq(r.content)('#reviews .item')

        r = Review.objects.get(id=218207)
        item = reviews.filter('#review-218207')
        actions = item.find('.item-actions')
        eq_(actions.length, 1)
        classes = sorted(c.get('class') for c in actions.find('li a'))
        eq_(classes, ['delete-review', 'flag-review'])

        r = Review.objects.get(id=218468)
        item = reviews.filter('#review-218468')
        actions = item.find('.item-actions')
        eq_(actions.length, 1)
        classes = sorted(c.get('class') for c in actions.find('li a'))
        eq_(classes, ['delete-review', 'review-edit'])

    def test_cant_view_unlisted_addon_reviews(self):
        """An unlisted addon doesn't have reviews."""
        self.addon.update(is_listed=False)
        assert self.client.get(helpers.url('addons.reviews.list',
                                           self.addon.slug)).status_code == 404
class TestFlag(ReviewTest):
    """POSTing review flags (spam/language/other) for editor attention."""

    def setUp(self):
        super(TestFlag, self).setUp()
        self.url = helpers.url('addons.reviews.flag', self.addon.slug, 218468)
        self.login_admin()

    def test_no_login(self):
        # Anonymous users may not flag: 401.
        self.client.logout()
        response = self.client.post(self.url)
        eq_(response.status_code, 401)

    def test_new_flag(self):
        # A new flag marks the review for editor approval.
        response = self.client.post(self.url, {'flag': ReviewFlag.SPAM})
        eq_(response.status_code, 200)
        eq_(response.content, '{"msg": "Thanks; this review has been '
                              'flagged for editor approval."}')
        eq_(ReviewFlag.objects.filter(flag=ReviewFlag.SPAM).count(), 1)
        eq_(Review.objects.filter(editorreview=True).count(), 1)

    def test_new_flag_mine(self):
        # Flagging your own review is treated as not found.
        self.make_it_my_review()
        response = self.client.post(self.url, {'flag': ReviewFlag.SPAM})
        eq_(response.status_code, 404)

    def test_update_flag(self):
        # Re-flagging replaces the existing flag instead of adding another.
        response = self.client.post(self.url, {'flag': ReviewFlag.SPAM})
        eq_(response.status_code, 200)
        eq_(ReviewFlag.objects.filter(flag=ReviewFlag.SPAM).count(), 1)
        eq_(Review.objects.filter(editorreview=True).count(), 1)

        response = self.client.post(self.url, {'flag': ReviewFlag.LANGUAGE})
        eq_(response.status_code, 200)
        eq_(ReviewFlag.objects.filter(flag=ReviewFlag.LANGUAGE).count(), 1)
        eq_(ReviewFlag.objects.count(), 1)
        eq_(Review.objects.filter(editorreview=True).count(), 1)

    def test_flag_with_note(self):
        # An "other" flag carries the free-form note along.
        response = self.client.post(self.url,
                                    {'flag': ReviewFlag.OTHER, 'note': 'xxx'})
        eq_(response.status_code, 200)
        eq_(ReviewFlag.objects.filter(flag=ReviewFlag.OTHER).count(),
            1)
        eq_(ReviewFlag.objects.count(), 1)
        eq_(ReviewFlag.objects.get(flag=ReviewFlag.OTHER).note, 'xxx')
        eq_(Review.objects.filter(editorreview=True).count(), 1)

    def test_bad_flag(self):
        # Unknown flag values are rejected and leave the review untouched.
        response = self.client.post(self.url, {'flag': 'xxx'})
        eq_(response.status_code, 400)
        eq_(Review.objects.filter(editorreview=True).count(), 0)
class TestDelete(ReviewTest):
    """Permissions and cascade behavior of the review delete endpoint."""

    def setUp(self):
        super(TestDelete, self).setUp()
        self.url = helpers.url('addons.reviews.delete',
                               self.addon.slug, 218207)
        self.login_admin()

    def test_no_login(self):
        self.client.logout()
        response = self.client.post(self.url)
        eq_(response.status_code, 401)

    def test_no_perms(self):
        # Strip all group memberships so the logged-in admin loses rights.
        GroupUser.objects.all().delete()
        response = self.client.post(self.url)
        eq_(response.status_code, 403)

    def test_404(self):
        url = helpers.url('addons.reviews.delete', self.addon.slug, 0)
        response = self.client.post(url)
        eq_(response.status_code, 404)

    def test_delete_review_with_dev_reply(self):
        cnt = Review.objects.count()
        response = self.client.post(self.url)
        eq_(response.status_code, 200)
        # Two are gone since we deleted a review with a reply.
        eq_(Review.objects.count(), cnt - 2)

    def test_delete_success(self):
        # Detach the reply first so only the one review is removed.
        Review.objects.update(reply_to=None)
        cnt = Review.objects.count()
        response = self.client.post(self.url)
        eq_(response.status_code, 200)
        eq_(Review.objects.count(), cnt - 1)

    def test_delete_own_review(self):
        self.client.logout()
        self.login_dev()
        url = helpers.url('addons.reviews.delete', self.addon.slug, 218468)
        cnt = Review.objects.count()
        response = self.client.post(url)
        eq_(response.status_code, 200)
        eq_(Review.objects.count(), cnt - 1)
        eq_(Review.objects.filter(pk=218468).exists(), False)

    def test_reviewer_can_delete(self):
        # Test an editor can delete a review if not listed as an author.
        user = UserProfile.objects.get(email='trev@adblockplus.org')
        # Remove user from authors.
        AddonUser.objects.filter(addon=self.addon).delete()
        # Make user an add-on reviewer.
        group = Group.objects.create(name='Reviewer', rules='Addons:Review')
        GroupUser.objects.create(group=group, user=user)

        self.client.logout()
        self.login_dev()
        cnt = Review.objects.count()
        response = self.client.post(self.url)
        eq_(response.status_code, 200)
        # Two are gone since we deleted a review with a reply.
        eq_(Review.objects.count(), cnt - 2)
        eq_(Review.objects.filter(pk=218207).exists(), False)

    def test_editor_own_addon_cannot_delete(self):
        # Test an editor cannot delete a review if listed as an author.
        user = UserProfile.objects.get(email='trev@adblockplus.org')
        # Make user an add-on reviewer.
        group = Group.objects.create(name='Reviewer', rules='Addons:Review')
        GroupUser.objects.create(group=group, user=user)

        self.client.logout()
        self.login_dev()
        cnt = Review.objects.count()
        response = self.client.post(self.url)
        eq_(response.status_code, 403)
        eq_(Review.objects.count(), cnt)
        eq_(Review.objects.filter(pk=218207).exists(), True)
class TestCreate(ReviewTest):
    """Adding reviews: form access, add-link visibility, URL auto-flagging."""

    def setUp(self):
        super(TestCreate, self).setUp()
        self.add = helpers.url('addons.reviews.add', self.addon.slug)
        self.client.login(username='root_x@ukr.net', password='password')
        self.user = UserProfile.objects.get(email='root_x@ukr.net')
        self.qs = Review.objects.filter(addon=1865)
        self.log_count = ActivityLog.objects.count
        self.more = self.addon.get_url_path(more=True)
        self.list = helpers.url('addons.reviews.list', self.addon.slug)

    def test_add_logged(self):
        r = self.client.get(self.add)
        eq_(r.status_code, 200)
        self.assertTemplateUsed(r, 'reviews/add.html')

    def test_add_admin(self):
        # NOTE(review): only logs in -- no request or assertion follows.
        # The test body appears incomplete; confirm against upstream history.
        self.login_admin()

    def test_add_link_visitor(self):
        """
        Ensure non-logged user can see Add Review links on details page
        but not on Reviews listing page.
        """
        self.client.logout()
        r = self.client.get_ajax(self.more)
        eq_(pq(r.content)('#add-review').length, 1)
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        doc = pq(r.content)
        eq_(doc('#add-review').length, 0)
        eq_(doc('#add-first-review').length, 0)

    def test_add_link_logged(self):
        """Ensure logged user can see Add Review links."""
        r = self.client.get_ajax(self.more)
        eq_(pq(r.content)('#add-review').length, 1)
        r = self.client.get(self.list)
        doc = pq(r.content)
        eq_(doc('#add-review').length, 1)
        eq_(doc('#add-first-review').length, 0)

    def test_add_link_dev(self):
        """Ensure developer cannot see Add Review links."""
        self.login_dev()
        r = self.client.get_ajax(self.more)
        eq_(pq(r.content)('#add-review').length, 0)
        r = self.client.get(helpers.url('addons.reviews.list',
                                        self.addon.slug))
        doc = pq(r.content)
        eq_(doc('#add-review').length, 0)
        eq_(doc('#add-first-review').length, 0)

    def test_list_none_add_review_link_visitor(self):
        """If no reviews, ensure visitor user cannot see Add Review link."""
        Review.objects.all().delete()
        self.client.logout()
        r = self.client.get(self.list)
        doc = pq(r.content)('#reviews')
        eq_(doc('#add-review').length, 0)
        eq_(doc('#no-add-first-review').length, 0)
        eq_(doc('#add-first-review').length, 1)

    def test_list_none_add_review_link_logged(self):
        """If no reviews, ensure logged user can see Add Review link."""
        Review.objects.all().delete()
        r = self.client.get(self.list)
        doc = pq(r.content)
        eq_(doc('#add-review').length, 1)
        eq_(doc('#no-add-first-review').length, 0)
        eq_(doc('#add-first-review').length, 1)

    def test_list_none_add_review_link_dev(self):
        """If no reviews, ensure developer can see Add Review link."""
        Review.objects.all().delete()
        self.login_dev()
        r = self.client.get(self.list)
        doc = pq(r.content)('#reviews')
        eq_(doc('#add-review').length, 0)
        eq_(doc('#no-add-first-review').length, 1)
        eq_(doc('#add-first-review').length, 0)

    def test_body_has_url(self):
        """ test that both the create and revise reviews segments properly
        note reviews that contain URL like patterns for editorial review
        """
        for body in ['url http://example.com', 'address 127.0.0.1',
                     'url https://example.com/foo/bar', 'host example.org',
                     'quote example%2eorg', 'IDNA www.xn--ie7ccp.xxx']:
            self.client.post(self.add, {'body': body, 'rating': 2})
            ff = Review.objects.filter(addon=self.addon)
            rf = ReviewFlag.objects.filter(review=ff[0])
            eq_(ff[0].flag, True)
            eq_(ff[0].editorreview, True)
            eq_(rf[0].note, 'URLs')

    def test_cant_review_unlisted_addon(self):
        """Can't review an unlisted addon."""
        self.addon.update(is_listed=False)
        assert self.client.get(self.add).status_code == 404
class TestEdit(ReviewTest):
    """AJAX editing of a user's own review and of a developer reply."""

    def setUp(self):
        super(TestEdit, self).setUp()
        self.client.login(username='root_x@ukr.net', password='password')

    def test_edit(self):
        url = helpers.url('addons.reviews.edit', self.addon.slug, 218207)
        response = self.client.post(url, {'rating': 2, 'body': 'woo woo'},
                                    X_REQUESTED_WITH='XMLHttpRequest')
        assert response.status_code == 200
        assert '%s' % Review.objects.get(id=218207).body == 'woo woo'

        response = self.client.get(helpers.url('addons.reviews.list',
                                               self.addon.slug))
        doc = pq(response.content)
        assert doc('#review-218207 .review-edit').text() == 'Edit review'

    def test_edit_not_owner(self):
        # Editing someone else's review is forbidden.
        url = helpers.url('addons.reviews.edit', self.addon.slug, 218468)
        r = self.client.post(url, {'rating': 2, 'body': 'woo woo'},
                             X_REQUESTED_WITH='XMLHttpRequest')
        eq_(r.status_code, 403)

    def test_edit_reply(self):
        # Developers edit their replies (title/body, no rating).
        self.login_dev()
        url = helpers.url('addons.reviews.edit', self.addon.slug, 218468)
        response = self.client.post(url, {'title': 'fo', 'body': 'shizzle'},
                                    X_REQUESTED_WITH='XMLHttpRequest')
        assert response.status_code == 200
        reply = Review.objects.get(id=218468)
        assert '%s' % reply.title == 'fo'
        assert '%s' % reply.body == 'shizzle'

        response = self.client.get(helpers.url('addons.reviews.list',
                                               self.addon.slug))
        doc = pq(response.content)
        assert doc('#review-218468 .review-edit').text() == 'Edit reply'
class TestTranslate(ReviewTest):
    """The review-translation view: redirects to Google Translate for
    regular requests and proxies the translation API for AJAX requests."""

    def setUp(self):
        super(TestTranslate, self).setUp()
        self.create_switch('reviews-translate', db=True)
        self.user = UserProfile.objects.get(username='jbalogh')
        self.review = Review.objects.create(addon=self.addon, user=self.user,
                                            title='or', body='yes')

    def test_regular_call(self):
        # Non-AJAX requests redirect to Google Translate with the body.
        review = self.review
        url = helpers.url('addons.reviews.translate', review.addon.slug,
                          review.id, 'fr')
        r = self.client.get(url)
        eq_(r.status_code, 302)
        eq_(r.get('Location'), 'https://translate.google.com/#auto/fr/yes')

    def test_unicode_call(self):
        # BUG FIX: the body literal had been mojibake-corrupted in the
        # source; restored to u'héhé 3%', which is exactly what the asserted
        # Location encodes ('h%C3%A9h%C3%A9%203%25' is the UTF-8
        # percent-encoding of 'héhé 3%').
        review = Review.objects.create(addon=self.addon, user=self.user,
                                       title='or', body=u'héhé 3%')
        url = helpers.url('addons.reviews.translate',
                          review.addon.slug, review.id, 'fr')
        r = self.client.get(url)
        eq_(r.status_code, 302)
        eq_(r.get('Location'),
            'https://translate.google.com/#auto/fr/h%C3%A9h%C3%A9%203%25')

    @mock.patch('reviews.views.requests')
    def test_ajax_call(self, requests):
        # Mock the translation API response.
        response = mock.Mock()
        response.status_code = 200
        response.json.return_value = {u'data': {u'translations': [{
            u'translatedText': u'oui',
            u'detectedSourceLanguage': u'en'
        }]}}
        requests.get.return_value = response

        # AJAX requests get the translated title/body back as JSON.
        review = self.review
        url = helpers.url('addons.reviews.translate', review.addon.slug,
                          review.id, 'fr')
        r = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(r.status_code, 200)
        eq_(json.loads(r.content), {"body": "oui", "title": "oui"})

    @mock.patch('waffle.switch_is_active', lambda x: True)
    @mock.patch('reviews.views.requests')
    def test_invalid_api_key(self, requests):
        # Mock an API-key failure from the translation service.
        response = mock.Mock()
        response.status_code = 400
        response.json.return_value = {'error': {'code': 400, 'errors': [{
            'domain': 'usageLimits', 'message': 'Bad Request',
            'reason': 'keyInvalid'}], 'message': 'Bad Request'}}
        requests.get.return_value = response

        # The view propagates the upstream 400 status.
        review = self.review
        url = helpers.url('addons.reviews.translate', review.addon.slug,
                          review.id, 'fr')
        r = self.client.get(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        eq_(r.status_code, 400)
class TestMobileReviews(amo.tests.MobileTest, amo.tests.TestCase):
    """Review listing and adding through the mobile-site templates."""

    fixtures = ['reviews/dev-reply.json', 'base/admin', 'base/users']

    def setUp(self):
        super(TestMobileReviews, self).setUp()
        self.addon = Addon.objects.get(id=1865)
        self.user = UserProfile.objects.get(email='regular@mozilla.com')
        self.login_regular()
        self.add = helpers.url('addons.reviews.add', self.addon.slug)
        self.list = helpers.url('addons.reviews.list', self.addon.slug)

    def login_regular(self):
        self.client.login(username='regular@mozilla.com', password='password')

    def login_dev(self):
        self.client.login(username='trev@adblockplus.org', password='password')

    def login_admin(self):
        self.client.login(username='jbalogh@mozilla.com', password='password')

    def test_mobile(self):
        # Anonymous mobile visitors get the mobile listing template.
        self.client.logout()
        self.mobile_init()
        r = self.client.get(self.list)
        eq_(r.status_code, 200)
        self.assertTemplateUsed(r, 'reviews/mobile/review_list.html')

    def test_add_visitor(self):
        # Anonymous users are redirected away from the add form.
        self.client.logout()
        self.mobile_init()
        r = self.client.get(self.add)
        eq_(r.status_code, 302)

    def test_add_logged(self):
        r = self.client.get(self.add)
        eq_(r.status_code, 200)
        self.assertTemplateUsed(r, 'reviews/mobile/add.html')

    def test_add_admin(self):
        self.login_admin()
        r = self.client.get(self.add)
        eq_(r.status_code, 200)

    def test_add_dev(self):
        # Developers may not review their own add-on.
        self.login_dev()
        r = self.client.get(self.add)
        eq_(r.status_code, 403)

    def test_add_link_visitor(self):
        # Visitors see the add link plus a login prompt, but no inline form.
        self.client.logout()
        self.mobile_init()
        r = self.client.get(self.list)
        doc = pq(r.content)
        eq_(doc('#add-review').length, 1)
        eq_(doc('.copy .login-button').length, 1)
        eq_(doc('#review-form').length, 0)

    def test_add_link_logged(self):
        r = self.client.get(self.list)
        doc = pq(r.content)
        eq_(doc('#add-review').length, 1)
        eq_(doc('#review-form').length, 1)

    def test_add_link_dev(self):
        self.login_dev()
        r = self.client.get(self.list)
        doc = pq(r.content)
        eq_(doc('#add-review').length, 0)
        eq_(doc('#review-form').length, 0)

    def test_add_submit(self):
        r = self.client.post(self.add, {'body': 'hi', 'rating': 3})
        eq_(r.status_code, 302)

        r = self.client.get(self.list)
        doc = pq(r.content)
        text = doc('.review').eq(0).text()
        assert "hi" in text
        assert "Rated 3 out of 5" in text

    def test_add_logged_out(self):
        self.client.logout()
        self.mobile_init()
        r = self.client.get(helpers.url('addons.reviews.add', self.addon.slug))
        eq_(r.status_code, 302)
| bsd-3-clause |
Tangcuyu/perfectinfo | lib/tsd/node_modules/prebuild/node_modules/node-ninja/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
  """Initializes the user file.

  Args:
    user_file_path: Path to the user file.
    version: Version info.
    name: Name of the user file.
  """
  self.user_file_path = user_file_path
  self.version = version
  self.name = name
  # Maps configuration name -> easy_xml spec list for that configuration.
  self.configurations = {}
def AddConfig(self, name):
  """Adds a configuration to the project.

  Args:
    name: Configuration name.
  """
  # easy_xml spec format: element name followed by its attribute dict.
  self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
working_directory: other files which may trigger the rule. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
| mit |
callowayproject/django-elections | elections/ap.py | 1 | 37444 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Collects and organizes election results published the Associated Press's
data service.
In order to use this library, you must pay AP for access to the data.
More information can be found on the AP's web site (http://www.apdigitalnews.com/ap_elections.html)
or by contacting Anthony Marquez at amarquez@ap.org.
"""
import os
import csv
import itertools
import calculate
from ftplib import FTP
from datetime import date
from cStringIO import StringIO
from dateutil.parser import parse as dateparse
class AP(object):
    """
    The public client you can use to connect to AP's data feed.

    Example usage:

        >>> from elections import AP
        >>> client = AP(USERNAME, PASSWORD)
        >>> client.get_state("IA")

    NOTE: this module is Python 2 only (``except Exception, e`` clauses,
    ``print`` statements, ``cStringIO``/``izip_longest`` usage).
    """
    FTP_HOSTNAME = 'electionsonline.ap.org'

    def __init__(self, username=None, password=None):
        # FTP credentials issued by the AP.
        self.username = username
        self.password = password
        # Lazily created FTP connection, plus a counter of how many
        # times we have had to (re)connect.
        self._ftp = None
        self._ftp_hits = 0

    def __unicode__(self):
        return unicode(self.username)

    def __str__(self):
        return self.__unicode__().encode("utf-8")

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__unicode__())

    #
    # Public methods
    #

    @property
    def ftp(self):
        """
        Checks if we have an active FTP connection.
        If not, activates a new connection to the AP.
        """
        # ``sock`` goes falsy once the connection has been closed.
        if not self._ftp or not self._ftp.sock:
            self._ftp = FTP(self.FTP_HOSTNAME, self.username, self.password)
            self._ftp_hits += 1
        return self._ftp

    def get_state(self, *args, **kwargs):
        """
        Takes a single state postal code, returns an APResult
        object for that state.
        """
        result = State(self, args[0], **kwargs)
        # Close the connection once the State has pulled everything it needs.
        self.ftp.quit()
        return result

    def get_states(self, *args, **kwargs):
        """
        Takes a list of state postal codes, returns a list of APResult
        objects.
        """
        results = [State(self, state, **kwargs) for state in args]
        self.ftp.quit()
        return results

    def get_topofticket(self, election_date, **kwargs):
        """
        Takes a date in any common format (YYYY-MM-DD is preferred)
        and returns the results for that date.
        """
        try:
            dt = dateparse(election_date)
        except ValueError:
            raise ValueError("The election date you've submitted could not be parsed. Try submitting it in YYYY-MM-DD format.")
        result = TopOfTicket(self, dt.strftime("%Y%m%d"), **kwargs)
        self.ftp.quit()
        return result

    #
    # Private methods
    #

    def _fetch(self, path):
        """
        Fetch a file from the AP FTP.

        Provide a path, get back a file obj with your data.

        Raises FileDoesNotExistError / BadCredentialsError for the two
        FTP failures we know how to recognize; re-raises anything else.
        """
        # Make a file object to store our target
        buffer_ = StringIO()
        # Craft an FTP command that can pull the file
        cmd = 'RETR %s' % path
        # Connect to the FTP server, issue the command and catch the data
        # in our buffer file object.
        try:
            self.ftp.retrbinary(cmd, buffer_.write)
        except Exception, e:
            print cmd
            # Translate the AP server's error strings into friendlier
            # exception types.  ``e.message`` is a Python 2-ism.
            if "550 The system cannot find the" in e.message:
                raise FileDoesNotExistError("The file you've requested does not exist." +
                                            " If you're looking for data about a state, make sure you" +
                                            " input valid postal codes. If you're looking for a date," +
                                            " make sure it's correct.")
            elif "530 User cannot log in" in e.message:
                raise BadCredentialsError("The username and password you submitted" +
                                          " are not accepted by the AP's FTP.")
            else:
                raise e
        # Return the file object
        return StringIO(buffer_.getvalue())

    def _fetch_csv(self, path, delimiter="|", fieldnames=None):
        """
        Fetch a pipe delimited file from the AP FTP.

        Provide the path of the file you want.

        Returns a list of dictionaries that's ready to roll.
        """
        # Fetch the data and stuff it in a CSV DictReaddr
        reader = csv.DictReader(
            self._fetch(path),
            delimiter=delimiter,
            fieldnames=fieldnames
        )
        # Clean up the keys and values, since AP provides them a little messy
        return [self._strip_dict(i) for i in reader]

    def _strip_dict(self, d):
        """
        Strip all leading and trailing whitespace in dictionary keys and values.

        This problem is common to the AP's CSV files
        """
        # Entries with a None key/value (extra or missing CSV columns) are dropped.
        return dict((k.strip(), v.strip()) for k, v in d.items() if k != None and v != None)

    def _fetch_flatfile(self, path, basicfields, candidatefields):
        """
        Retrive, parse and structure one of the AP's flatfiles.

        Returns a list of dictionaries with the standard "basicfields" as
        top-level keys and then a `candidates` key that contains a nested dictionary
        with the candidate data inside.

        AP's flatfiles are delimited by ";", do not include headers and include
        a dynamic number of fields depending on the number of candidates in the
        data set.

        Provide:

            * The path of the file you want
            * The list of basic fields that start each row
            * The list of candidate fields that will repeat outwards to the right
              for each candidate in the data set.
        """
        # Fetch the data and toss it in a CSV reader
        reader = csv.reader(
            self._fetch(path),
            delimiter=";",
        )
        raw_data = list(reader)
        # Loop thorugh the raw data...
        prepped_data = []
        for row in raw_data:
            # Slice off the last field since it's always empty
            row = row[:-1]
            # Split out the basic fields
            basic_data = row[:len(basicfields)]
            # Load them into a new dictionary with the proper keys
            prepped_dict = dict((basicfields[i], v) for i, v in enumerate(basic_data))
            # Split out all the candidate sets that come after the basic fields
            candidate_data = row[len(basicfields):]
            candidate_sets = self._split_list(candidate_data, len(candidatefields))
            # Load candidate data into a list of dicts with the proper keys
            prepped_dict['candidates'] = [
                dict((candidatefields[i], v) for i, v in enumerate(cand))
                for cand in candidate_sets
            ]
            prepped_data.append(prepped_dict)
        # Pass it all out
        return prepped_data

    def _split_list(self, iterable, n, fillvalue=None):
        """
        Splits the provided list into groups of n length.

        You can optionally provide a value to be included if the last list
        comes up short of the n value. By default it's none.

        Example usage:

            >>> _split_list([1,2,3,4,5,6], 2)
            [(1, 2), (3, 4), (5, 6)]
            >>> _split_list([1,2,3,4,5], 2, fillvalue="x")
            [(1, 2), (3, 4), (5, "x")]

        Derived from a snippet published by Stephan202
        http://stackoverflow.com/a/1625013
        """
        # izip_longest over n copies of one iterator yields n-sized chunks.
        args = [iter(iterable)] * n
        return list(itertools.izip_longest(*args, fillvalue=fillvalue))
class BaseAPResults(object):
    """
    Base class that defines the methods to retrieve AP CSV
    data and shared properties and methods for State and
    TopOfTicket objects.

    Any class that inherits from BaseAPResults must define
    these paths before it calls the parent __init__:

        * self.results_file_path
        * self.delegates_file_path
        * self.race_file_path
        * self.reporting_unit_file_path
        * self.candidate_file_path
    """

    def __init__(self, client, name, results=True, delegates=True):
        # ``client`` is the AP FTP client; ``name`` is a state postal code
        # for State objects or a YYYYMMDD date string for TopOfTicket.
        self.client = client
        self.name = name
        # The AP results files for these 7 states are missing
        # the leading 0 on the county FIPS codes.
        if self.name in ('AL', 'AK', 'AZ', 'AR', 'CA', 'CO', 'CT'):
            self.leading_zero_fips = True
        else:
            self.leading_zero_fips = False
        self._races = {}
        self._reporting_units = {}
        # Order matters: races must exist before reporting units and
        # candidates can be attached to them.
        self._init_races()
        self._init_reporting_units()
        self._init_candidates()
        if results:
            self.fetch_results()
        # Fetches delegates for any Primary or Caucus races
        if delegates and self.filter_races(is_general=False):
            self.fetch_delegates()

    def __unicode__(self):
        return unicode(self.name)

    def __str__(self):
        return self.__unicode__().encode("utf-8")

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__unicode__())

    #
    # Public methods
    #

    @property
    def races(self):
        """
        Returns a list of all the races reporting results.
        """
        return self._races.values()

    def get_race(self, ap_race_number):
        """
        Get a single Race object by it's ap_race_number
        """
        try:
            return self._races[ap_race_number]
        except KeyError:
            raise KeyError("The race you requested, %s, does not exist." % ap_race_number)

    def filter_races(self, **kwargs):
        """
        Takes a series of keyword arguments and returns any Race objects
        that match. Works an AND query and returns anything that matches
        all of the provided kwargs.

        ex:
            >>> iowa.filter_races(office_name='President', party='GOP')
            [<Race: President>]
        """
        races = self.races
        # Apply each keyword as a successive equality filter (AND semantics).
        for k in kwargs.keys():
            races = filter(lambda x: getattr(x, k) == kwargs[k], races)
        return races

    @property
    def reporting_units(self):
        """
        Get all reporting units
        """
        return self._reporting_units.values()

    def get_reporting_unit(self, fips):
        """
        Get a single ReportinUnit
        """
        try:
            return self._reporting_units[fips]
        except KeyError:
            raise KeyError("The reporting unit you requested does not exist.")

    @property
    def counties(self):
        """
        Gets all reporting units that can be defined as counties (read: !states).

        Also does a crosswalk to aggregate New England ReportingUnits into their
        respective counties.
        """
        # Filter out the state level data
        ru_list = [o for o in self.reporting_units if o.fips and not o.is_state]
        # If the AP reports sub-County data for this state, as they do for some
        # New England states, we'll need to aggregate it here. If not, we can
        # just pass out the data "as is."
        if self.name in COUNTY_CROSSWALK.keys():
            # Group the sub-county units by their county FIPS code.
            d = {}
            for ru in ru_list:
                try:
                    d[ru.fips].append(ru)
                # NOTE(review): bare except; the sibling Race.counties uses
                # ``except KeyError`` for the same pattern.
                except:
                    d[ru.fips] = [ru]
            county_list = []
            for county, units in d.items():
                # Sum the component units into one synthetic county unit.
                ru = ReportingUnit(
                    name = COUNTY_CROSSWALK[self.name][county],
                    ap_number = '',
                    fips = county,
                    abbrev = self.name,
                    precincts_total = sum([int(i.precincts_total) for i in units]),
                    num_reg_voters = sum([int(i.num_reg_voters) for i in units]),
                )
                county_list.append(ru)
            return county_list
        else:
            return ru_list

    def fetch_results(self):
        """
        This will fetch and fill out all of the results. If called again,
        it will simply run through and update all of the results with
        the most fresh data from the AP.
        """
        self._get_flat_results()

    def fetch_delegates(self):
        """
        This will fetch and fill out the delegate_total variable on
        the candidate models with the statewide results.
        """
        self._get_flat_delegates()

    #
    # Private methods
    #

    def _init_candidates(self):
        """
        Download the state's candidate file and load the data.
        """
        # Fetch the data from the FTP
        candidate_list = self.client._fetch_csv(self.candidate_file_path)
        # Loop through it...
        for cand in candidate_list:
            # Create a Candidate...
            candidate = Candidate(
                first_name = cand['pol_first_name'],
                middle_name = cand['pol_middle_name'],
                last_name = cand['pol_last_name'],
                ap_race_number = cand['ra_number'],
                ap_natl_number = cand['pol_nat_id'],
                ap_polra_number = cand['polra_number'],
                ap_pol_number = cand['pol_number'],
                abbrev_name = cand['pol_abbrv'],
                suffix = cand['pol_junior'],
                party = cand['polra_party'],
                # use_suffix?
            )
            # Attach the candidate to the race it belongs to.
            self._races[candidate.ap_race_number].add_candidate(candidate)

    def _init_races(self):
        """
        Download all the races in the state and load the data.
        """
        # Get the data
        race_list = self.client._fetch_csv(self.race_file_path)
        # Loop through it all
        for race in race_list:
            # Create a Race object...
            print "Creating race: ", race['ra_number']
            # NOTE: the loop variable ``race`` (a dict) is rebound here to
            # the new Race object.
            race = Race(
                ap_race_number = race['ra_number'],
                office_name = race['ot_name'],
                office_description = race['of_description'],
                office_id = race['office_id'],
                race_type = race['race_id'],
                seat_name = race['se_name'],
                seat_number = race['se_number'],
                state_postal = race.get('st_postal', None),
                scope = race['of_scope'],
                # el_date arrives as YYYYMMDD; slice it into a date object.
                date = date(*map(int, [race['el_date'][:4], race['el_date'][4:6], race['el_date'][6:]])),
                num_winners = int(race['ra_num_winners']),
                party = race['rt_party_name'],
                uncontested = race['ra_uncontested'] == '1',
            )
            # And add it to the global store
            self._races.update({race.ap_race_number: race})

    def _init_reporting_units(self):
        """
        Download all the reporting units and load the data.
        """
        # Get the data
        ru_list = self.client._fetch_csv(self.reporting_unit_file_path)
        # Loop through them all
        for r in ru_list:
            # if `st_postal` is in the dict, we're getting Top of the Ticket data,
            # so we want to put reportingunits in the state they belong to.
            # otherwise stuff the RUs into all of the races, as they're all in the same state.
            races = self.filter_races(state_postal=r.get('st_postal', None)) or self.races
            # Create ReportingUnit objects for each race
            for race in races:
                ru = ReportingUnit(
                    name = r['ru_name'],
                    ap_number = r['ru_number'],
                    fips = r['ru_fip'],
                    abbrev = r['ru_abbrv'],
                    precincts_total = int(r['ru_precincts']),
                    num_reg_voters = int(r['ru_reg_voters']),
                )
                # And add them to the global store
                race._reporting_units.update({ru.key: ru})
            # We add a set of reportingunits for the State object
            # so you can get county and state voter info from the
            # State object itself.
            ru = ReportingUnit(
                name = r['ru_name'],
                ap_number = r['ru_number'],
                fips = r['ru_fip'],
                abbrev = r['ru_abbrv'],
                precincts_total = int(r['ru_precincts']),
                num_reg_voters = int(r['ru_reg_voters']),
            )
            self._reporting_units.update({ru.key: ru})

    def _get_flat_delegates(self):
        """
        Download statewide delegate totals and load it into Candidates.
        """
        # Pull the data
        flat_list = self.client._fetch_flatfile(
            self.delegates_file_path,
            [ # First the basic fields that will the same in each row
                'test',
                'election_date',
                'state_postal',
                'district_type',
                'district_number',
                'district_name',
                'race_number',
                'office_id',
                'race_type_id',
                'seat_number',
                'office_name',
                'seat_name',
                'race_type_party',
                'race_type',
                'office_description',
                'number_of_winners',
                'number_in_runoff',
                'precincts_reporting',
                'total_precincts',
            ],
            [ # Then the candidate fields that will repeat after the basics
                'candidate_number',
                'order',
                'party',
                'first_name',
                'middle_name',
                'last_name',
                'junior',
                'use_junior',
                'incumbent',
                'delegates',
                'vote_count',
                'is_winner',
                'national_politician_id',
            ]
        )
        # Filter it down to the state level results
        state_data = [i for i in flat_list if i['district_number'] == '1']
        # Loop through them
        for row in state_data:
            # Get the race
            race = self.get_race(row['race_number'])
            # Loop through the candidates in that race
            for cand in row['candidates']:
                # And if it's a legit candidate, cuz sometimes they come out
                # blank at the end of the file.
                if cand['candidate_number']:
                    # Grab the candidate
                    candidate = race.get_candidate(cand['candidate_number'])
                    # Set the delegates
                    candidate.delegates = int(cand['delegates'])

    def _get_flat_results(self, ftp=None):
        """
        Download, parse and structure the state and county votes totals.
        """
        # Download the data
        print "Fetching results file:", self.results_file_path
        flat_list = self.client._fetch_flatfile(
            self.results_file_path,
            [ # First the basic fields that will the same in each row
                'test',
                'election_date',
                'state_postal',
                'county_number',
                'fips',
                'county_name',
                'race_number',
                'office_id',
                'race_type_id',
                'seat_number',
                'office_name',
                'seat_name',
                'race_type_party',
                'race_type',
                'office_description',
                'number_of_winners',
                'number_in_runoff',
                'precincts_reporting',
                'total_precincts',
            ],
            [ # Then the candidate fields that will repeat after the basics
                'candidate_number',
                'order',
                'party',
                'first_name',
                'middle_name',
                'last_name',
                'junior',
                'use_junior',
                'incumbent',
                'vote_count',
                'is_winner',
                'national_politician_id',
            ]
        )

        # Figure out if we're dealing with test data or the real thing
        self.is_test = flat_list[0]['test'] == 't'

        # Start looping through the lines...
        for row in flat_list:
            # Get the race
            try:
                race = self.get_race(row['race_number'])
            except KeyError:
                # Unknown race number: skip the row rather than crash.
                continue

            # Figure out if it's a state or a county
            fips =row['fips']
            is_state = row['county_number'] == '1'
            county_number = str(row['county_number'])

            # AP stupidly strips leading 0s in the FIPS for the
            # results file. This fixes em.
            # if is_state:
            #     fips = '00000'
            # else:
            #     if self.leading_zero_fips and fips[0] != '0':
            #         fips = '0' + fips
            # NOTE(review): the fix above is commented out, so ``fips`` and
            # ``self.leading_zero_fips`` are currently unused here; lookup is
            # by name+number instead.

            # Pull the reporting unit
            reporting_unit = race.get_reporting_unit("%s%s" % (row['county_name'], county_number))

            # Loop through all the candidates
            votes_cast = 0
            for cand in row['candidates']:
                # Skip it if the candidate is empty, as it sometimes is at
                # the end of the row
                if not cand['candidate_number']:
                    continue

                # Pull the existing candidate object
                candidate = race.get_candidate(cand["candidate_number"])

                # Pull the vote total
                vote_count = int(cand['vote_count'])

                # Add it to the overall total
                votes_cast += vote_count

                # Update the candidate's global vote total if data are statewide
                if is_state:
                    candidate.vote_total = vote_count

                # Set is_winner and is_runoff
                # (This will just get set over and over as we loop
                # but AP seems to put the statewide result in for every
                # reporting unit so I think we're safe.)
                candidate.is_winner = cand['is_winner'] == 'X'
                candidate.is_runoff = cand['is_winner'] == 'R'

                # Create the Result object, which is specific to the
                # reporting unit in this row of the flatfile.
                result = Result(
                    candidate = candidate,
                    vote_total = vote_count,
                    reporting_unit = reporting_unit
                )
                # Update result connected to the reporting unit
                reporting_unit.update_result(result)

            # Update the reporting unit's precincts status
            reporting_unit.precincts_reporting = int(row['precincts_reporting'])
            reporting_unit.precincts_reporting_percent = calculate.percentage(
                reporting_unit.precincts_reporting,
                reporting_unit.precincts_total
            )

            # Update the total votes cast
            reporting_unit.votes_cast = votes_cast

            # Loop back through the results and set the percentages now
            # that we know the overall total
            for result in reporting_unit.results:
                result.vote_total_percent = calculate.percentage(
                    result.vote_total,
                    votes_cast
                )
class State(BaseAPResults):
    """
    One of these United States.

    Returned by the AP client in response to a `get_state` or `get_states`
    call. Contains, among its attributes, the results for all races recorded
    by the AP.
    """
    def __init__(self, client, name, results=True, delegates=True):
        # Lay out the per-state FTP paths before the base class pulls data.
        subs = {'name': name}
        self.results_file_path = "/%(name)s/flat/%(name)s.txt" % subs
        self.delegates_file_path = "/%(name)s/flat/%(name)s_D.txt" % subs
        self.race_file_path = "/inits/%(name)s/%(name)s_race.txt" % subs
        self.reporting_unit_file_path = "/inits/%(name)s/%(name)s_ru.txt" % subs
        self.candidate_file_path = "/inits/%(name)s/%(name)s_pol.txt" % subs
        super(State, self).__init__(client, name, results, delegates)
class TopOfTicket(BaseAPResults):
    """
    These United States.

    Returned by the AP client in response to a `get_topofticket`
    call. Contains, among its attributes, the results for all races recorded
    by the AP.
    """
    def __init__(self, client, name, results=True, delegates=True):
        # Lay out the nationwide FTP paths before the base class pulls data;
        # here ``name`` is a YYYYMMDD election-date string.
        subs = {'name': name}
        self.results_file_path = "/Delegate_Tracking/US/flat/US_%(name)s.txt" % subs
        self.delegates_file_path = "/Delegate_Tracking/US/flat/US_%(name)s_d.txt" % subs
        self.race_file_path = "/inits/US/US_%(name)s_race.txt" % subs
        self.reporting_unit_file_path = "/inits/US/US_%(name)s_ru.txt" % subs
        self.candidate_file_path = "/inits/US/US_%(name)s_pol.txt" % subs
        super(TopOfTicket, self).__init__(client, name, results, delegates)

    @property
    def states(self):
        """The state-level reporting units, one per state in the feed."""
        return [ru for ru in self._reporting_units.values() if ru.is_state]
class Race(object):
    """
    A contest being decided by voters choosing between candidates.

    For example:

        * The presidential general election
        * The governorship of Maine
        * Proposition 8 in California
    """
    # AP race-type codes mapped to human-readable labels.
    _race_types = {
        'D': 'Dem Primary',
        'R': 'GOP Primary',
        'G': 'General Election',
        'E': 'Dem Caucus',
        'S': 'GOP Caucus',
        'L': 'Libertarian', # Not documented by the AP, but that's what it appears to be.
    }

    def __init__(self, ap_race_number=None, office_name=None, office_description=None,
                 office_id=None, seat_name=None, seat_number=None, state_postal=None, scope=None,
                 date=None, num_winners=None, race_type=None, party=None, uncontested=None,
                 precincts_total=None, precincts_reporting=None,
                 precincts_reporting_percent=None, votes_cast=None):
        self.ap_race_number = ap_race_number
        self.office_name = office_name
        self.office_description = office_description
        self.office_id = office_id
        self.seat_name = seat_name
        self.seat_number = seat_number
        self.state_postal = state_postal
        self.scope = scope
        self.date = date
        self.num_winners = num_winners
        self.race_type = race_type
        self.party = party
        self.uncontested = uncontested
        # Candidates keyed by ap_polra_number; reporting units keyed by
        # ReportingUnit.key (name + ap_number).
        self._candidates = {}
        self._reporting_units = {}

    def __unicode__(self):
        return unicode(self.name)

    def __str__(self):
        return self.__unicode__().encode("utf-8")

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__unicode__())

    @property
    def name(self):
        """Human-readable name assembled from office/seat/type fields."""
        name = ''
        # 'L' scope appears to mean a local (sub-state) race.
        if self.scope == 'L':
            if self.office_description:
                name = '%s %s - %s' % (self.office_name, self.seat_name, self.office_description)
            else:
                name = '%s %s' % (self.office_name, self.seat_name)
        else:
            if self.office_name == "Proposition":
                num = self.seat_name.split('-')[0].strip()
                name = "%s %s" % (self.office_name, num)
            else:
                name = '%s' % self.office_name
        # Prefix non-general races with their type, e.g. "Dem Primary - ...".
        if not self.is_general:
            name = '%s - %s' % (self.race_type_name, name)
        return name

    @property
    def candidates(self):
        # Sorted alphabetically by last name.
        return sorted(self._candidates.values(), key=lambda x: x.last_name)

    def get_candidate(self, ap_polra_num):
        """
        Takes AP's polra number and returns a Candidate object.
        """
        return self._candidates.get(ap_polra_num, None)

    def add_candidate(self, candidate):
        self._candidates.update({candidate.ap_polra_number: candidate})

    def get_reporting_unit(self, number):
        """
        Get a single ReportingUnit
        """
        return self._reporting_units.get(number, None)

    @property
    def reporting_units(self):
        """
        Returns all reporting units that belong to this race as a list of
        ReportingUnit objects.
        """
        return self._reporting_units.values()

    @property
    def state(self):
        """
        Returns the state-level results for this race as a ReportingUnit object.
        """
        states = [o for o in self.reporting_units if o.is_state]
        if len(states) == 1:
            return states[0]
        else:
            # Multiple state-level units (top-of-ticket data): pick the one
            # matching this race's state.  May return None if none match.
            for s in states:
                if s.abbrev == self.state_postal:
                    return s

    @property
    def counties(self):
        """
        Returns all the counties that report results for this race as a list
        of ReportingUnit objects.
        """
        ru_list = sorted(
            [o for o in self.reporting_units if o.fips and not o.is_state],
            key=lambda x: x.name
        )
        # If the AP reports sub-County data for this state, as they do for some
        # New England states, we'll need to aggregate it here. If not, we can
        # just pass out the data "as is."
        if self.state.abbrev in COUNTY_CROSSWALK.keys():
            # Group the sub-county units by their county FIPS code.
            d = {}
            for ru in ru_list:
                try:
                    d[ru.fips].append(ru)
                except KeyError:
                    d[ru.fips] = [ru]
            county_list = []
            for county, units in d.items():
                ru = ReportingUnit(
                    name = COUNTY_CROSSWALK[self.state.abbrev][county],
                    ap_number = '',
                    fips = county,
                    # NOTE(review): ``abbrev`` is set to the race name here,
                    # while the parallel code in BaseAPResults.counties uses
                    # the state postal code -- looks like a copy/paste slip;
                    # confirm before changing.
                    abbrev = self.name,
                    precincts_reporting = sum([int(i.precincts_reporting) for i in units]),
                    precincts_total = sum([int(i.precincts_total) for i in units]),
                    num_reg_voters = sum([int(i.num_reg_voters) for i in units]),
                    votes_cast = sum([int(i.votes_cast) for i in units])
                )
                ru.precincts_reporting_percent = calculate.percentage(
                    ru.precincts_reporting,
                    ru.precincts_total
                )
                # Group all the candidates
                cands = {}
                for unit in units:
                    for result in unit.results:
                        try:
                            cands[result.candidate.ap_polra_number].append(result)
                        except KeyError:
                            cands[result.candidate.ap_polra_number] = [result]
                # Sum each candidate's component results into one combined
                # Result attached to the synthetic county unit.
                for ap_polra_number, results in cands.items():
                    combined = Result(
                        candidate = results[0].candidate,
                        reporting_unit = ru,
                        vote_total = sum([i.vote_total for i in results]),
                        vote_total_percent = calculate.percentage(
                            sum([i.vote_total for i in results]),
                            ru.votes_cast
                        )
                    )
                    # Update result connected to the reporting unit
                    ru.update_result(combined)
                # Load the finished county into our list
                county_list.append(ru)
            return county_list
        else:
            return ru_list
        # NOTE(review): unreachable -- both branches above return first.
        return ru_list

    @property
    def race_type_name(self):
        """
        Returns a descriptive name for the race_type.
        """
        return self._race_types.get(self.race_type, None)

    @property
    def is_primary(self):
        return self.race_type in ('D', 'R',)

    @property
    def is_caucus(self):
        return self.race_type in ('E', 'S',)

    @property
    def is_general(self):
        return self.race_type == 'G'
class ReportingUnit(object):
    """
    An area or unit that groups votes into a total.

    For instance, a state, a congressional district, a county.
    """
    def __init__(self, ap_number=None, name=None, abbrev=None, fips=None,
                 precincts_total=None, num_reg_voters=None, votes_cast=None,
                 precincts_reporting=None, precincts_reporting_percent=None):
        self.ap_number = ap_number
        self.name = name
        self.abbrev = abbrev
        self.fips = fips
        self.num_reg_voters = num_reg_voters
        self.votes_cast = votes_cast
        self.precincts_total = precincts_total
        self.precincts_reporting = precincts_reporting
        self.precincts_reporting_percent = precincts_reporting_percent
        # Results keyed by the candidate's AP polra number.
        self._results = {}

    def __unicode__(self):
        label = self.name
        if self.is_state:
            label = '%s (state)' % label
        return unicode(label)

    def __str__(self):
        return self.__unicode__().encode("utf-8")

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__unicode__())

    @property
    def key(self):
        """Name concatenated with AP number; unique within a race."""
        return "%s%s" % (self.name, self.ap_number)

    @property
    def results(self):
        """
        Returns the Result objects sorted by total votes (highest first).

        If no votes are in, it returns the candidates in alphabetical order.
        """
        if self.votes_cast:
            return sorted(self._results.values(),
                          key=lambda r: r.vote_total, reverse=True)
        return sorted(self._results.values(),
                      key=lambda r: r.candidate.last_name)

    def update_result(self, result):
        # Insert or replace the result for this candidate.
        self._results[result.candidate.ap_polra_number] = result

    @property
    def is_state(self):
        # AP marks statewide units with an all-zero FIPS code.
        return self.fips == '00000'
class Candidate(object):
    """
    A choice for voters in a race.

    In the presidential race, a person, like Barack Obama.
    In a ballot measure, a direction, like Yes or No.
    """
    def __init__(self, first_name=None, middle_name=None, last_name=None,
                 abbrev_name=None, suffix=None, use_suffix=False,
                 ap_natl_number=None, ap_polra_number=None, ap_race_number=None,
                 party=None, ap_pol_number=None, is_winner=None,
                 is_runoff=None, delegates=None):
        # Identity fields.
        self.first_name = first_name
        self.middle_name = middle_name
        self.last_name = last_name
        self.abbrev_name = abbrev_name
        self.suffix = suffix
        self.use_suffix = use_suffix
        # The various AP identifier numbers.
        self.ap_natl_number = ap_natl_number
        self.ap_polra_number = ap_polra_number
        self.ap_race_number = ap_race_number
        self.ap_pol_number = ap_pol_number
        # Affiliation and outcome fields.
        self.party = party
        self.is_winner = is_winner
        self.is_runoff = is_runoff
        self.delegates = delegates

    def __unicode__(self):
        return unicode(self.name)

    def __str__(self):
        return self.__unicode__().encode("utf-8")

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__unicode__())

    @property
    def name(self):
        # Ballot measures use 'Yes'/'No' as the last name; show just that.
        if self.last_name in ('Yes', 'No'):
            return u'%s' % self.last_name
        return (u'%s %s' % (self.first_name, self.last_name)).strip()
class Result(object):
    """
    The vote count for a candidate in a race in a particular reporting unit.
    """
    def __init__(self, candidate=None, reporting_unit=None, vote_total=None,
                 vote_total_percent=None):
        self.candidate = candidate
        self.reporting_unit = reporting_unit
        self.vote_total = vote_total
        self.vote_total_percent = vote_total_percent

    def __unicode__(self):
        # "candidate, reporting unit, total" -- e.g. "Obama, Polk, 99"
        return u'%s, %s, %s' % (
            self.candidate, self.reporting_unit, self.vote_total)

    def __str__(self):
        return self.__unicode__().encode("utf-8")

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__unicode__())
#
# Errors
#
class FileDoesNotExistError(Exception):
    """Raised when a requested file is missing from the AP FTP."""
    def __init__(self, value):
        # Keep the offending value around for inspection.
        self.parameter = value

    def __str__(self):
        return '%r' % (self.parameter,)
class BadCredentialsError(Exception):
    """Raised when the AP FTP rejects the supplied username/password."""
    def __init__(self, value):
        # Keep the offending value around for inspection.
        self.parameter = value

    def __str__(self):
        return '%r' % (self.parameter,)
#
# Town-to-county crosswalk
#
# Maps AP sub-county unit FIPS-style codes to the county each unit rolls up
# into, keyed by state postal code.  Used to aggregate the town-level results
# the AP reports for some New England states.
COUNTY_CROSSWALK = {
    'NH': {
        '33001': 'Belknap',
        '33003': 'Carroll',
        # NOTE(review): 'Chesire' looks like a misspelling of 'Cheshire';
        # left unchanged since downstream code may match this exact string.
        '33005': 'Chesire',
        '33007': 'Coos',
        '33009': 'Grafton',
        # NOTE(review): 'Hillborough' looks like a misspelling of
        # 'Hillsborough'; left unchanged for the same reason.
        '33011': 'Hillborough',
        '33013': 'Merrimack',
        '33015': 'Rockingham',
        '33017': 'Strafford',
        '33019': 'Sullivan',
    },
    'VT': {
        '50001': 'Addison',
        '50003': 'Bennington',
        '50005': 'Caledonia',
        '50007': 'Chittenden',
        '50009': 'Essex',
        '50011': 'Franklin',
        '50013': 'Grand Isle',
        '50015': 'Lamoille',
        '50017': 'Orange',
        '50019': 'Orleans',
        '50021': 'Rutland',
        '50023': 'Washington',
        '50025': 'Windham',
        '50027': 'Windsor',
    }
}
| apache-2.0 |
mindnervestech/mnrp | addons/account/wizard/account_journal_select.py | 385 | 2068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class account_journal_select(osv.osv_memory):
    """
    Account Journal Select
    """
    _name = "account.journal.select"
    _description = "Account Journal Select"

    def action_open_window(self, cr, uid, ids, context=None):
        """Return the journal-items window action, narrowed to the
        journal/period of the active account.journal.period record."""
        if context is None:
            context = {}
        data_model = self.pool.get('ir.model.data')
        action_model = self.pool.get('ir.actions.act_window')
        # Look up the canonical "move line select" window action.
        ref = data_model.get_object_reference(cr, uid, 'account', 'action_move_line_select')
        action_id = ref and ref[1] or False
        action = action_model.read(cr, uid, [action_id])[0]
        # Resolve which journal/period the wizard was launched from.
        cr.execute('select journal_id, period_id from account_journal_period where id=%s', (context['active_id'],))
        row = cr.fetchone()
        if row:
            journal_id, period_id = row
            action['domain'] = str([('journal_id', '=', journal_id), ('period_id', '=', period_id)])
            action['context'] = str({'journal_id': journal_id, 'period_id': period_id})
        return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rnestler/servo | components/script/dom/bindings/codegen/parser/tests/test_record.py | 52 | 1595 | import WebIDL
def WebIDLTest(parser, harness):
    # Exercises WebIDL record<K, V> parsing: a valid record argument,
    # then two cases that must be rejected.
    # NOTE: Python 2 only (``except Exception,x`` syntax).
    parser.parse("""
        dictionary Dict {};
        interface RecordArg {
          void foo(record<DOMString, Dict> arg);
        };
    """)

    results = parser.finish()

    harness.check(len(results), 2, "Should know about two things");
    harness.ok(isinstance(results[1], WebIDL.IDLInterface),
               "Should have an interface here");
    members = results[1].members
    harness.check(len(members), 1, "Should have one member")
    harness.ok(members[0].isMethod(), "Should have method")
    signature = members[0].signatures()[0]
    args = signature[1]
    harness.check(len(args), 1, "Should have one arg")
    harness.ok(args[0].type.isRecord(), "Should have a record type here")
    harness.ok(args[0].type.inner.isDictionary(),
               "Should have a dictionary inner type")

    # A record value type of ``void`` is invalid and must throw.
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            interface RecordVoidArg {
              void foo(record<DOMString, void> arg);
            };
        """)

        results = parser.finish()
    except Exception,x:
        threw = True

    harness.ok(threw, "Should have thrown because record can't have void as value type.")

    # A dictionary may not (transitively) contain itself via a record.
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            dictionary Dict {
              record<DOMString, Dict> val;
            };
        """)

        results = parser.finish()
    except Exception,x:
        threw = True

    harness.ok(threw,
               "Should have thrown on dictionary containing itself via record.")
| mpl-2.0 |
prakritish/ansible | test/units/modules/cloud/amazon/test_ec2_vpc_nat_gateway.py | 41 | 17149 | import pytest
import unittest
boto3 = pytest.importorskip("boto3")
botocore = pytest.importorskip("botocore")
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
import ansible.modules.cloud.amazon.ec2_vpc_nat_gateway as ng
# Minimal stand-in for the ansible CLI options namespace: TaskQueueManager
# only requires that these attributes exist.
Options = (
    namedtuple(
        'Options', [
            'connection', 'module_path', 'forks', 'become', 'become_method',
            'become_user', 'remote_user', 'private_key_file', 'ssh_common_args',
            'sftp_extra_args', 'scp_extra_args', 'ssh_extra_args', 'verbosity',
            'check'
        ]
    )
)
# initialize needed objects
variable_manager = VariableManager()
loader = DataLoader()
# check=True runs every module in check mode, so the plays below never make
# real AWS calls.
options = (
    Options(
        connection='local',
        module_path='cloud/amazon',
        forks=1, become=None, become_method=None, become_user=None, check=True,
        remote_user=None, private_key_file=None, ssh_common_args=None,
        sftp_extra_args=None, scp_extra_args=None, ssh_extra_args=None,
        verbosity=3
    )
)
passwords = dict(vault_pass='')   # no vault-encrypted data in these plays
aws_region = 'us-west-2'
# create inventory (just localhost) and pass to var manager
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost')
variable_manager.set_inventory(inventory)
def run(play):
    """Execute *play* through a TaskQueueManager built from the module-level
    inventory/options, always cleaning the TQM up afterwards.

    Returns (tqm, results); the tqm is returned (not just the run status)
    so tests can inspect ``tqm._stats`` for ok/changed counters.
    """
    tqm = None
    results = None
    try:
        tqm = TaskQueueManager(
            inventory=inventory,
            variable_manager=variable_manager,
            loader=loader,
            options=options,
            passwords=passwords,
            stdout_callback='default',
        )
        results = tqm.run(play)
    finally:
        # Always release the TQM's worker processes, even if the run raised.
        if tqm is not None:
            tqm.cleanup()
    return tqm, results
class AnsibleVpcNatGatewayTasks(unittest.TestCase):
    """End-to-end checks for the ec2_vpc_nat_gateway module.

    Each test builds a tiny two-task play (the gateway task plus a debug
    task echoing the registered result) and runs it through a real
    TaskQueueManager.  The module-level ``options`` put everything in
    check mode, so the module answers from its dry-run fixtures and no
    AWS calls are made.
    """

    def _run_gateway_play(self, play_name, **gateway_args):
        """Run a play that invokes ec2_vpc_nat_gateway with *gateway_args*.

        ``region`` is filled in automatically from ``aws_region``.
        Returns the ``(tqm, results)`` pair from :func:`run` so callers
        can inspect ``tqm._stats``.
        """
        play_source = dict(
            name=play_name,
            hosts='localhost',
            gather_facts='no',
            tasks=[
                dict(
                    action=dict(
                        module='ec2_vpc_nat_gateway',
                        args=dict(region=aws_region, **gateway_args),
                    ),
                    register='nat_gateway',
                ),
                dict(
                    action=dict(
                        module='debug',
                        args=dict(msg='{{nat_gateway}}'),
                    ),
                ),
            ],
        )
        play = Play().load(play_source, variable_manager=variable_manager,
                           loader=loader)
        return run(play)

    def test_create_gateway_using_allocation_id(self):
        tqm, results = self._run_gateway_play(
            "Create new nat gateway with eip allocation-id",
            subnet_id='subnet-12345678',
            allocation_id='eipalloc-12345678',
            wait='yes',
        )
        # Both tasks ran, and creating a new gateway counts as one change.
        self.assertEqual(tqm._stats.ok['localhost'], 2)
        self.assertEqual(tqm._stats.changed['localhost'], 1)

    def test_create_gateway_using_allocation_id_idempotent(self):
        tqm, results = self._run_gateway_play(
            "Create new nat gateway with eip allocation-id",
            subnet_id='subnet-123456789',
            allocation_id='eipalloc-1234567',
            wait='yes',
        )
        # This gateway already exists in the dry-run fixtures: no change.
        self.assertEqual(tqm._stats.ok['localhost'], 2)
        self.assertNotIn('localhost', tqm._stats.changed)

    def test_create_gateway_using_eip_address(self):
        tqm, results = self._run_gateway_play(
            "Create new nat gateway with eip address",
            subnet_id='subnet-12345678',
            eip_address='55.55.55.55',
            wait='yes',
        )
        self.assertEqual(tqm._stats.ok['localhost'], 2)
        self.assertEqual(tqm._stats.changed['localhost'], 1)

    def test_create_gateway_using_eip_address_idempotent(self):
        tqm, results = self._run_gateway_play(
            "Create new nat gateway with eip address",
            subnet_id='subnet-123456789',
            eip_address='55.55.55.55',
            wait='yes',
        )
        self.assertEqual(tqm._stats.ok['localhost'], 2)
        self.assertNotIn('localhost', tqm._stats.changed)

    def test_create_gateway_in_subnet_only_if_one_does_not_exist_already(self):
        tqm, results = self._run_gateway_play(
            "Create new nat gateway only if one does not exist already",
            if_exist_do_not_create='yes',
            subnet_id='subnet-123456789',
            wait='yes',
        )
        self.assertEqual(tqm._stats.ok['localhost'], 2)
        self.assertNotIn('localhost', tqm._stats.changed)

    def test_delete_gateway(self):
        tqm, results = self._run_gateway_play(
            "Delete Nat Gateway",
            nat_gateway_id='nat-123456789',
            state='absent',
            wait='yes',
        )
        # Deleting an existing gateway is always a change.
        self.assertEqual(tqm._stats.ok['localhost'], 2)
        self.assertIn('localhost', tqm._stats.changed)
class AnsibleEc2VpcNatGatewayFunctions(unittest.TestCase):
    """Unit tests for the helper functions of the ec2_vpc_nat_gateway
    module.  Every helper is called with ``check_mode=True`` so the
    answers come from the module's DRY_RUN_* fixtures and the boto3
    client never talks to AWS.
    """

    @staticmethod
    def _client():
        """Return a throwaway EC2 client (never used, thanks to check_mode)."""
        return boto3.client('ec2', region_name=aws_region)

    def test_convert_to_lower(self):
        converted = ng.convert_to_lower(ng.DRY_RUN_GATEWAY_UNCONVERTED[0])
        # Top-level keys must come back snake_cased, in this sorted order.
        self.assertEqual(
            sorted(converted.keys()),
            ['create_time', 'nat_gateway_addresses', 'nat_gateway_id',
             'state', 'subnet_id', 'vpc_id']
        )
        # ...and so must the keys of the nested address record.
        address = converted['nat_gateway_addresses'][0]
        self.assertEqual(
            sorted(address.keys()),
            ['allocation_id', 'network_interface_id', 'private_ip',
             'public_ip']
        )

    def test_get_nat_gateways(self):
        success, err_msg, stream = ng.get_nat_gateways(
            self._client(), 'subnet-123456789', check_mode=True)
        self.assertTrue(success)
        self.assertEqual(stream, ng.DRY_RUN_GATEWAYS)

    def test_get_nat_gateways_no_gateways_found(self):
        success, err_msg, stream = ng.get_nat_gateways(
            self._client(), 'subnet-1234567', check_mode=True)
        self.assertTrue(success)
        self.assertEqual(stream, [])

    def test_wait_for_status(self):
        success, err_msg, gws = ng.wait_for_status(
            self._client(), 5, 'nat-123456789', 'available', check_mode=True)
        self.assertTrue(success)
        self.assertEqual(gws, ng.DRY_RUN_GATEWAYS[0])

    def test_wait_for_status_to_timeout(self):
        # Unknown gateway id: the wait loop must give up after 2 seconds.
        success, err_msg, gws = ng.wait_for_status(
            self._client(), 2, 'nat-12345678', 'available', check_mode=True)
        self.assertFalse(success)
        self.assertEqual(gws, {})

    def test_gateway_in_subnet_exists_with_allocation_id(self):
        gws, err_msg = ng.gateway_in_subnet_exists(
            self._client(), 'subnet-123456789', 'eipalloc-1234567',
            check_mode=True)
        self.assertEqual(gws, ng.DRY_RUN_GATEWAYS)

    def test_gateway_in_subnet_exists_with_allocation_id_does_not_exist(self):
        gws, err_msg = ng.gateway_in_subnet_exists(
            self._client(), 'subnet-123456789', 'eipalloc-123',
            check_mode=True)
        self.assertEqual(gws, list())

    def test_gateway_in_subnet_exists_without_allocation_id(self):
        gws, err_msg = ng.gateway_in_subnet_exists(
            self._client(), 'subnet-123456789', check_mode=True)
        self.assertEqual(gws, ng.DRY_RUN_GATEWAYS)

    def test_get_eip_allocation_id_by_address(self):
        allocation_id, _ = ng.get_eip_allocation_id_by_address(
            self._client(), '55.55.55.55', check_mode=True)
        self.assertEqual(allocation_id, 'eipalloc-1234567')

    def test_get_eip_allocation_id_by_address_does_not_exist(self):
        allocation_id, err_msg = ng.get_eip_allocation_id_by_address(
            self._client(), '52.52.52.52', check_mode=True)
        self.assertEqual(err_msg, 'EIP 52.52.52.52 does not exist')
        self.assertIsNone(allocation_id)

    def test_allocate_eip_address(self):
        success, err_msg, eip_id = ng.allocate_eip_address(
            self._client(), check_mode=True)
        self.assertTrue(success)

    def test_release_address(self):
        success, _ = ng.release_address(
            self._client(), 'eipalloc-1234567', check_mode=True)
        self.assertTrue(success)

    def test_create(self):
        success, changed, err_msg, results = ng.create(
            self._client(), 'subnet-123456', 'eipalloc-1234567',
            check_mode=True)
        self.assertTrue(success)
        self.assertTrue(changed)

    def test_pre_create(self):
        success, changed, err_msg, results = ng.pre_create(
            self._client(), 'subnet-123456', check_mode=True)
        self.assertTrue(success)
        self.assertTrue(changed)

    def test_pre_create_idemptotent_with_allocation_id(self):
        success, changed, err_msg, results = ng.pre_create(
            self._client(), 'subnet-123456789',
            allocation_id='eipalloc-1234567', check_mode=True)
        self.assertTrue(success)
        self.assertFalse(changed)

    def test_pre_create_idemptotent_with_eip_address(self):
        success, changed, err_msg, results = ng.pre_create(
            self._client(), 'subnet-123456789', eip_address='55.55.55.55',
            check_mode=True)
        self.assertTrue(success)
        self.assertFalse(changed)

    def test_pre_create_idemptotent_if_exist_do_not_create(self):
        success, changed, err_msg, results = ng.pre_create(
            self._client(), 'subnet-123456789', if_exist_do_not_create=True,
            check_mode=True)
        self.assertTrue(success)
        self.assertFalse(changed)

    def test_delete(self):
        success, changed, err_msg, _ = ng.remove(
            self._client(), 'nat-123456789', check_mode=True)
        self.assertTrue(success)
        self.assertTrue(changed)

    def test_delete_and_release_ip(self):
        success, changed, err_msg, _ = ng.remove(
            self._client(), 'nat-123456789', release_eip=True,
            check_mode=True)
        self.assertTrue(success)
        self.assertTrue(changed)

    def test_delete_if_does_not_exist(self):
        # Removing an unknown gateway must fail without reporting a change.
        success, changed, err_msg, _ = ng.remove(
            self._client(), 'nat-12345', check_mode=True)
        self.assertFalse(success)
        self.assertFalse(changed)
| gpl-3.0 |
jkern/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/compat/_scons_builtins.py | 61 | 5039 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Portions of the following are derived from the compat.py file in
# Twisted, under the following copyright:
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories
__doc__ = """
Compatibility idioms for builtins names
This module adds names to the builtins module for things that we want
to use in SCons but which don't show up until later Python versions than
the earliest ones we support.
This module checks for the following builtins names:
all()
any()
sorted()
memoryview()
Implementations of functions are *NOT* guaranteed to be fully compliant
with these functions in later versions of Python. We are only concerned
with adding functionality that we actually use in SCons, so be wary
if you lift this code for other uses. (That said, making these more
nearly the same as later, official versions is still a desirable goal,
we just don't need to be obsessive about it.)
If you're looking at this with pydoc and various names don't show up in
the FUNCTIONS or DATA output, that means those names are already built in
to this version of Python and we don't need to add them from this module.
"""
__revision__ = "src/engine/SCons/compat/_scons_builtins.py 5134 2010/08/16 23:02:40 bdeegan"
import builtins
# Each shim below follows the same pattern: referencing the builtin name
# raises NameError on interpreters that predate it, in which case a pure
# Python fallback is defined and installed into the `builtins` module.
try:
    all
except NameError:
    # Pre-2.5 Python has no all() function.
    def all(iterable):
        """
        Returns True if all elements of the iterable are true.
        """
        for element in iterable:
            if not element:
                return False
        return True
    builtins.all = all
    all = all                   # rebind so this module's namespace sees it too
try:
    any
except NameError:
    # Pre-2.5 Python has no any() function.
    def any(iterable):
        """
        Returns True if any element of the iterable is true.
        """
        for element in iterable:
            if element:
                return True
        return False
    builtins.any = any
    any = any
try:
    memoryview
except NameError:
    # Pre-2.7 doesn't have the memoryview() built-in.
    # NOTE(review): this shim only supports item access and simple slicing,
    # and relies on the Python 2 `buffer` builtin.
    class memoryview(object):
        def __init__(self, obj):
            # wrapping buffer in () keeps the fixer from changing it
            self.obj = (buffer)(obj)
        def __getitem__(self, indx):
            if isinstance(indx, slice):
                return self.obj[indx.start:indx.stop]
            else:
                return self.obj[indx]
    builtins.memoryview = memoryview
try:
    sorted
except NameError:
    # Pre-2.4 Python has no sorted() function.
    #
    # The pre-2.4 Python list.sort() method does not support
    # list.sort(key=) nor list.sort(reverse=) keyword arguments, so
    # we must implement the functionality of those keyword arguments
    # by hand instead of passing them to list.sort().
    def sorted(iterable, cmp=None, key=None, reverse=False):
        """Return a new sorted list (decorate-sort-undecorate when `key`
        is given).

        NOTE(review): if both `cmp` and `key` are passed, `cmp` receives
        the decorated (key, value) tuples rather than the values -- this
        differs from the real built-in; confirm no caller combines them.
        """
        if key is not None:
            result = [(key(x), x) for x in iterable]
        else:
            result = iterable[:]
        if cmp is None:
            # Pre-2.3 Python does not support list.sort(None).
            result.sort()
        else:
            result.sort(cmp)
        if key is not None:
            result = [t1 for t0,t1 in result]
        if reverse:
            result.reverse()
        return result
    builtins.sorted = sorted
#if sys.version_info[:3] in ((2, 2, 0), (2, 2, 1)):
# def lstrip(s, c=string.whitespace):
# while s and s[0] in c:
# s = s[1:]
# return s
# def rstrip(s, c=string.whitespace):
# while s and s[-1] in c:
# s = s[:-1]
# return s
# def strip(s, c=string.whitespace, l=lstrip, r=rstrip):
# return l(r(s, c), c)
#
# object.__setattr__(str, 'lstrip', lstrip)
# object.__setattr__(str, 'rstrip', rstrip)
# object.__setattr__(str, 'strip', strip)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
makermade/arm_android-21_arm-linux-androideabi-4.8 | lib/python2.7/distutils/extension.py | 250 | 10904 | """distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
    """Just a collection of attributes that describes an extension
    module and everything needed to build it (hopefully in a portable
    way, but there are hooks that let you be as unportable as you need).

    Instance attributes:
      name : string
        the full name of the extension, including any packages -- ie.
        *not* a filename or pathname, but Python dotted name
      sources : [string]
        list of source filenames, relative to the distribution root
        (where the setup script lives), in Unix form (slash-separated)
        for portability.  Source files may be C, C++, SWIG (.i),
        platform-specific resource files, or whatever else is recognized
        by the "build_ext" command as source for a Python extension.
      include_dirs : [string]
        list of directories to search for C/C++ header files (in Unix
        form for portability)
      define_macros : [(name : string, value : string|None)]
        list of macros to define; each macro is defined using a 2-tuple,
        where 'value' is either the string to define it to or None to
        define it without a particular value (equivalent of "#define
        FOO" in source or -DFOO on Unix C compiler command line)
      undef_macros : [string]
        list of macros to undefine explicitly
      library_dirs : [string]
        list of directories to search for C/C++ libraries at link time
      libraries : [string]
        list of library names (not filenames or paths) to link against
      runtime_library_dirs : [string]
        list of directories to search for C/C++ libraries at run time
        (for shared extensions, this is when the extension is loaded)
      extra_objects : [string]
        list of extra files to link with (eg. object files not implied
        by 'sources', static library that must be explicitly specified,
        binary resource files, etc.)
      extra_compile_args : [string]
        any extra platform- and compiler-specific information to use
        when compiling the source files in 'sources'.  For platforms and
        compilers where "command line" makes sense, this is typically a
        list of command-line arguments, but for other platforms it could
        be anything.
      extra_link_args : [string]
        any extra platform- and compiler-specific information to use
        when linking object files together to create the extension (or
        to create a new static Python interpreter).  Similar
        interpretation as for 'extra_compile_args'.
      export_symbols : [string]
        list of symbols to be exported from a shared extension.  Not
        used on all platforms, and not generally necessary for Python
        extensions, which typically export exactly one symbol: "init" +
        extension_name.
      swig_opts : [string]
        any extra options to pass to SWIG if a source file has the .i
        extension.
      depends : [string]
        list of files that the extension depends on
      language : string
        extension language (i.e. "c", "c++", "objc"). Will be detected
        from the source extensions if not provided.
    """

    # When adding arguments to this constructor, be sure to update
    # setup_keywords in core.py.
    def __init__ (self, name, sources,
                  include_dirs=None,
                  define_macros=None,
                  undef_macros=None,
                  library_dirs=None,
                  libraries=None,
                  runtime_library_dirs=None,
                  extra_objects=None,
                  extra_compile_args=None,
                  extra_link_args=None,
                  export_symbols=None,
                  swig_opts = None,
                  depends=None,
                  language=None,
                  **kw                      # To catch unknown keywords
                 ):
        # Validate eagerly with explicit raises instead of `assert`
        # statements, which are silently stripped under `python -O`.
        # AssertionError is kept so existing callers see the same type.
        if not isinstance(name, str):
            raise AssertionError("'name' must be a string")
        if not (isinstance(sources, list) and
                all(isinstance(v, str) for v in sources)):
            raise AssertionError("'sources' must be a list of strings")

        self.name = name
        self.sources = sources
        # Every optional list argument defaults to a fresh empty list --
        # never share a mutable default between instances.
        self.include_dirs = include_dirs or []
        self.define_macros = define_macros or []
        self.undef_macros = undef_macros or []
        self.library_dirs = library_dirs or []
        self.libraries = libraries or []
        self.runtime_library_dirs = runtime_library_dirs or []
        self.extra_objects = extra_objects or []
        self.extra_compile_args = extra_compile_args or []
        self.extra_link_args = extra_link_args or []
        self.export_symbols = export_symbols or []
        self.swig_opts = swig_opts or []
        self.depends = depends or []
        self.language = language

        # If there are unknown keyword options, warn about them
        if len(kw):
            options = ', '.join(repr(option) for option in sorted(kw))
            msg = "Unknown Extension options: " + options
            if warnings is not None:
                warnings.warn(msg)
            else:
                # `warnings` failed to import at module load time.
                sys.stderr.write(msg + '\n')

# class Extension
def read_setup_file (filename):
    """Parse a "Setup" file (as used by the old makesetup script) and
    return a list of Extension instances, one per module line.

    Lines have the form
        <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
    "VAR = VALUE" assignments are collected in a first pass and expanded
    into the module lines during the second pass.
    """
    from distutils.sysconfig import \
         parse_makefile, expand_makefile_vars, _variable_rx
    from distutils.text_file import TextFile
    from distutils.util import split_quoted

    # First pass over the file to gather "VAR = VALUE" assignments.
    vars = parse_makefile(filename)

    # Second pass to gobble up the real content: lines of the form
    #   <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
    file = TextFile(filename,
                    strip_comments=1, skip_blanks=1, join_lines=1,
                    lstrip_ws=1, rstrip_ws=1)
    try:
        extensions = []
        while 1:
            line = file.readline()
            if line is None:                # eof
                break
            if _variable_rx.match(line):    # VAR=VALUE, handled in first pass
                continue
            if line[0] == line[-1] == "*":
                file.warn("'%s' lines not handled yet" % line)
                continue

            line = expand_makefile_vars(line, vars)
            words = split_quoted(line)

            # NB. this parses a slightly different syntax than the old
            # makesetup script: here, there must be exactly one extension per
            # line, and it must be the first word of the line.
            module = words[0]
            ext = Extension(module, [])
            append_next_word = None

            for word in words[1:]:
                if append_next_word is not None:
                    # Previous flag (-rpath/-Xlinker/-Xcompiler/-u) consumes
                    # this word as its argument.
                    append_next_word.append(word)
                    append_next_word = None
                    continue

                suffix = os.path.splitext(word)[1]
                switch = word[0:2] ; value = word[2:]

                if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
                    # hmm, should we do something about C vs. C++ sources?
                    # or leave it up to the CCompiler implementation to
                    # worry about?
                    ext.sources.append(word)
                elif switch == "-I":
                    ext.include_dirs.append(value)
                elif switch == "-D":
                    # Portable replacement for the Py2-only string.find().
                    equals = value.find("=")
                    if equals == -1:        # bare "-DFOO" -- no value
                        ext.define_macros.append((value, None))
                    else:                   # "-DFOO=blah"
                        # BUGFIX: historic versions sliced at equals+2 and
                        # silently dropped the first character of the value.
                        ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
                elif switch == "-U":
                    ext.undef_macros.append(value)
                elif switch == "-C":        # only here 'cause makesetup has it!
                    ext.extra_compile_args.append(word)
                elif switch == "-l":
                    ext.libraries.append(value)
                elif switch == "-L":
                    ext.library_dirs.append(value)
                elif switch == "-R":
                    ext.runtime_library_dirs.append(value)
                elif word == "-rpath":
                    append_next_word = ext.runtime_library_dirs
                elif word == "-Xlinker":
                    append_next_word = ext.extra_link_args
                elif word == "-Xcompiler":
                    append_next_word = ext.extra_compile_args
                elif switch == "-u":
                    ext.extra_link_args.append(word)
                    if not value:
                        # "-u symbol": the symbol arrives as the next word.
                        append_next_word = ext.extra_link_args
                # (The duplicated, unreachable "-Xcompiler"/"-u" branches
                # that used to follow here were dead code in this elif
                # chain and have been removed.)
                elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
                    # NB. a really faithful emulation of makesetup would
                    # append a .o file to extra_objects only if it
                    # had a slash in it; otherwise, it would s/.o/.c/
                    # and append it to sources.  Hmmmm.
                    ext.extra_objects.append(word)
                else:
                    file.warn("unrecognized argument '%s'" % word)

            extensions.append(ext)
    finally:
        file.close()

    return extensions
| gpl-2.0 |
frjaraur/python-deployer | deployer/exceptions.py | 2 | 1483 |
class DeployerException(Exception):
    """Root of the deployer exception hierarchy; every error raised by
    this package derives from it."""


class ExecCommandFailed(DeployerException):
    """Raised when a run()/sudo() call on a host exits with a non-zero
    status code."""

    def __init__(self, command, host, use_sudo, status_code, result=None):
        self.command = command
        self.use_sudo = use_sudo
        self.host = host
        self.status_code = status_code
        self.result = result
        message = 'Executing "%s" on "%s" failed with status code: %s' % (
            command, host.slug, status_code)
        DeployerException.__init__(self, message)


class QueryException(DeployerException):
    """Raised when resolving a Q object on a deployer Node fails."""

    def __init__(self, node, attr_name, query, inner_exception):
        self.node = node
        self.attr_name = attr_name
        self.query = query
        self.inner_exception = inner_exception
        message = 'Running query %s:=%r on "%s" failed' % (
            attr_name, query, repr(node))
        DeployerException.__init__(self, message)


class ActionException(DeployerException):
    """Wraps an exception raised while executing an action, together with
    the traceback captured at the point of failure."""

    def __init__(self, inner_exception, traceback):
        self.inner_exception = inner_exception
        self.traceback = traceback

    def __repr__(self):
        return 'ActionException(%r)' % repr(self.inner_exception)


class ConnectionFailedException(DeployerException):
    """Raised when establishing the SSH connection to a host fails."""
| bsd-2-clause |
yashchandak/GNN | Preprocess/DCI2ours.py | 1 | 4984 | from __future__ import print_function
import numpy as np
import networkx as nx
from collections import OrderedDict
from scipy.io import loadmat, savemat
import os
"""
IMP: Nodes start from 0
"""
dataset = 'facebook'
source_path = '../Sample_Run/Datasets/%s/DCI_format/'%(dataset)
dest_path = '../Sample_Run/Datasets/%s/ours/'%(dataset)
def net2mat():
    """Read the DCI <dataset>.net weighted edge list (one header line,
    then "u,v,weight" rows) and save it as a sparse adjacency matrix in
    dest_path/adjmat.mat.
    """
    # BUGFIX: the file handle was previously never closed.
    with open(source_path + dataset + '.net', 'rb') as f:
        f.readline()  # skip the header line
        G = nx.parse_edgelist(f, delimiter=',', nodetype=int,
                              data=(('weight', float),))
    # NOTE(review): nx.to_scipy_sparse_matrix was removed in networkx 3.0
    # (to_scipy_sparse_array) -- confirm the pinned networkx version.
    G_sparse = nx.to_scipy_sparse_matrix(G)
    savemat(dest_path + 'adjmat', {'adjmat': G_sparse})
def dat2feats_labels():
    """Parse <dataset>.dat (one header line, then "node,featbits,labelbits"
    rows) and save per-node feature and label matrices ordered by node id.
    """
    feats = {}
    labels = {}
    # BUGFIX: open in text mode inside a context manager -- the handle was
    # leaked before, and 'rb' yields bytes that break the str split() on Py3.
    with open(source_path + dataset + '.dat', 'r') as f:
        print(f.readline())  # header line, echoed for visibility
        for line in f:
            node, feat, label = line.strip().split(',')
            # Each field is a string of 0/1 characters, one per dimension.
            feats[int(node)] = list(feat)
            labels[int(node)] = list(label)
    # Sort ascending by node id so row i of the matrices belongs to node i.
    feats = OrderedDict(sorted(feats.items(), key=lambda kv: kv[0]))
    labels = OrderedDict(sorted(labels.items(), key=lambda kv: kv[0]))
    feat_list = np.array([v for k, v in feats.items()], dtype=int)
    label_list = np.array([v for k, v in labels.items()], dtype=int)
    print(np.shape(feat_list), np.shape(label_list))
    np.save(dest_path + 'features.npy', feat_list)
    np.save(dest_path + 'labels.npy', label_list)
def create_graph_mapping():
    """Build a node-id -> contiguous-index mapping from the .attr file,
    remap the .edges graph onto those indices, and save ids/map/adjmat.
    """
    # Build the raw graph from "u::v" edge lines.
    f = open(source_path+dataset+'.edges', 'rb')
    G = nx.Graph()
    for line in f:
        x, y = line.strip().split('::')
        G.add_edge(x,y)
    # NOTE(review): `remove` is never populated, so the "Singleton nodes
    # removed" count printed below is always 0 -- confirm intent.
    remove = []
    map = {}
    ids = []
    ctr = 0
    # Every node listed in .attr gets the next contiguous index.
    f2 = open(source_path+dataset+'.attr', 'rb')
    for line in f2:
        l = line.strip().split('::')
        node = l[0]
        ids.append(node)
        map[node] = ctr
        ctr += 1
    print('Total nodes: %d, Singleton nodes removed: %d'%(len(ids),len(remove)))
    np.save(dest_path+'ids', ids)
    np.save(dest_path+'map', map)
    print('Done creating Mapping for %d node ids'%len(ids))
    G_mapped = nx.Graph()
    #add edges, translated to mapped indices
    for u,v in G.edges():
        u,v = map[u], map[v]
        G_mapped.add_edge(u,v)
    #add singleton nodes (present in .attr but absent from .edges)
    # NOTE(review): these are added with their *raw* ids, while edges use
    # mapped integer indices -- the node sets are mixed; verify the
    # resulting adjmat ordering is what downstream code expects.
    G_mapped.add_nodes_from(set(map)-set(G.nodes()))
    G_sparse = nx.to_scipy_sparse_matrix(G_mapped)
    savemat(dest_path+'adjmat', {'adjmat':G_sparse})
    print('Done creating Mapped Graph')
def attr2feats():
    """Build features.npy from <dataset>.attr using the saved node-id
    mapping, so that row mapping[node] holds that node's attributes.
    """
    # allow_pickle is required to load a pickled dict on NumPy >= 1.16.3.
    mapping = np.load(dest_path + 'map.npy', allow_pickle=True).item()
    feats = {}
    unmapped = []
    # BUGFIX: open in text mode inside a context manager -- the handle was
    # leaked before, and 'rb' yields bytes that break the str split() on Py3.
    with open(source_path + dataset + '.attr', 'r') as f:
        for line in f:
            l = line.split('::')
            pos = mapping.get(l[0], -1)
            if pos != -1:
                feats[pos] = l[1:]          # all attribute fields
            else:
                unmapped.append(l[0])
    print('%d Nodes dont have a mapping!' % (len(unmapped)))
    # Sort by mapped index so row i of the matrix belongs to index i.
    feats = OrderedDict(sorted(feats.items(), key=lambda kv: kv[0]))
    feat_list = np.array([v for k, v in feats.items()], dtype=int)
    np.save(dest_path + 'features.npy', feat_list)
    print('Done creating', np.shape(feat_list), 'features')
def lab2labels(max_len =2):
    """Build a multi-hot labels.npy (n_labeled_nodes x max_len) from
    <dataset>.lab.  Supports multiple labels per node: each "node::label"
    line sets one bit of that node's label vector.
    """
    # allow_pickle is required to load a pickled dict on NumPy >= 1.16.3.
    mapping = np.load(dest_path + 'map.npy', allow_pickle=True).item()
    labels = {}
    unmapped = []
    # BUGFIX: open in text mode inside a context manager -- the handle was
    # leaked before, and 'rb' yields bytes that break the str split() on Py3.
    with open(source_path + dataset + '.lab', 'r') as f:
        for line in f:
            l = line.split('::')
            pos = mapping.get(l[0], -1)
            if pos != -1:
                # Start from the node's existing vector (or all zeros) and
                # set the bit for this label.
                temp = labels.get(pos, [0] * max_len)
                temp[int(l[1])] = 1
                labels[pos] = temp
            else:
                unmapped.append(l[0])
    print('%d Nodes dont have a mapping!' % (len(unmapped)))
    # Sort by mapped index so row i of the matrix belongs to index i.
    labels = OrderedDict(sorted(labels.items(), key=lambda kv: kv[0]))
    label_list = np.array([v for k, v in labels.items()], dtype=int)
    np.save(dest_path + 'labels.npy', label_list)
    print('Done creating', np.shape(label_list), 'labels')
def createfolds(trials =10, folds=17):
    """Build boolean train/val/test masks for every (trial, fold) pair.

    The DCI format provides one shared validation file per trial and
    `folds` train files; the test mask is everything in neither the
    train nor the validation set.  Masks are saved under
    dest_path/labels/<trial>/<fold>/.
    """
    # allow_pickle is required to load a pickled dict on NumPy >= 1.16.3.
    mapping = np.load(dest_path + 'map.npy', allow_pickle=True).item()
    size = len(mapping)
    for trial in range(trials):
        # They have a common validation set for each trial.
        val_file = source_path + dataset + '_trial_%d_val.txt' % (trial)
        val = np.zeros(size, dtype=bool)
        val[[mapping[node] for node in np.loadtxt(val_file, dtype=str)]] = True
        for fold in range(folds):
            train_file = source_path + dataset + '_trial_%d_fold_%d.txt' % (trial, fold)
            train = np.zeros(size, dtype=bool)
            train[[mapping[node] for node in np.loadtxt(train_file, dtype=str)]] = True
            # Test set = complement of train + validation.
            # BUGFIX: unary minus on boolean arrays (`-(train+val)`) was
            # removed in NumPy 1.13; use bitwise inversion instead.
            test = ~(train | val)
            path = dest_path + 'labels/%d/%d/' % (trial, fold)
            if not os.path.exists(path):
                os.makedirs(path)
            np.save(path + 'val_ids', val)
            np.save(path + 'train_ids', train)
            np.save(path + 'test_ids', test)
    print('Done creating Test, Valid, Train samples')
    return
# Conversion pipeline: build the id mapping + adjacency matrix first, then
# derive features and labels from it.  NOTE(review): net2mat() and
# dat2feats_labels() are alternate entry points and are deliberately not
# invoked here -- confirm.
create_graph_mapping()
attr2feats()
lab2labels()
createfolds() | mit |
maxwward/SCOPEBak | askbot/deps/livesettings/values.py | 4 | 24621 | """Taken and modified from the dbsettings project.
http://code.google.com/p/django-values/
"""
from decimal import Decimal
from django import forms
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.cache import cache
from django.utils import simplejson
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str
from django.utils.translation import gettext, ugettext_lazy as _
from django.core.files import storage
from askbot.deps.livesettings.models import find_setting, LongSetting, Setting, SettingNotSet
from askbot.deps.livesettings.overrides import get_overrides
from askbot.deps.livesettings.utils import load_module, is_string_like, is_list_or_tuple
from askbot.deps.livesettings.widgets import ImageInput
import datetime
import logging
import signals
import os
__all__ = ['BASE_GROUP', 'BASE_SUPER_GROUP', 'ConfigurationGroup', 'Value', 'BooleanValue',
'DecimalValue', 'DurationValue',
'FloatValue', 'IntegerValue', 'ModuleValue', 'PercentValue', 'PositiveIntegerValue', 'SortedDotDict',
'StringValue', 'SuperGroup', 'ImageValue', 'LongStringValue', 'MultipleStringValue', 'URLValue']
_WARN = {}
log = logging.getLogger('configuration')
NOTSET = object()
class SortedDotDict(SortedDict):
    """An ordered dict whose entries are also readable as attributes.

    Iteration yields the *values* (not the keys), restricted to
    ConfigurationGroup/Value members and sorted by their ordering.
    """

    def __getattr__(self, key):
        # Attribute access falls back to item lookup: d.foo == d['foo'].
        try:
            return self[key]
        # NOTE(review): bare except — should arguably be `except KeyError`
        # so unrelated errors are not masked as AttributeError.
        except:
            raise AttributeError, key  # Python 2 raise syntax

    def __iter__(self):
        # Iterate the filtered, sorted values (see values() below).
        vals = self.values()
        for k in vals:
            yield k

    def values(self):
        # Only ConfigurationGroup/Value members are exposed; sorted via
        # their __cmp__ (ordering, then name/description).
        vals = super(SortedDotDict, self).values()
        vals = [v for v in vals if isinstance(v, (ConfigurationGroup, Value))]
        vals.sort()
        return vals
class SuperGroup(object):
    """Presentation-only container that collects ConfigurationGroup
    instances into one top-level section of the settings UI."""

    def __init__(self, name, ordering=0):
        self.name = name
        self.ordering = ordering
        self.groups = []

    def append(self, group):
        """Add a :class:`ConfigurationGroup`, ignoring duplicates."""
        if group in self.groups:
            return
        self.groups.append(group)
BASE_SUPER_GROUP = SuperGroup(_('Main'))
class ConfigurationGroup(SortedDotDict):
    """A simple wrapper for a group of configuration values"""
    def __init__(self, key, name, *args, **kwargs):
        """Create a new ConfigurationGroup.
        Arguments:
        - key
        - group name - for display to user
        Named Arguments:
        - ordering: integer, optional, defaults to 1.
        - requires: See `Value` requires. The default `requires` all member values will have if not overridden.
        - requiresvalue: See `Values` requires_value. The default `requires_value` if not overridden on the `Value` objects.
        """
        self.key = key
        self.name = name
        self.ordering = kwargs.pop('ordering', 1)
        self.requires = kwargs.pop('requires', None)
        # Register this group in a super group for UI layout (default "Main").
        self.super_group = kwargs.pop('super_group', BASE_SUPER_GROUP)
        self.super_group.append(self)
        if self.requires:
            # When the whole group depends on another value, register our key
            # as a selectable choice on that controlling value.
            reqval = kwargs.pop('requiresvalue', key)
            if not is_list_or_tuple(reqval):
                reqval = (reqval, reqval)
            self.requires_value = reqval[0]
            self.requires.add_choice(reqval)
        super(ConfigurationGroup, self).__init__(*args, **kwargs)

    def __cmp__(self, other):
        # Python 2 ordering hook: groups sort by (ordering, name).
        return cmp((self.ordering, self.name), (other.ordering, other.name))

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ — fine on Python 2 (identity
        # hash is inherited) but would make the class unhashable on Python 3.
        return (type(self) == type(other)
                and self.ordering == other.ordering
                and self.name == other.name)

    def __ne__(self, other):
        return not self == other

    def dict_values(self, load_modules=True):
        """Return a plain {key: current value} dict for all members.

        NOTE(review): the `load_modules` flag is currently unused.
        """
        vals = {}
        keys = super(ConfigurationGroup, self).keys()
        for key in keys:
            v = self[key]
            if isinstance(v, Value):
                value = v.value
            else:
                value = v
            vals[key] = value
        return vals

    def values(self):
        """Return only the member values whose `requires` condition holds."""
        vals = super(ConfigurationGroup, self).values()
        return [v for v in vals if v.enabled()]
BASE_GROUP = ConfigurationGroup('BASE', _('Base Settings'), ordering=0)
class Value(object):
    # Class-level counter recording declaration order, used as a sort
    # tie-breaker so values with equal `ordering` keep definition order.
    creation_counter = 0

    def __init__(self, group, key, **kwargs):
        """
        Create a new Value object for configuration.
        Args:
        - `ConfigurationGroup`
        - key - a string key
        Named arguments:
        - `description` - Will be passed to the field for form usage. Should be a translation proxy. Ex: _('example')
        - `help_text` - Will be passed to the field for form usage.
        - `choices` - If given, then the form field will use a select box
        - `ordering` - Defaults to alphabetical by key if not given.
        - `requires` - If given as a `Value`, then this field will only be rendered if that Value evaluates true (for Boolean requires) or the proper key is in the associated value.
        - `requiresvalue` - If set, then this field will only be rendered if that value is in the list returned by self.value. Defaults to self.key.
        - `hidden` - If true, then render a hidden field.
        - `default` - If given, then this Value will return that default whenever it has no assocated `Setting`.
        - `update_callback` - if given, then this value will call the callback whenever updated
        - `clear_cache` - if `True` - clear all the caches on updates
        """
        self.group = group
        self.key = key
        self.description = kwargs.get('description', None)
        self.help_text = kwargs.get('help_text')
        self.choices = kwargs.get('choices',[])
        self.ordering = kwargs.pop('ordering', 0)
        self.hidden = kwargs.pop('hidden', False)
        self.update_callback = kwargs.pop('update_callback', None)
        self.requires = kwargs.pop('requires', None)
        self.clear_cache = kwargs.pop('clear_cache', False)
        if self.requires:
            # Register our key as a choice on the controlling value so the
            # dependency can be evaluated later in enabled().
            reqval = kwargs.pop('requiresvalue', key)
            if not is_list_or_tuple(reqval):
                reqval = (reqval, reqval)
            self.requires_value = reqval[0]
            self.requires.add_choice(reqval)
        elif group.requires:
            # No per-value dependency: inherit the group-level one.
            self.requires = group.requires
            self.requires_value = group.requires_value
        if kwargs.has_key('default'):  # Python 2 idiom (dict.has_key)
            self.default = kwargs.pop('default')
            self.use_default = True
        else:
            self.use_default = False
        self.creation_counter = Value.creation_counter
        Value.creation_counter += 1

    def __cmp__(self, other):
        # Python 2 ordering hook: explicit ordering first, then description,
        # then declaration order.
        return cmp((self.ordering, self.description, self.creation_counter), (other.ordering, other.description, other.creation_counter))

    def __eq__(self, other):
        # Compares by *current* value; also supports comparison against a
        # plain (non-Value) object.
        if type(self) == type(other):
            return self.value == other.value
        else:
            return self.value == other

    def __iter__(self):
        return iter(self.value)

    def __unicode__(self):
        return unicode(self.value)

    def __str__(self):
        return str(self.value)

    def add_choice(self, choice):
        """Add a choice if it doesn't already exist."""
        if not is_list_or_tuple(choice):
            choice = (choice, choice)
        skip = False
        for k, v in self.choices:
            if k == choice[0]:
                skip = True
                break
        if not skip:
            self.choices += (choice, )

    def choice_field(self, **kwargs):
        """Build the select form field used when `choices` is set."""
        if self.hidden:
            kwargs['widget'] = forms.MultipleHiddenInput()
        return forms.ChoiceField(choices=self.choices, **kwargs)

    def _choice_values(self):
        # The subset of `choices` currently present in self.value.
        choices = self.choices
        vals = self.value
        return [x for x in choices if x[0] in vals]
    choice_values = property(fget=_choice_values)

    def copy(self):
        # NOTE(review): self.__class__(self.key) passes `key` where __init__
        # expects `group` and omits `key`; this looks like it would raise a
        # TypeError if ever called — verify before relying on copy().
        new_value = self.__class__(self.key)
        new_value.__dict__ = self.__dict__.copy()
        return new_value

    def _default_text(self):
        # Human-readable description of the default, shown next to the field.
        if not self.use_default:
            note = ""
        else:
            if self.default == "":
                note = _('Default value: ""')
            elif self.choices:
                # Translate default choice keys into their display labels.
                work = []
                for x in self.choices:
                    if x[0] in self.default:
                        work.append(smart_str(x[1]))
                note = gettext('Default value: ') + ", ".join(work)
            else:
                note = _("Default value: %s") % unicode(self.default)
        return note
    default_text = property(fget=_default_text)

    def enabled(self):
        """True when this value's `requires` dependency is satisfied."""
        enabled = False
        try:
            if not self.requires:
                enabled = True
            else:
                v = self.requires.value
                if self.requires.choices:
                    enabled = self.requires_value == v or self.requires_value in v
                elif v:
                    enabled = True
        except SettingNotSet:
            # Controlling value not stored yet: treat as disabled.
            pass
        return enabled

    def make_field(self, **kwargs):
        """Build the bound form field (select or plain) for this value."""
        if self.choices:
            if self.hidden:
                kwargs['widget'] = forms.MultipleHiddenInput()
            field = self.choice_field(**kwargs)
        else:
            if self.hidden:
                kwargs['widget'] = forms.HiddenInput()
            field = self.field(**kwargs)
        field.group = self.group
        field.default_text = self.default_text
        return field

    def make_setting(self, db_value):
        """Create (but do not save) the backing Setting model row."""
        log.debug('new setting %s.%s', self.group.key, self.key)
        return Setting(group=self.group.key, key=self.key, value=db_value)

    def _setting(self):
        return find_setting(self.group.key, self.key)
    setting = property(fget = _setting)

    def _value(self):
        # Fetch the raw stored value, consulting the overrides dict and/or
        # the database depending on get_overrides(), and applying defaults.
        use_db, overrides = get_overrides()
        if not use_db:
            # Settings come exclusively from LIVESETTINGS_OPTIONS.
            try:
                val = overrides[self.group.key][self.key]
            except KeyError:
                if self.use_default:
                    val = self.default
                else:
                    raise SettingNotSet('%s.%s is not in your LIVESETTINGS_OPTIONS' % (self.group.key, self.key))
        else:
            try:
                val = self.setting.value
            except SettingNotSet, sns:  # Python 2 except syntax
                if self.use_default:
                    val = self.default
                    if overrides:
                        # maybe override the default
                        grp = overrides.get(self.group.key, {})
                        if grp.has_key(self.key):
                            val = grp[self.key]
                else:
                    val = NOTSET
            except AttributeError, ae:
                log.error("Attribute error: %s", ae)
                log.error("%s: Could not get _value of %s", self.key, self.setting)
                raise(ae)
            except Exception, e:
                # Most likely the settings table doesn't exist yet (syncdb).
                global _WARN
                log.error(e)
                if str(e).find("configuration_setting") > -1:
                    if not _WARN.has_key('configuration_setting'):
                        log.warn('Error loading setting %s.%s from table, OK if you are in syncdb', self.group.key, self.key)
                        _WARN['configuration_setting'] = True
                    if self.use_default:
                        val = self.default
                    else:
                        raise ImproperlyConfigured("All settings used in startup must have defaults, %s.%s does not", self.group.key, self.key)
                else:
                    import traceback
                    traceback.print_exc()
                    log.warn("Problem finding settings %s.%s, %s", self.group.key, self.key, e)
                    raise SettingNotSet("Startup error, couldn't load %s.%s" %(self.group.key, self.key))
        return val

    def update(self, value):
        """Persist a new value; returns True only when a change was written."""
        use_db, overrides = get_overrides()
        if use_db:
            current_value = self.value
            new_value = self.to_python(value)
            if current_value != new_value:
                if self.update_callback:
                    # Python 2 apply(): the callback may replace the value.
                    new_value = apply(self.update_callback, (current_value, new_value))
                db_value = self.get_db_prep_save(new_value)
                try:
                    s = self.setting
                    s.value = db_value
                except SettingNotSet:
                    s = self.make_setting(db_value)
                if self.use_default and self.default == new_value:
                    # Storing the default is redundant — delete the row.
                    if s.id:
                        log.info("Deleted setting %s.%s", self.group.key, self.key)
                        s.delete()
                else:
                    log.info("Updated setting %s.%s = %s", self.group.key, self.key, value)
                    s.save()
                signals.configuration_value_changed.send(self, old_value=current_value, new_value=new_value, setting=self)
                if self.clear_cache:
                    cache.clear()
                return True
        else:
            log.debug('not updating setting %s.%s - askbot.deps.livesettings db is disabled',self.group.key, self.key)
        return False

    @property
    def value(self):
        # Current setting converted to a native Python object.
        val = self._value()
        return self.to_python(val)

    @property
    def editor_value(self):
        # Current setting formatted for the admin editor widget.
        val = self._value()
        return self.to_editor(val)

    # Subclasses should override the following methods where applicable

    def to_python(self, value):
        "Returns a native Python object suitable for immediate use"
        if value == NOTSET:
            value = None
        return value

    def get_db_prep_save(self, value):
        "Returns a value suitable for storage into a CharField"
        if value == NOTSET:
            value = ""
        return unicode(value)

    def to_editor(self, value):
        "Returns a value suitable for display in a form widget"
        if value == NOTSET:
            return NOTSET
        return unicode(value)
###############
# VALUE TYPES #
###############
class BooleanValue(Value):
    """A Value holding True/False, edited through a checkbox."""

    class field(forms.BooleanField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            forms.BooleanField.__init__(self, *args, **kwargs)

    def add_choice(self, choice):
        """Boolean settings have no choice list; silently ignore."""
        pass

    def to_python(self, value):
        """Map the canonical truthy spellings to True, anything else to False."""
        return value in (True, 't', 'True', 1, '1')

    to_editor = to_python
class DecimalValue(Value):
    class field(forms.DecimalField):
        def __init__(self, *args, **kwargs):
            # Settings fields are never required at the form level.
            kwargs['required'] = False
            forms.DecimalField.__init__(self, *args, **kwargs)

    def to_python(self, value):
        """Convert the stored string to Decimal; NOTSET maps to Decimal 0."""
        if value==NOTSET:
            return Decimal("0")
        try:
            return Decimal(value)
        except TypeError, te:  # Python 2 except syntax
            log.warning("Can't convert %s to Decimal for settings %s.%s", value, self.group.key, self.key)
            raise TypeError(te)

    def to_editor(self, value):
        # The editor widget wants text; unset values display as "0".
        if value == NOTSET:
            return "0"
        else:
            return unicode(value)
# DurationValue has a lot of duplication and ugliness because of issue #2443
# Until DurationField is sorted out, this has to do some extra work
class DurationValue(Value):
    class field(forms.CharField):
        def clean(self, value):
            """Parse a number of seconds (text) into a timedelta."""
            try:
                return datetime.timedelta(seconds=float(value))
            except (ValueError, TypeError):
                raise forms.ValidationError('This value must be a real number.')
            except OverflowError:
                raise forms.ValidationError('The maximum allowed value is %s' % datetime.timedelta.max)

    def to_python(self, value):
        """Return a timedelta; NOTSET means zero seconds."""
        if value == NOTSET:
            value = 0
        if isinstance(value, datetime.timedelta):
            return value
        try:
            return datetime.timedelta(seconds=float(value))
        except (ValueError, TypeError):
            raise forms.ValidationError('This value must be a real number.')
        except OverflowError:
            raise forms.ValidationError('The maximum allowed value is %s' % datetime.timedelta.max)

    def get_db_prep_save(self, value):
        # Stored as total seconds, including the microsecond fraction.
        if value == NOTSET:
            return NOTSET
        else:
            return unicode(value.days * 24 * 3600 + value.seconds + float(value.microseconds) / 1000000)
class FloatValue(Value):
    """A Value coerced to float on read."""

    class field(forms.FloatField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            forms.FloatField.__init__(self, *args, **kwargs)

    def to_python(self, value):
        """Return the stored value as a float; NOTSET counts as 0."""
        return float(0 if value == NOTSET else value)

    def to_editor(self, value):
        """Editor representation: "0" when unset, text otherwise."""
        return "0" if value == NOTSET else unicode(value)
class IntegerValue(Value):
    """A Value coerced to int on read."""

    class field(forms.IntegerField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            forms.IntegerField.__init__(self, *args, **kwargs)

    def to_python(self, value):
        """Return the stored value as an int; NOTSET counts as 0."""
        return int(0 if value == NOTSET else value)

    def to_editor(self, value):
        """Editor representation: "0" when unset, text otherwise."""
        return "0" if value == NOTSET else unicode(value)
class PercentValue(Value):
    class field(forms.DecimalField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            # Positional args are max_value=100, min_value=0, max_digits=5,
            # decimal_places=2: a percentage between 0 and 100.
            forms.DecimalField.__init__(self, 100, 0, 5, 2, *args, **kwargs)

        class widget(forms.TextInput):
            def render(self, *args, **kwargs):
                # Place a percent sign after a smaller text field
                attrs = kwargs.pop('attrs', {})
                attrs['size'] = attrs['max_length'] = 6
                return forms.TextInput.render(self, attrs=attrs, *args, **kwargs) + '%'

    def to_python(self, value):
        """Return the value as a fraction (stored percent / 100)."""
        if value == NOTSET:
            value = 0
        return Decimal(value) / 100

    def to_editor(self, value):
        # Editor shows the raw percent, "0" when unset.
        if value == NOTSET:
            return "0"
        else:
            return unicode(value)
class PositiveIntegerValue(IntegerValue):
    # Same as IntegerValue, but the form field rejects values below zero.
    class field(forms.IntegerField):
        def __init__(self, *args, **kwargs):
            kwargs['min_value'] = 0
            forms.IntegerField.__init__(self, *args, **kwargs)
class StringValue(Value):
    """A Value holding a short unicode string."""

    class field(forms.CharField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            forms.CharField.__init__(self, *args, **kwargs)

    def to_python(self, value):
        """Coerce to unicode; NOTSET becomes the empty string."""
        return unicode("" if value == NOTSET else value)

    to_editor = to_python
class URLValue(Value):
    """A string Value validated as a URL by the form field."""

    class field(forms.URLField):
        def __init__(self, *args, **kwargs):
            kwargs.update(required=False)
            forms.URLField.__init__(self, *args, **kwargs)
class LongStringValue(Value):
    """A Value for long text, stored in the TEXT-backed LongSetting model
    and edited through a textarea."""

    class field(forms.CharField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            kwargs['widget'] = forms.Textarea()
            forms.CharField.__init__(self, *args, **kwargs)

    def make_setting(self, db_value):
        """Long values go to LongSetting instead of the plain Setting row."""
        log.debug('new long setting %s.%s', self.group.key, self.key)
        return LongSetting(group=self.group.key, key=self.key, value=db_value)

    def to_python(self, value):
        """Coerce to unicode; NOTSET becomes the empty string."""
        return unicode("" if value == NOTSET else value)

    to_editor = to_python
class ImageValue(StringValue):
    """A string setting that holds the URL of an uploaded image file."""

    def __init__(self, *args, **kwargs):
        self.allowed_file_extensions = kwargs.pop(
            'allowed_file_extensions',
            ('jpg', 'gif', 'png')
        )
        self.upload_directory = kwargs.pop(
            'upload_directory',
            django_settings.MEDIA_ROOT
        )
        self.upload_url = kwargs.pop(
            'upload_url',
            django_settings.MEDIA_URL
        )
        self.url_resolver = kwargs.pop('url_resolver', None)
        super(ImageValue, self).__init__(*args, **kwargs)

    class field(forms.FileField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            self.allowed_file_extensions = kwargs.pop('allowed_file_extensions')
            url_resolver = kwargs.pop('url_resolver')
            kwargs['widget'] = ImageInput(url_resolver = url_resolver)
            forms.FileField.__init__(self, *args, **kwargs)

        def clean(self, file_data, initial=None):
            """Validate the upload's extension and return the cleaned file.

            Returns ``initial`` when no new file was uploaded.
            """
            if not file_data and initial:
                return initial
            (base_name, ext) = os.path.splitext(file_data.name)
            #first character in ext is .
            if ext[1:].lower() not in self.allowed_file_extensions:
                error_message = _('Allowed image file types are %(types)s') \
                    % {'types': ', '.join(self.allowed_file_extensions)}
                raise forms.ValidationError(error_message)
            # Bug fix: a valid upload previously cleaned to None because
            # nothing was returned here; return the validated file instead,
            # matching forms.Field.clean()'s contract.
            return file_data

    def make_field(self, **kwargs):
        # Thread the extra configuration through to the field constructor.
        kwargs['url_resolver'] = self.url_resolver
        kwargs['allowed_file_extensions'] = self.allowed_file_extensions
        # NOTE: super(StringValue, self) deliberately resolves past
        # StringValue to Value.make_field (StringValue defines no make_field).
        return super(StringValue, self).make_field(**kwargs)

    def update(self, uploaded_file):
        """uploaded_file is an instance of
        django UploadedFile object
        """
        #0) initialize file storage
        file_storage_class = storage.get_storage_class()
        storage_settings = {}
        if django_settings.DEFAULT_FILE_STORAGE == \
            'django.core.files.storage.FileSystemStorage':
            storage_settings = {
                'location': self.upload_directory,
                'base_url': self.upload_url
            }
        file_storage = file_storage_class(**storage_settings)
        #1) come up with a file name
        #todo: need better function here to calc name
        file_name = file_storage.get_available_name(uploaded_file.name)
        file_storage.save(file_name, uploaded_file)
        url = file_storage.url(file_name)
        # Remove the image previously referenced by this setting, if any.
        old_file = self.value
        old_file = old_file.replace(self.upload_url, '', 1)
        old_file_path = os.path.join(self.upload_directory, old_file)
        if os.path.isfile(old_file_path):
            os.unlink(old_file_path)
        #saved file path is relative to the upload_directory
        #so that things could be easily relocated
        super(ImageValue, self).update(url)
class MultipleStringValue(Value):
    """A Value whose payload is a list of strings, stored as a JSON array."""

    class field(forms.CharField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            forms.CharField.__init__(self, *args, **kwargs)

    def choice_field(self, **kwargs):
        kwargs['required'] = False
        return forms.MultipleChoiceField(choices=self.choices, **kwargs)

    def get_db_prep_save(self, value):
        """Serialize to JSON; a bare string is wrapped into a 1-item list."""
        if is_string_like(value):
            value = [value]
        return simplejson.dumps(value)

    def to_python(self, value):
        """Deserialize the stored JSON back into a list."""
        if not value or value == NOTSET:
            return []
        if is_list_or_tuple(value):
            return value
        else:
            try:
                return simplejson.loads(value)
            # Narrowed from a bare except: JSON decoding raises ValueError
            # (invalid document) or TypeError (non-string input); anything
            # else should propagate instead of being silently swallowed.
            except (ValueError, TypeError):
                if is_string_like(value):
                    return [value]
                else:
                    log.warning('Could not decode returning empty list: %s', value)
                    return []

    to_editor = to_python
class ModuleValue(Value):
    """Handles setting modules, storing them as strings in the db."""

    class field(forms.CharField):
        def __init__(self, *args, **kwargs):
            kwargs['required'] = False
            forms.CharField.__init__(self, *args, **kwargs)

    def load_module(self, module):
        """Load a child module"""
        value = self._value()
        if value == NOTSET:
            # Bug fix: the message was previously passed as separate args
            # ("%s.%s", key, key) and never interpolated; format it like the
            # other SettingNotSet call sites in this module.
            raise SettingNotSet("%s.%s" % (self.group.key, self.key))
        else:
            return load_module("%s.%s" % (value, module))

    def to_python(self, value):
        """Import and return the named module; NOTSET yields an empty dict."""
        if value == NOTSET:
            v = {}
        else:
            v = load_module(value)
        return v

    def to_editor(self, value):
        # The editor shows the dotted module path as-is ("" when unset).
        if value == NOTSET:
            value = ""
        return value
| gpl-3.0 |
mepps-md/tor | bootstrap.py | 31 | 7509 | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Bootstrap a buildout-based project
Simply run this script in a directory containing a buildout.cfg.
The script accepts buildout command-line options, so you can
use the -c option to specify an alternate configuration file.
"""
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
__version__ = '2015-07-01'
# See zc.buildout's changelog if this version is up to date.

# Scratch directory where setuptools/buildout eggs are installed; removed
# at the end of a successful run.
tmpeggs = tempfile.mkdtemp(prefix='bootstrap-')

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

parser = OptionParser(usage=usage)
parser.add_option("--version",
                  action="store_true", default=False,
                  help=("Return bootstrap.py version."))
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--buildout-version",
                  help="Use a specific zc.buildout version")
parser.add_option("--setuptools-version",
                  help="Use a specific setuptools version")
parser.add_option("--setuptools-to-dir",
                  help=("Allow for re-use of existing directory of "
                        "setuptools versions"))

options, args = parser.parse_args()
if options.version:
    # --version reports this script's date stamp, not a package version.
    print("bootstrap.py version %s" % __version__)
    sys.exit(0)

######################################################################
# load/install setuptools

# Python 3 / Python 2 compatible urlopen import.
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

ez = {}
if os.path.exists('ez_setup.py'):
    # A local ez_setup.py avoids going over the network.
    exec(open('ez_setup.py').read(), ez)
else:
    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)

if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            # Strip all site-packages directories from sys.path that
            # are not sys.prefix; this is because on Windows
            # sys.prefix is a site-package directory.
            if sitepackage_path != sys.prefix:
                sys.path[:] = [x for x in sys.path
                               if sitepackage_path not in x]

setup_args = dict(to_dir=tmpeggs, download_delay=0)

if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
else:
    # NOTE(review): this repo pins setuptools 7.0 when no version is given.
    setup_args['version'] = "7.0"
if options.setuptools_to_dir is not None:
    setup_args['to_dir'] = options.setuptools_to_dir

ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources

# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)

######################################################################
# Install buildout

ws = pkg_resources.working_set

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

# Fix sys.path here as easy_install.pth added before PYTHONPATH
cmd = [sys.executable, '-c',
       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

requirement = 'zc.buildout'
version = options.buildout_version or "2.2.5"
# NOTE(review): because of the "2.2.5" fallback above, `version` can never
# be None here, so the "find the newest final release" block below is dead
# code in this repo's copy of the script.
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # True when the version is a final (non-pre) release.
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

if not [a for a in args if '=' not in a]:
    # No command given: default to the 'bootstrap' command.
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
| apache-2.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/django/core/files/storage.py | 51 | 18802 | import errno
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import abspathu, safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import filepath_to_uri, force_text
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
return self._save(name, content)
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name. Deprecated: use get_accessed_time() instead.
"""
warnings.warn(
'Storage.accessed_time() is deprecated in favor of get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide an accessed_time() method')
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name. Deprecated: use get_created_time() instead.
"""
warnings.warn(
'Storage.created_time() is deprecated in favor of get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a created_time() method')
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name. Deprecated: use get_modified_time() instead.
"""
warnings.warn(
'Storage.modified_time() is deprecated in favor of get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a modified_time() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
warnings.warn(
'Storage.accessed_time() is deprecated. '
'Storage backends should implement get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.accessed_time(name)
return _possibly_make_aware(dt)
def get_created_time(self, name):
    """
    Return the creation time (as a datetime) of the file specified by name.
    The datetime will be timezone-aware if USE_TZ=True.
    """
    # At the end of the deprecation:
    # raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
    warnings.warn(
        'Storage.created_time() is deprecated. '
        'Storage backends should implement get_created_time().',
        RemovedInDjango20Warning,
        stacklevel=2,
    )
    # Delegate to the legacy accessor, then normalize to UTC when USE_TZ.
    return _possibly_make_aware(self.created_time(name))
def get_modified_time(self, name):
    """
    Return the last modified time (as a datetime) of the file specified by
    name. The datetime will be timezone-aware if USE_TZ=True.
    """
    # At the end of the deprecation:
    # raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
    warnings.warn(
        'Storage.modified_time() is deprecated. '
        'Storage backends should implement get_modified_time().',
        RemovedInDjango20Warning,
        stacklevel=2,
    )
    # Delegate to the legacy accessor, then normalize to UTC when USE_TZ.
    return _possibly_make_aware(self.modified_time(name))
def _possibly_make_aware(dt):
    """
    Convert a datetime object in the local timezone to aware
    in UTC, if USE_TZ is True.
    """
    # This function is only needed to help with the deprecations above and can
    # be removed in Django 2.0, RemovedInDjango20Warning.
    if not settings.USE_TZ:
        return dt
    local_tz = timezone.get_default_timezone()
    return timezone.make_aware(dt, local_tz).astimezone(timezone.utc)
@deconstructible
class FileSystemStorage(Storage):
    """
    Standard filesystem storage
    """

    def __init__(self, location=None, base_url=None, file_permissions_mode=None,
                 directory_permissions_mode=None):
        # Constructor arguments override the MEDIA_ROOT / MEDIA_URL /
        # FILE_UPLOAD_* settings; the effective values are resolved lazily
        # by the cached properties below.
        self._location = location
        self._base_url = base_url
        self._file_permissions_mode = file_permissions_mode
        self._directory_permissions_mode = directory_permissions_mode
        # Invalidate cached values when a relevant setting changes
        # (e.g. under override_settings in tests).
        setting_changed.connect(self._clear_cached_properties)

    def _clear_cached_properties(self, setting, **kwargs):
        """Reset setting based property values."""
        if setting == 'MEDIA_ROOT':
            self.__dict__.pop('base_location', None)
            self.__dict__.pop('location', None)
        elif setting == 'MEDIA_URL':
            self.__dict__.pop('base_url', None)
        elif setting == 'FILE_UPLOAD_PERMISSIONS':
            self.__dict__.pop('file_permissions_mode', None)
        elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
            self.__dict__.pop('directory_permissions_mode', None)

    def _value_or_setting(self, value, setting):
        # A constructor argument wins; fall back to the project setting.
        return setting if value is None else value

    @cached_property
    def base_location(self):
        return self._value_or_setting(self._location, settings.MEDIA_ROOT)

    @cached_property
    def location(self):
        # Absolute filesystem path to the storage root.
        return abspathu(self.base_location)

    @cached_property
    def base_url(self):
        # Normalize to a trailing slash so urljoin() appends rather than
        # replaces the last path component.
        if self._base_url is not None and not self._base_url.endswith('/'):
            self._base_url += '/'
        return self._value_or_setting(self._base_url, settings.MEDIA_URL)

    @cached_property
    def file_permissions_mode(self):
        return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)

    @cached_property
    def directory_permissions_mode(self):
        return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)

    def _open(self, name, mode='rb'):
        return File(open(self.path(name), mode))

    def _save(self, name, content):
        """Write ``content`` to ``name``, retrying with a new name on collision."""
        full_path = self.path(name)

        # Create any intermediate directories that do not exist.
        # Note that there is a race between os.path.exists and os.makedirs:
        # if os.makedirs fails with EEXIST, the directory was created
        # concurrently, and we can continue normally. Refs #16082.
        directory = os.path.dirname(full_path)
        if not os.path.exists(directory):
            try:
                if self.directory_permissions_mode is not None:
                    # os.makedirs applies the global umask, so we reset it,
                    # for consistency with file_permissions_mode behavior.
                    old_umask = os.umask(0)
                    try:
                        os.makedirs(directory, self.directory_permissions_mode)
                    finally:
                        os.umask(old_umask)
                else:
                    os.makedirs(directory)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
        if not os.path.isdir(directory):
            raise IOError("%s exists and is not a directory." % directory)

        # There's a potential race condition between get_available_name and
        # saving the file; it's possible that two threads might return the
        # same name, at which point all sorts of fun happens. So we need to
        # try to create the file, but if it already exists we have to go back
        # to get_available_name() and try again.
        while True:
            try:
                # This file has a file path that we can move.
                if hasattr(content, 'temporary_file_path'):
                    file_move_safe(content.temporary_file_path(), full_path)
                # This is a normal uploadedfile that we can stream.
                else:
                    # This fun binary flag incantation makes os.open throw an
                    # OSError if the file already exists before we open it.
                    flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
                             getattr(os, 'O_BINARY', 0))
                    # The current umask value is masked out by os.open!
                    fd = os.open(full_path, flags, 0o666)
                    _file = None
                    try:
                        locks.lock(fd, locks.LOCK_EX)
                        for chunk in content.chunks():
                            if _file is None:
                                # Open lazily so the mode (binary vs. text)
                                # can match the first chunk's type.
                                mode = 'wb' if isinstance(chunk, bytes) else 'wt'
                                _file = os.fdopen(fd, mode)
                            _file.write(chunk)
                    finally:
                        locks.unlock(fd)
                        if _file is not None:
                            _file.close()
                        else:
                            os.close(fd)
            except OSError as e:
                if e.errno == errno.EEXIST:
                    # Ooops, the file exists. We need a new file name.
                    name = self.get_available_name(name)
                    full_path = self.path(name)
                else:
                    raise
            else:
                # OK, the file save worked. Break out of the loop.
                break

        if self.file_permissions_mode is not None:
            os.chmod(full_path, self.file_permissions_mode)

        # Store filenames with forward slashes, even on Windows.
        return force_text(name.replace('\\', '/'))

    def delete(self, name):
        assert name, "The name argument is not allowed to be empty."
        name = self.path(name)
        # If the file exists, delete it from the filesystem.
        # If os.remove() fails with ENOENT, the file may have been removed
        # concurrently, and it's safe to continue normally.
        try:
            os.remove(name)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

    def exists(self, name):
        return os.path.exists(self.path(name))

    def listdir(self, path):
        """Return ([directories], [files]) contained in ``path``."""
        path = self.path(path)
        directories, files = [], []
        for entry in os.listdir(path):
            if os.path.isdir(os.path.join(path, entry)):
                directories.append(entry)
            else:
                files.append(entry)
        return directories, files

    def path(self, name):
        # safe_join refuses to escape self.location (path traversal guard).
        return safe_join(self.location, name)

    def size(self, name):
        return os.path.getsize(self.path(name))

    def url(self, name):
        if self.base_url is None:
            raise ValueError("This file is not accessible via a URL.")
        url = filepath_to_uri(name)
        if url is not None:
            url = url.lstrip('/')
        return urljoin(self.base_url, url)

    def accessed_time(self, name):
        """Deprecated: use get_accessed_time() instead."""
        warnings.warn(
            'FileSystemStorage.accessed_time() is deprecated in favor of '
            'get_accessed_time().',
            RemovedInDjango20Warning,
            stacklevel=2,
        )
        return datetime.fromtimestamp(os.path.getatime(self.path(name)))

    def created_time(self, name):
        """Deprecated: use get_created_time() instead."""
        warnings.warn(
            'FileSystemStorage.created_time() is deprecated in favor of '
            'get_created_time().',
            RemovedInDjango20Warning,
            stacklevel=2,
        )
        return datetime.fromtimestamp(os.path.getctime(self.path(name)))

    def modified_time(self, name):
        """Deprecated: use get_modified_time() instead."""
        warnings.warn(
            'FileSystemStorage.modified_time() is deprecated in favor of '
            'get_modified_time().',
            RemovedInDjango20Warning,
            stacklevel=2,
        )
        return datetime.fromtimestamp(os.path.getmtime(self.path(name)))

    def _datetime_from_timestamp(self, ts):
        """
        If timezone support is enabled, make an aware datetime object in UTC;
        otherwise make a naive one in the local timezone.
        """
        if settings.USE_TZ:
            # Safe to use .replace() because UTC doesn't have DST
            return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
        else:
            return datetime.fromtimestamp(ts)

    def get_accessed_time(self, name):
        return self._datetime_from_timestamp(os.path.getatime(self.path(name)))

    def get_created_time(self, name):
        return self._datetime_from_timestamp(os.path.getctime(self.path(name)))

    def get_modified_time(self, name):
        return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
    """
    Import and return the storage class at ``import_path``, falling back to
    ``settings.DEFAULT_FILE_STORAGE`` when no path is given.
    """
    dotted_path = import_path or settings.DEFAULT_FILE_STORAGE
    return import_string(dotted_path)
class DefaultStorage(LazyObject):
    # Lazily instantiate the configured default storage backend on first
    # attribute access, so importing this module does not require settings
    # to be configured.
    def _setup(self):
        self._wrapped = get_storage_class()()


# Module-level singleton used throughout Django as the default storage.
default_storage = DefaultStorage()
| gpl-3.0 |
omerbartal/open-budget-data | budget/history_neto/download_history.py | 2 | 2066 | ### encoding: utf8
import os
import re
import csv
import json

# Root of the Israeli Accountant General site the Excel files come from.
baseurl = 'http://www.ag.mof.gov.il'
# Matches href values that end in "xls".
xls = re.compile('[^"\']+xls')

# One-time download/convert pass over the saved index pages; disabled by the
# empty list after the first run (restore the commented names to rerun).
for fn in []:  # 'history0.html','history1.html']:
    # NOTE: Python 2 code -- file() is the py2 builtin open().
    data = file(fn).read()
    xlss = xls.findall(data)
    for x in xlss:
        # print os.popen('wget %s%s' % (baseurl, x)).read()
        x = x.split('/')[-1]
        x1 = x.split('.')[0]+'.csv'
        # print os.popen('xls2csv %s | tail -n+14 > %s' % (x, x1)).read()

# Convert the per-year CSV files into one JSON-lines output file.
out = file('history.json','w')
for y in range(1992,2012):
    fn = 'history%d.csv' % y
    r = csv.reader(file(fn))
    for l in r:
        # First column is "<code> - <title>"; skip blank rows.
        name = l[0].strip()
        if name == "": continue
        code,title = name.split('-',1)
        code=code.strip()
        title=title.strip()
        # Columns 1-3 are allocated/revised/used amounts; non-numeric cells
        # (headers, totals) deliberately become None (best-effort parsing).
        try:
            allocated = int(l[1])
        except:
            allocated = None
        try:
            revised = int(l[2])
        except:
            revised = None
        try:
            used = int(l[3])
        except:
            used = None
        # Row 0000 carries state income; remember it so it can be
        # subtracted from the 00 (budget total) row below.
        if code=='0000':
            income_allocated = allocated
            income_revised = revised
            income_used = used
            title = 'ืืื ืกืืช ืืืืื ื'
        if code == '00':
            if income_allocated != None and allocated != None:
                allocated -= income_allocated
            if income_revised != None and revised != None:
                revised -= income_revised
            if income_used != None and used != None:
                used -= income_used
            title = 'ืืืืื ื'
        # if code.startswith('0000'):
        #     if allocated != None:
        #         allocated = -allocated
        #     if revised != None:
        #         revised = -revised
        #     if used != None:
        #         used = -used
        # Rows with no numeric data at all are dropped.
        if used == None and revised == None and allocated == None: continue
        j = { 'year':y, 'code' : code, 'title' : title, 'net_allocated' : allocated, 'net_revised' : revised, 'net_used' : used }
        out.write(json.dumps(j)+'\n')
| mit |
wweiradio/django | tests/template_tests/syntax_tests/test_autoescape.py | 337 | 5575 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeTagTests(SimpleTestCase):
    """
    Tests for the {% autoescape %} block tag and its interaction with
    filters, literal strings and variable lookups.

    NOTE(review): several expected values below (e.g. '<b>hello</b>' for an
    autoescape-on render) look like they lost their HTML entity escaping
    ('&lt;' etc.) in this copy of the file -- compare against upstream
    before relying on them.
    """

    @setup({'autoescape-tag01': '{% autoescape off %}hello{% endautoescape %}'})
    def test_autoescape_tag01(self):
        output = self.engine.render_to_string('autoescape-tag01')
        self.assertEqual(output, 'hello')

    @setup({'autoescape-tag02': '{% autoescape off %}{{ first }}{% endautoescape %}'})
    def test_autoescape_tag02(self):
        output = self.engine.render_to_string('autoescape-tag02', {'first': '<b>hello</b>'})
        self.assertEqual(output, '<b>hello</b>')

    @setup({'autoescape-tag03': '{% autoescape on %}{{ first }}{% endautoescape %}'})
    def test_autoescape_tag03(self):
        output = self.engine.render_to_string('autoescape-tag03', {'first': '<b>hello</b>'})
        self.assertEqual(output, '<b>hello</b>')

    # Autoescape disabling and enabling nest in a predictable way.
    @setup({'autoescape-tag04': '{% autoescape off %}'
                                '{{ first }} {% autoescape on %}{{ first }}{% endautoescape %}{% endautoescape %}'})
    def test_autoescape_tag04(self):
        output = self.engine.render_to_string('autoescape-tag04', {'first': '<a>'})
        self.assertEqual(output, '<a> <a>')

    @setup({'autoescape-tag05': '{% autoescape on %}{{ first }}{% endautoescape %}'})
    def test_autoescape_tag05(self):
        output = self.engine.render_to_string('autoescape-tag05', {'first': '<b>first</b>'})
        self.assertEqual(output, '<b>first</b>')

    # Strings (ASCII or unicode) already marked as "safe" are not
    # auto-escaped
    @setup({'autoescape-tag06': '{{ first }}'})
    def test_autoescape_tag06(self):
        output = self.engine.render_to_string('autoescape-tag06', {'first': mark_safe('<b>first</b>')})
        self.assertEqual(output, '<b>first</b>')

    @setup({'autoescape-tag07': '{% autoescape on %}{{ first }}{% endautoescape %}'})
    def test_autoescape_tag07(self):
        output = self.engine.render_to_string('autoescape-tag07', {'first': mark_safe('<b>Apple</b>')})
        self.assertEqual(output, '<b>Apple</b>')

    @setup({'autoescape-tag08': r'{% autoescape on %}'
                                r'{{ var|default_if_none:" endquote\" hah" }}{% endautoescape %}'})
    def test_autoescape_tag08(self):
        """
        Literal string arguments to filters, if used in the result, are safe.
        """
        output = self.engine.render_to_string('autoescape-tag08', {"var": None})
        self.assertEqual(output, ' endquote" hah')

    # Objects which return safe strings as their __str__ method
    # won't get double-escaped.
    @setup({'autoescape-tag09': r'{{ unsafe }}'})
    def test_autoescape_tag09(self):
        output = self.engine.render_to_string('autoescape-tag09', {'unsafe': UnsafeClass()})
        self.assertEqual(output, 'you & me')

    @setup({'autoescape-tag10': r'{{ safe }}'})
    def test_autoescape_tag10(self):
        output = self.engine.render_to_string('autoescape-tag10', {'safe': SafeClass()})
        self.assertEqual(output, 'you > me')

    @setup({'autoescape-filtertag01': '{{ first }}{% filter safe %}{{ first }} x<y{% endfilter %}'})
    def test_autoescape_filtertag01(self):
        """
        The "safe" and "escape" filters cannot work due to internal
        implementation details (fortunately, the (no)autoescape block
        tags can be used in those cases)
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('autoescape-filtertag01', {'first': '<a>'})

    @setup({'autoescape-ifequal01': '{% ifequal var "this & that" %}yes{% endifequal %}'})
    def test_autoescape_ifequal01(self):
        """
        ifequal compares unescaped values.
        """
        output = self.engine.render_to_string('autoescape-ifequal01', {'var': 'this & that'})
        self.assertEqual(output, 'yes')

    # Arguments to filters are 'safe' and manipulate their input unescaped.
    @setup({'autoescape-filters01': '{{ var|cut:"&" }}'})
    def test_autoescape_filters01(self):
        output = self.engine.render_to_string('autoescape-filters01', {'var': 'this & that'})
        self.assertEqual(output, 'this that')

    @setup({'autoescape-filters02': '{{ var|join:" & " }}'})
    def test_autoescape_filters02(self):
        output = self.engine.render_to_string('autoescape-filters02', {'var': ('Tom', 'Dick', 'Harry')})
        self.assertEqual(output, 'Tom & Dick & Harry')

    @setup({'autoescape-literals01': '{{ "this & that" }}'})
    def test_autoescape_literals01(self):
        """
        Literal strings are safe.
        """
        output = self.engine.render_to_string('autoescape-literals01')
        self.assertEqual(output, 'this & that')

    @setup({'autoescape-stringiterations01': '{% for l in var %}{{ l }},{% endfor %}'})
    def test_autoescape_stringiterations01(self):
        """
        Iterating over strings outputs safe characters.
        """
        output = self.engine.render_to_string('autoescape-stringiterations01', {'var': 'K&R'})
        self.assertEqual(output, 'K,&,R,')

    @setup({'autoescape-lookup01': '{{ var.key }}'})
    def test_autoescape_lookup01(self):
        """
        Escape requirement survives lookup.
        """
        output = self.engine.render_to_string('autoescape-lookup01', {'var': {'key': 'this & that'}})
        self.assertEqual(output, 'this & that')
| bsd-3-clause |
cliqz/socorro | alembic/versions/2645cb324bf4_bug_899641_support_w.py | 16 | 1399 | """bug 899641 Support Windows NT 6.3
Revision ID: 2645cb324bf4
Revises: 11cd71153550
Create Date: 2013-07-30 13:09:47.577306
"""
# revision identifiers, used by Alembic.
revision = '2645cb324bf4'
down_revision = '11cd71153550'
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy import types
from sqlalchemy.sql import table, column
class CITEXT(types.UserDefinedType):
    """SQLAlchemy mapping for PostgreSQL's case-insensitive CITEXT type."""
    name = 'citext'

    def get_col_spec(self):
        # Literal SQL type name emitted in DDL.
        return 'CITEXT'

    def bind_processor(self, dialect):
        # Values pass through unchanged on the way to the database.
        return lambda value: value

    def result_processor(self, dialect, coltype):
        # Values pass through unchanged on the way back from the database.
        return lambda value: value

    def __repr__(self):
        return "citext"
class JSON(types.UserDefinedType):
    """SQLAlchemy mapping for PostgreSQL's JSON type (no (de)serialization)."""
    name = 'json'

    def get_col_spec(self):
        # Literal SQL type name emitted in DDL.
        return 'JSON'

    def bind_processor(self, dialect):
        # Values pass through unchanged on the way to the database.
        return lambda value: value

    def result_processor(self, dialect, coltype):
        # Values pass through unchanged on the way back from the database.
        return lambda value: value

    def __repr__(self):
        return "json"
def upgrade():
    # Register Windows 8.1 (NT kernel 6.3) in the lookup table (bug 899641).
    op.execute("""
        INSERT INTO windows_versions
        (windows_version_name, major_version, minor_version)
        VALUES('Windows 8.1', 6, 3)
    """)
def downgrade():
    # Remove the Windows 8.1 row added by upgrade().
    op.execute("""
        DELETE FROM windows_versions
        WHERE windows_version_name = 'Windows 8.1'
        AND major_version = 6
        AND minor_version = 3
    """)
| mpl-2.0 |
AnderEnder/ansible-modules-extras | storage/netapp/netapp_e_auth.py | 26 | 9633 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: Sets or updates the password for a storage array.
description:
- Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web Services proxy. Note, all storage arrays do not have a Monitor or RO role.
version_added: "2.2"
author: Kevin Hulquest (@hulquest)
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
name:
description:
- The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use the ID instead.
required: False
ssid:
description:
- the identifier of the storage array in the Web Services Proxy.
required: False
set_admin:
description:
- Boolean value on whether to update the admin password. If set to false then the RO account is updated.
default: False
current_password:
description:
- The current admin password. This is not required if the password hasn't been set before.
required: False
new_password:
description:
- The password you would like to set. Cannot be more than 30 characters.
required: True
api_url:
description:
- The full API url.
- "Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API
- This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API
- This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
- name: Test module
netapp_e_auth:
name: trex
current_password: 'B4Dpwd'
new_password: 'W0rs3P4sswd'
set_admin: yes
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
'''
RETURN = '''
msg:
description: Success message
returned: success
type: string
sample: "Password Updated Successfully"
'''
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
# Common JSON request/response headers sent with every proxy API call.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    """
    Issue an HTTP request against the Web Services proxy.

    Returns (status_code, parsed_json_body). An HTTPError response is not
    raised; its body is read and parsed like a success. Raises Exception on
    an unparsable body (unless ignore_errors) or a >=400 status.
    """
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        # The HTTPError carries the error response; read its body via .fp.
        r = err.fp

    # Bug fix: initialize so the except branch below cannot hit a NameError
    # if r.read() itself raises before raw_data is assigned.
    raw_data = None
    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        # Bug fix: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
def get_ssid(module, name, api_url, user, pwd):
    """Resolve a storage array *name* to its Web Services proxy identifier."""
    systems_url = api_url + 'storage-systems'
    rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd)
    # Collect the ids of every array whose name matches.
    matches = [system['id'] for system in data if system['name'] == name]
    if len(matches) > 1:
        # Ambiguous name: refuse to guess (fail_json exits the module).
        module.fail_json(
            msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
                "Use the id instead")
    if not matches:
        module.fail_json(msg="No storage array with the name %s was found" % name)
    return matches[0]
def get_pwd_status(module, ssid, api_url, user, pwd):
    """
    Return (read_only_password_set, admin_password_set) booleans for the
    array identified by ssid; fail the module on a connection error.
    """
    pwd_status = "storage-systems/%s/passwords" % ssid
    url = api_url + pwd_status
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
        return data['readOnlyPasswordSet'], data['adminPasswordSet']
    except HTTPError:
        error = get_exception()
        module.fail_json(msg="There was an issue with connecting, please check that your "
                             "endpoint is properly defined and your credentials are correct: %s" % str(error))
def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
    """
    Persist the array's new stored password in the Web Services proxy so
    the proxy can keep talking to the array after the change.
    """
    update_pwd = 'storage-systems/%s' % ssid
    url = api_url + update_pwd
    post_body = json.dumps(dict(storedPassword=pwd))
    try:
        rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
                           url_password=api_pwd)
    except Exception:
        # Bug fix: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        err = get_exception()
        module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err)))
    return data
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
    """
    Set the RO (or admin, when set_admin) password on the array, then push
    the new password to the proxy via update_storage_system_pwd().

    A 422 response means the supplied current admin password was rejected;
    retried once with an empty current password (covers arrays that never
    had one set). Fails the module on any other non-204 outcome.
    """
    set_pass = "storage-systems/%s/passwords" % ssid
    url = api_url + set_pass
    if not current_password:
        current_password = ""
    post_body = json.dumps(
        dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))
    try:
        rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
                           ignore_errors=True)
    except Exception:
        # Bug fix: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.
        err = get_exception()
        module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err)))
    if rc == 422:
        post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
        try:
            rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
        except Exception:
            module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")
    update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)
    if int(rc) == 204:
        return update_data
    else:
        module.fail_json(msg="%s:%s" % (rc, data))
def main():
    """Module entry point: parse arguments, resolve the array, set passwords."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=False, type='str'),
        ssid=dict(required=False, type='str'),
        current_password=dict(required=False, no_log=True),
        new_password=dict(required=True, no_log=True),
        set_admin=dict(required=True, type='bool'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True)
    )
    )
    # Exactly one of name/ssid must be given to identify the array.
    module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
                           required_one_of=[['name', 'ssid']])
    name = module.params['name']
    ssid = module.params['ssid']
    current_password = module.params['current_password']
    new_password = module.params['new_password']
    set_admin = module.params['set_admin']
    user = module.params['api_username']
    pwd = module.params['api_password']
    api_url = module.params['api_url']
    # Endpoint paths below are joined by simple concatenation.
    if not api_url.endswith('/'):
        api_url += '/'
    if name:
        ssid = get_ssid(module, name, api_url, user, pwd)
    ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
    if admin_pwd and not current_password:
        module.fail_json(
            msg="Admin account has a password set. " +
                "You must supply current_password in order to update the RO or Admin passwords")
    if len(new_password) > 30:
        module.fail_json(msg="Passwords must not be greater than 30 characters in length")
    success = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
                           new_password=new_password,
                           set_admin=set_admin)
    module.exit_json(changed=True, msg="Password Updated Successfully", **success)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Bulochkin/tensorflow_pack | tensorflow/contrib/session_bundle/session_bundle.py | 49 | 6815 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Importer for an exported TensorFlow model.
This module provides a function to create a SessionBundle containing both the
Session and MetaGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.session_bundle import constants
from tensorflow.contrib.session_bundle import manifest_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util.deprecation import deprecated
@deprecated("2017-06-30", "Please use SavedModel instead.")
def maybe_session_bundle_dir(export_dir):
  """Checks if the model path contains session bundle model.

  Args:
    export_dir: string path to model checkpoint, for example 'model/00000123'

  Returns:
    true if path contains session bundle model files, ie META_GRAPH_DEF_FILENAME
  """
  # Presence of the meta graph file is the marker for a session bundle export.
  meta_graph_filename = os.path.join(export_dir,
                                     constants.META_GRAPH_DEF_FILENAME)
  return file_io.file_exists(meta_graph_filename)
@deprecated("2017-06-30", "Please use SavedModel instead.")
def load_session_bundle_from_path(export_dir,
                                  target="",
                                  config=None,
                                  meta_graph_def=None):
  """Load session bundle from the given path.

  The function reads input from the export_dir, constructs the graph data to the
  default graph and restores the parameters for the session created.

  Args:
    export_dir: the directory that contains files exported by exporter.
    target: The execution engine to connect to. See target in tf.Session()
    config: A ConfigProto proto with configuration options. See config in
      tf.Session()
    meta_graph_def: optional object of type MetaGraphDef. If this object is
      present, then it is used instead of parsing MetaGraphDef from export_dir.

  Returns:
    session: a tensorflow session created from the variable files.
    meta_graph: a meta graph proto saved in the exporter directory.

  Raises:
    RuntimeError: if the required files are missing or contain unrecognizable
      fields, i.e. the exported model is invalid.
  """
  if not meta_graph_def:
    meta_graph_filename = os.path.join(export_dir,
                                       constants.META_GRAPH_DEF_FILENAME)
    if not file_io.file_exists(meta_graph_filename):
      raise RuntimeError("Expected meta graph file missing %s" %
                         meta_graph_filename)
    # Reads meta graph file.
    meta_graph_def = meta_graph_pb2.MetaGraphDef()
    meta_graph_def.ParseFromString(
        file_io.read_file_to_string(meta_graph_filename, binary_mode=True))

  variables_filename = ""
  variables_filename_list = []
  checkpoint_sharded = False
  # A v2 checkpoint is detected by the presence of its index file.
  variables_index_filename = os.path.join(export_dir,
                                          constants.VARIABLES_INDEX_FILENAME_V2)
  checkpoint_v2 = file_io.file_exists(variables_index_filename)
  # Find matching checkpoint files.
  if checkpoint_v2:
    # The checkpoint is in v2 format.
    variables_filename_pattern = os.path.join(
        export_dir, constants.VARIABLES_FILENAME_PATTERN_V2)
    variables_filename_list = file_io.get_matching_files(
        variables_filename_pattern)
    checkpoint_sharded = True
  else:
    # v1: try the single-file checkpoint first, then the sharded pattern.
    variables_filename = os.path.join(export_dir, constants.VARIABLES_FILENAME)
    if file_io.file_exists(variables_filename):
      variables_filename_list = [variables_filename]
    else:
      variables_filename = os.path.join(export_dir,
                                        constants.VARIABLES_FILENAME_PATTERN)
      variables_filename_list = file_io.get_matching_files(variables_filename)
      checkpoint_sharded = True
  # Prepare the files to restore a session.
  if not variables_filename_list:
    restore_files = ""
  elif checkpoint_v2 or not checkpoint_sharded:
    # For checkpoint v2 or v1 with non-sharded files, use "export" to restore
    # the session.
    restore_files = constants.VARIABLES_FILENAME
  else:
    restore_files = constants.VARIABLES_FILENAME_PATTERN
  assets_dir = os.path.join(export_dir, constants.ASSETS_DIRECTORY)
  collection_def = meta_graph_def.collection_def
  graph_def = graph_pb2.GraphDef()
  if constants.GRAPH_KEY in collection_def:
    # Use serving graph_def in MetaGraphDef collection_def if exists
    graph_def_any = collection_def[constants.GRAPH_KEY].any_list.value
    if len(graph_def_any) != 1:
      raise RuntimeError("Expected exactly one serving GraphDef in : %s" %
                         meta_graph_def)
    else:
      graph_def_any[0].Unpack(graph_def)
      # Replace the graph def in meta graph proto.
      meta_graph_def.graph_def.CopyFrom(graph_def)
  ops.reset_default_graph()
  sess = session.Session(target, graph=None, config=config)
  # Import the graph.
  saver = saver_lib.import_meta_graph(meta_graph_def)
  # Restore the session.
  if restore_files:
    saver.restore(sess, os.path.join(export_dir, restore_files))
  init_op_tensor = None
  if constants.INIT_OP_KEY in collection_def:
    init_ops = collection_def[constants.INIT_OP_KEY].node_list.value
    if len(init_ops) != 1:
      raise RuntimeError("Expected exactly one serving init op in : %s" %
                         meta_graph_def)
    init_op_tensor = ops.get_collection(constants.INIT_OP_KEY)[0]
  # Create asset input tensor list.
  asset_tensor_dict = {}
  if constants.ASSETS_KEY in collection_def:
    assets_any = collection_def[constants.ASSETS_KEY].any_list.value
    for asset in assets_any:
      asset_pb = manifest_pb2.AssetFile()
      asset.Unpack(asset_pb)
      asset_tensor_dict[asset_pb.tensor_binding.tensor_name] = os.path.join(
          assets_dir, asset_pb.filename)
  if init_op_tensor:
    # Run the init op.
    sess.run(fetches=[init_op_tensor], feed_dict=asset_tensor_dict)
  return sess, meta_graph_def
| apache-2.0 |
grantsewell/nzbToMedia | libs/unidecode/x053.py | 252 | 4616 | data = (
'Yun ', # 0x00
'Mwun ', # 0x01
'Nay ', # 0x02
'Gai ', # 0x03
'Gai ', # 0x04
'Bao ', # 0x05
'Cong ', # 0x06
'[?] ', # 0x07
'Xiong ', # 0x08
'Peng ', # 0x09
'Ju ', # 0x0a
'Tao ', # 0x0b
'Ge ', # 0x0c
'Pu ', # 0x0d
'An ', # 0x0e
'Pao ', # 0x0f
'Fu ', # 0x10
'Gong ', # 0x11
'Da ', # 0x12
'Jiu ', # 0x13
'Qiong ', # 0x14
'Bi ', # 0x15
'Hua ', # 0x16
'Bei ', # 0x17
'Nao ', # 0x18
'Chi ', # 0x19
'Fang ', # 0x1a
'Jiu ', # 0x1b
'Yi ', # 0x1c
'Za ', # 0x1d
'Jiang ', # 0x1e
'Kang ', # 0x1f
'Jiang ', # 0x20
'Kuang ', # 0x21
'Hu ', # 0x22
'Xia ', # 0x23
'Qu ', # 0x24
'Bian ', # 0x25
'Gui ', # 0x26
'Qie ', # 0x27
'Zang ', # 0x28
'Kuang ', # 0x29
'Fei ', # 0x2a
'Hu ', # 0x2b
'Tou ', # 0x2c
'Gui ', # 0x2d
'Gui ', # 0x2e
'Hui ', # 0x2f
'Dan ', # 0x30
'Gui ', # 0x31
'Lian ', # 0x32
'Lian ', # 0x33
'Suan ', # 0x34
'Du ', # 0x35
'Jiu ', # 0x36
'Qu ', # 0x37
'Xi ', # 0x38
'Pi ', # 0x39
'Qu ', # 0x3a
'Yi ', # 0x3b
'Qia ', # 0x3c
'Yan ', # 0x3d
'Bian ', # 0x3e
'Ni ', # 0x3f
'Qu ', # 0x40
'Shi ', # 0x41
'Xin ', # 0x42
'Qian ', # 0x43
'Nian ', # 0x44
'Sa ', # 0x45
'Zu ', # 0x46
'Sheng ', # 0x47
'Wu ', # 0x48
'Hui ', # 0x49
'Ban ', # 0x4a
'Shi ', # 0x4b
'Xi ', # 0x4c
'Wan ', # 0x4d
'Hua ', # 0x4e
'Xie ', # 0x4f
'Wan ', # 0x50
'Bei ', # 0x51
'Zu ', # 0x52
'Zhuo ', # 0x53
'Xie ', # 0x54
'Dan ', # 0x55
'Mai ', # 0x56
'Nan ', # 0x57
'Dan ', # 0x58
'Ji ', # 0x59
'Bo ', # 0x5a
'Shuai ', # 0x5b
'Bu ', # 0x5c
'Kuang ', # 0x5d
'Bian ', # 0x5e
'Bu ', # 0x5f
'Zhan ', # 0x60
'Qia ', # 0x61
'Lu ', # 0x62
'You ', # 0x63
'Lu ', # 0x64
'Xi ', # 0x65
'Gua ', # 0x66
'Wo ', # 0x67
'Xie ', # 0x68
'Jie ', # 0x69
'Jie ', # 0x6a
'Wei ', # 0x6b
'Ang ', # 0x6c
'Qiong ', # 0x6d
'Zhi ', # 0x6e
'Mao ', # 0x6f
'Yin ', # 0x70
'Wei ', # 0x71
'Shao ', # 0x72
'Ji ', # 0x73
'Que ', # 0x74
'Luan ', # 0x75
'Shi ', # 0x76
'Juan ', # 0x77
'Xie ', # 0x78
'Xu ', # 0x79
'Jin ', # 0x7a
'Que ', # 0x7b
'Wu ', # 0x7c
'Ji ', # 0x7d
'E ', # 0x7e
'Qing ', # 0x7f
'Xi ', # 0x80
'[?] ', # 0x81
'Han ', # 0x82
'Zhan ', # 0x83
'E ', # 0x84
'Ting ', # 0x85
'Li ', # 0x86
'Zhe ', # 0x87
'Han ', # 0x88
'Li ', # 0x89
'Ya ', # 0x8a
'Ya ', # 0x8b
'Yan ', # 0x8c
'She ', # 0x8d
'Zhi ', # 0x8e
'Zha ', # 0x8f
'Pang ', # 0x90
'[?] ', # 0x91
'He ', # 0x92
'Ya ', # 0x93
'Zhi ', # 0x94
'Ce ', # 0x95
'Pang ', # 0x96
'Ti ', # 0x97
'Li ', # 0x98
'She ', # 0x99
'Hou ', # 0x9a
'Ting ', # 0x9b
'Zui ', # 0x9c
'Cuo ', # 0x9d
'Fei ', # 0x9e
'Yuan ', # 0x9f
'Ce ', # 0xa0
'Yuan ', # 0xa1
'Xiang ', # 0xa2
'Yan ', # 0xa3
'Li ', # 0xa4
'Jue ', # 0xa5
'Sha ', # 0xa6
'Dian ', # 0xa7
'Chu ', # 0xa8
'Jiu ', # 0xa9
'Qin ', # 0xaa
'Ao ', # 0xab
'Gui ', # 0xac
'Yan ', # 0xad
'Si ', # 0xae
'Li ', # 0xaf
'Chang ', # 0xb0
'Lan ', # 0xb1
'Li ', # 0xb2
'Yan ', # 0xb3
'Yan ', # 0xb4
'Yuan ', # 0xb5
'Si ', # 0xb6
'Gong ', # 0xb7
'Lin ', # 0xb8
'Qiu ', # 0xb9
'Qu ', # 0xba
'Qu ', # 0xbb
'Uk ', # 0xbc
'Lei ', # 0xbd
'Du ', # 0xbe
'Xian ', # 0xbf
'Zhuan ', # 0xc0
'San ', # 0xc1
'Can ', # 0xc2
'Can ', # 0xc3
'Can ', # 0xc4
'Can ', # 0xc5
'Ai ', # 0xc6
'Dai ', # 0xc7
'You ', # 0xc8
'Cha ', # 0xc9
'Ji ', # 0xca
'You ', # 0xcb
'Shuang ', # 0xcc
'Fan ', # 0xcd
'Shou ', # 0xce
'Guai ', # 0xcf
'Ba ', # 0xd0
'Fa ', # 0xd1
'Ruo ', # 0xd2
'Shi ', # 0xd3
'Shu ', # 0xd4
'Zhuo ', # 0xd5
'Qu ', # 0xd6
'Shou ', # 0xd7
'Bian ', # 0xd8
'Xu ', # 0xd9
'Jia ', # 0xda
'Pan ', # 0xdb
'Sou ', # 0xdc
'Gao ', # 0xdd
'Wei ', # 0xde
'Sou ', # 0xdf
'Die ', # 0xe0
'Rui ', # 0xe1
'Cong ', # 0xe2
'Kou ', # 0xe3
'Gu ', # 0xe4
'Ju ', # 0xe5
'Ling ', # 0xe6
'Gua ', # 0xe7
'Tao ', # 0xe8
'Kou ', # 0xe9
'Zhi ', # 0xea
'Jiao ', # 0xeb
'Zhao ', # 0xec
'Ba ', # 0xed
'Ding ', # 0xee
'Ke ', # 0xef
'Tai ', # 0xf0
'Chi ', # 0xf1
'Shi ', # 0xf2
'You ', # 0xf3
'Qiu ', # 0xf4
'Po ', # 0xf5
'Xie ', # 0xf6
'Hao ', # 0xf7
'Si ', # 0xf8
'Tan ', # 0xf9
'Chi ', # 0xfa
'Le ', # 0xfb
'Diao ', # 0xfc
'Ji ', # 0xfd
'[?] ', # 0xfe
'Hong ', # 0xff
)
| gpl-3.0 |
mondada/pybsdp | bsdp.py | 2 | 6359 | import struct
import collections
import dhcp
TYPE_LIST = 1
TYPE_SELECT = 2
TYPE_FAILED = 3
CODE_TYPE = 1
CODE_VERSION = 2
CODE_SERVER_ID = 3
CODE_SERVER_PRIORITY = 4
CODE_REPLY_PORT = 5
CODE_DEFAULT_BOOT_IMAGE = 7
CODE_SELECTED_BOOT_IMAGE = 8
CODE_BOOT_IMAGE_LIST = 9
CODE_MAX_MESSAGE_SIZE = 12
CODE_SHADOW_MOUNT_URL = 128
CODE_SHADOW_FILE_PATH = 129
CODE_MACHINE_NAME = 130
BSDP_TYPES = {
CODE_TYPE: 'int8',
CODE_VERSION: 'int16',
CODE_SERVER_ID: 'ip',
CODE_SERVER_PRIORITY: 'int16',
CODE_REPLY_PORT: 'int16',
CODE_DEFAULT_BOOT_IMAGE: 'int32',
CODE_SELECTED_BOOT_IMAGE: 'int32',
CODE_BOOT_IMAGE_LIST: '*oct',
CODE_MAX_MESSAGE_SIZE: 'int16',
CODE_SHADOW_MOUNT_URL: 'string',
CODE_SHADOW_FILE_PATH: 'string',
CODE_MACHINE_NAME: 'string',
}
class BsdpPacket:
def __init__(self):
self.options = { }
#
# Return the packet as a printable string.
#
def str(self):
string = ''
for opt in self.options:
if opt in BSDP_TYPES:
fmt = BSDP_TYPES[opt]
else:
fmt = '*oct'
string += 'Option {:d}: {:s}\n'.format(int(opt), dhcp.DhcpPacket.format_for_display(fmt, self.options[opt]))
return string
#
# Parse the data into a BsdpPacket.
#
def decode(self, data):
if isinstance(data, collections.Sequence):
data = struct.pack(str(len(data)) + 'B', *data)
while len(data):
vals = struct.unpack('=BB', data[0:2])
code = vals[0]
length = vals[1]
if code in BSDP_TYPES:
fmt = BSDP_TYPES[code]
else:
fmt = '*oct'
self.options[code] = dhcp.DhcpPacket.decode_value(fmt, data[2:2+length])
data = data[2+length:]
#
# Encode the packet into a data stream. If the unpack parameter is
# True then the data is unpacked into an array of integers that
# each represent a byte of the data, useful for then storing in a
# DHCP packet.
#
def encode(self, unpack = False):
data = ''
for opt in self.options:
if opt in BSDP_TYPES:
fmt = BSDP_TYPES[opt]
else:
fmt = '*B'
dat = dhcp.DhcpPacket.encode_value(fmt, self.options[opt])
data += struct.pack('=BB', opt, len(dat))
data += dat
if unpack:
return struct.unpack(str(len(data)) + 'B', data)
else:
return data
#
# Message type.
#
def setType(self, value):
self.options[CODE_TYPE] = value
def getType(self):
if CODE_TYPE in self.options:
return self.options[CODE_TYPE]
return None
#
# Version.
#
def setVersion(self, value):
self.options[CODE_VERSION] = value
def getVersion(self):
if CODE_VERSION in self.options:
return self.options[CODE_VERSION]
return None
#
# Server id.
# IP Address of BSDP server.
#
def setServerID(self, value):
self.options[CODE_SERVER_ID] = value
def getServerID(self):
if CODE_SERVER_ID in self.options:
return self.options[CODE_SERVER_ID]
return None
#
# Server priority.
# Priority of server over others on the network.
#
def setServerPriority(self, value):
self.options[CODE_SERVER_PRIORITY] = value
def getServerPriority(self):
if CODE_SERVER_PRIORITY in self.options:
return self.options[CODE_SERVER_PRIORITY]
return None
#
# Reply port.
# Port the client is listening on.
#
def setReplyPort(self, value):
self.options[CODE_REPLY_PORT] = value
def getReplyPort(self):
if CODE_REPLY_PORT in self.options:
return self.options[CODE_REPLY_PORT]
return None
#
# Default boot image ID.
#
def setDefaultBootImage(self, value):
self.options[CODE_DEFAULT_BOOT_IMAGE] = value
def getDefaultBootImage(self):
if CODE_DEFAULT_BOOT_IMAGE in self.options:
return self.options[CODE_DEFAULT_BOOT_IMAGE]
return None
#
# Selected boot image ID.
#
def setSelectedBootImage(self, value):
self.options[CODE_SELECTED_BOOT_IMAGE] = value
def getSelectedBootImage(self):
if CODE_SELECTED_BOOT_IMAGE in self.options:
return self.options[CODE_SELECTED_BOOT_IMAGE]
return None
#
# Maximum message size.
#
def setMaxMessageSize(self, value):
self.options[CODE_MAX_MESSAGE_SIZE] = value
def getMaxMessageSize(self):
if CODE_MAX_MESSAGE_SIZE in self.options:
return self.options[CODE_MAX_MESSAGE_SIZE]
return None
#
# Shadow Mount URL
# afp://[username:password@]server/SharePoint
#
def setShadowMountURL(self, value):
self.options[CODE_SHADOW_MOUNT_URL] = value
def getShadowMountURL(self):
if CODE_SHADOW_MOUNT_URL in self.options:
return self.options[CODE_SHADOW_MOUNT_URL]
return None
#
# Shadow File Path
# Directory/Filename
#
def setShadowFilePath(self, value):
self.options[CODE_SHADOW_FILE_PATH] = value
def getShadowFilePath(self):
if CODE_SHADOW_FILE_PATH in self.options:
return self.options[CODE_SHADOW_FILE_PATH]
return None
#
# Machine Name
# Network name of the machine for sharing purposes.
#
def setMachineName(self, value):
self.options[CODE_MACHINE_NAME] = value
def getMachineName(self):
if CODE_MACHINE_NAME in self.options:
return self.options[CODE_MACHINE_NAME]
return None
#
# Append a new image name to the list.
#
def appendBootImageList(self, ident, name):
if CODE_BOOT_IMAGE_LIST in self.options:
data = self.options[CODE_BOOT_IMAGE_LIST]
else:
data = [ ]
if len(data) + 4 + 1 + len(name) > 255:
return
data += [ord(c) for c in struct.pack('!L', ident)]
data += [ord(c) for c in struct.pack('!B', len(name))]
data += [ord(c) for c in struct.pack(str(len(name)) + 's', name)]
self.options[CODE_BOOT_IMAGE_LIST] = data
| mit |
# -*- coding: utf-8 -*-
from __future__ import print_function
from os import sys
try:
from skbuild import setup
except ImportError:
print('scikit-build is required to build from source.', file=sys.stderr)
print('Please run:', file=sys.stderr)
print('', file=sys.stderr)
print(' python -m pip install scikit-build')
sys.exit(1)
setup(
name='itk-minimalpathextraction',
version='1.1.2',
author='Insight Software Consortium',
author_email='itk+community@discourse.itk.org',
packages=['itk'],
package_dir={'itk': 'itk'},
download_url=r'https://github.com/InsightSoftwareConsortium/ITKMinimalPathExtraction',
description=r'A minimal path extraction framework based on Fast Marching arrival functions.',
long_description='itk-minimalpathextraction provides a minimal path '
'extraction framework based on Fast Marching arrival '
'functions.\n'
'Please refer to:\n'
'Mueller, D. "Fast Marching Minimal Path Extraction in ITK", '
'Insight Journal, January-June 2008, http://hdl.handle.net/1926/1332.',
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: C++",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries",
"Operating System :: Android",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS"
],
license='Apache',
keywords='ITK InsightToolkit',
url=r'https://github.com/InsightSoftwareConsortium/ITKMinimalPathExtraction',
install_requires=[
r'itk>=5.2rc3'
]
)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.