#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import sys
import random
from pyspark import SparkContext, RDD, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc, JavaModelWrapper
from pyspark.mllib.linalg import _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['DecisionTreeModel', 'DecisionTree', 'RandomForestModel',
'RandomForest', 'GradientBoostedTreesModel', 'GradientBoostedTrees']
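# Python wrappers for the JVM-backed MLlib tree algorithms: training is delegated
# to Scala via callMLlibFunc, and the returned Java model object is wrapped by the
# corresponding *Model class below.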
class TreeEnsembleModel(JavaModelWrapper, JavaSaveable):
"""TreeEnsembleModel
.. versionadded:: 1.3.0
"""
@since("1.3.0")
def predict(self, x):
"""
Predict values for a single data point or an RDD of points using
the model trained.
.. note:: In Python, predict cannot currently be used within an RDD
transformation or action.
Call predict directly on the RDD instead.
"""
if isinstance(x, RDD):
return self.call("predict", x.map(_convert_to_vector))
else:
return self.call("predict", _convert_to_vector(x))
@since("1.3.0")
def numTrees(self):
"""
Get number of trees in ensemble.
"""
return self.call("numTrees")
@since("1.3.0")
def totalNumNodes(self):
"""
Get total number of nodes, summed over all trees in the ensemble.
"""
return self.call("totalNumNodes")
def __repr__(self):
""" Summary of model """
return self._java_model.toString()
@since("1.3.0")
def toDebugString(self):
""" Full model """
return self._java_model.toDebugString()
class DecisionTreeModel(JavaModelWrapper, JavaSaveable, JavaLoader):
"""
A decision tree model for classification or regression.
.. versionadded:: 1.1.0
"""
@since("1.1.0")
def predict(self, x):
"""
Predict the label of one or more examples.
.. note:: In Python, predict cannot currently be used within an RDD
transformation or action.
Call predict directly on the RDD instead.
:param x:
Data point (feature vector), or an RDD of data points (feature
vectors).
"""
if isinstance(x, RDD):
return self.call("predict", x.map(_convert_to_vector))
else:
return self.call("predict", _convert_to_vector(x))
@since("1.1.0")
def numNodes(self):
"""Get number of nodes in tree, including leaf nodes."""
return self._java_model.numNodes()
@since("1.1.0")
def depth(self):
"""
Get depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
"""
return self._java_model.depth()
def __repr__(self):
""" summary of model. """
return self._java_model.toString()
@since("1.2.0")
def toDebugString(self):
""" full model. """
return self._java_model.toDebugString()
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.DecisionTreeModel"
class DecisionTree(object):
"""
Learning algorithm for a decision tree model for classification or
regression.
.. versionadded:: 1.1.0
"""
@classmethod
def _train(cls, data, type, numClasses, features, impurity="gini", maxDepth=5, maxBins=32,
minInstancesPerNode=1, minInfoGain=0.0):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be an RDD of LabeledPoint"
model = callMLlibFunc("trainDecisionTreeModel", data, type, numClasses, features,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
return DecisionTreeModel(model)
@classmethod
@since("1.1.0")
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo,
impurity="gini", maxDepth=5, maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0):
"""
Train a decision tree model for classification.
:param data:
Training data: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
:param numClasses:
Number of classes for classification.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param impurity:
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
:param maxBins:
Number of bins used for finding splits at each node.
(default: 32)
:param minInstancesPerNode:
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
:param minInfoGain:
Minimum info gain required to create a split.
(default: 0.0)
:return:
DecisionTreeModel.
Example usage:
>>> from numpy import array
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = DecisionTree.trainClassifier(sc.parallelize(data), 2, {})
>>> print(model)
DecisionTreeModel classifier of depth 1 with 3 nodes
>>> print(model.toDebugString())
DecisionTreeModel classifier of depth 1 with 3 nodes
If (feature 0 <= 0.5)
Predict: 0.0
Else (feature 0 > 0.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict(array([1.0]))
1.0
>>> model.predict(array([0.0]))
0.0
>>> rdd = sc.parallelize([[1.0], [0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "classification", numClasses, categoricalFeaturesInfo,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
@classmethod
@since("1.1.0")
def trainRegressor(cls, data, categoricalFeaturesInfo,
impurity="variance", maxDepth=5, maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0):
"""
Train a decision tree model for regression.
:param data:
Training data: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param impurity:
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 5)
:param maxBins:
Number of bins used for finding splits at each node.
(default: 32)
:param minInstancesPerNode:
Minimum number of instances required at child nodes to create
the parent split.
(default: 1)
:param minInfoGain:
Minimum info gain required to create a split.
(default: 0.0)
:return:
DecisionTreeModel.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import DecisionTree
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = DecisionTree.trainRegressor(sc.parallelize(sparse_data), {})
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {1: 0.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [0.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "regression", 0, categoricalFeaturesInfo,
impurity, maxDepth, maxBins, minInstancesPerNode, minInfoGain)
@inherit_doc
class RandomForestModel(TreeEnsembleModel, JavaLoader):
"""
Represents a random forest model.
.. versionadded:: 1.2.0
"""
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.RandomForestModel"
class RandomForest(object):
"""
Learning algorithm for a random forest model for classification or
regression.
.. versionadded:: 1.2.0
"""
supportedFeatureSubsetStrategies = ("auto", "all", "sqrt", "log2", "onethird")
@classmethod
def _train(cls, data, algo, numClasses, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, impurity, maxDepth, maxBins, seed):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be an RDD of LabeledPoint"
if featureSubsetStrategy not in cls.supportedFeatureSubsetStrategies:
raise ValueError("unsupported featureSubsetStrategy: %s" % featureSubsetStrategy)
if seed is None:
seed = random.randint(0, 1 << 30)
model = callMLlibFunc("trainRandomForestModel", data, algo, numClasses,
categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
maxDepth, maxBins, seed)
return RandomForestModel(model)
@classmethod
@since("1.2.0")
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32,
seed=None):
"""
Train a random forest model for binary or multiclass
classification.
:param data:
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
:param numClasses:
Number of classes for classification.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param numTrees:
Number of trees in the random forest.
:param featureSubsetStrategy:
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
if numTrees == 1, set to "all";
if numTrees > 1 (forest) set to "sqrt".
(default: "auto")
:param impurity:
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
:param maxBins:
Maximum number of bins used for splitting features.
(default: 32)
:param seed:
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
:return:
RandomForestModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
>>> model.numTrees()
3
>>> model.totalNumNodes()
7
>>> print(model)
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
>>> print(model.toDebugString())
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
Tree 0:
Predict: 1.0
Tree 1:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
Tree 2:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[3.0], [1.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "classification", numClasses,
categoricalFeaturesInfo, numTrees, featureSubsetStrategy, impurity,
maxDepth, maxBins, seed)
@classmethod
@since("1.2.0")
def trainRegressor(cls, data, categoricalFeaturesInfo, numTrees, featureSubsetStrategy="auto",
impurity="variance", maxDepth=4, maxBins=32, seed=None):
"""
Train a random forest model for regression.
:param data:
Training dataset: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param numTrees:
Number of trees in the random forest.
:param featureSubsetStrategy:
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
if numTrees == 1, set to "all";
if numTrees > 1 (forest) set to "onethird" for regression.
(default: "auto")
:param impurity:
Criterion used for information gain calculation.
The only supported value for regression is "variance".
(default: "variance")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
:param maxBins:
Maximum number of bins used for splitting features.
(default: 32)
:param seed:
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
:return:
RandomForestModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> model = RandomForest.trainRegressor(sc.parallelize(sparse_data), {}, 2, seed=42)
>>> model.numTrees()
2
>>> model.totalNumNodes()
4
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {0: 1.0}))
0.5
>>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.5]
"""
return cls._train(data, "regression", 0, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, impurity, maxDepth, maxBins, seed)
@inherit_doc
class GradientBoostedTreesModel(TreeEnsembleModel, JavaLoader):
"""
Represents a gradient-boosted tree model.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls):
return "org.apache.spark.mllib.tree.model.GradientBoostedTreesModel"
class GradientBoostedTrees(object):
"""
Learning algorithm for a gradient boosted trees model for
classification or regression.
.. versionadded:: 1.3.0
"""
@classmethod
def _train(cls, data, algo, categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins):
first = data.first()
assert isinstance(first, LabeledPoint), "the data should be an RDD of LabeledPoint"
model = callMLlibFunc("trainGradientBoostedTreesModel", data, algo, categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins)
return GradientBoostedTreesModel(model)
@classmethod
@since("1.3.0")
def trainClassifier(cls, data, categoricalFeaturesInfo,
loss="logLoss", numIterations=100, learningRate=0.1, maxDepth=3,
maxBins=32):
"""
Train a gradient-boosted trees model for classification.
:param data:
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1}.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param loss:
Loss function used for minimization during gradient boosting.
Supported values: "logLoss", "leastSquaresError",
"leastAbsoluteError".
(default: "logLoss")
:param numIterations:
Number of iterations of boosting.
(default: 100)
:param learningRate:
Learning rate for shrinking the contribution of each estimator.
The learning rate should be in the interval (0, 1].
(default: 0.1)
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 3)
:param maxBins:
Maximum number of bins used for splitting features. DecisionTree
requires maxBins >= max categories.
(default: 32)
:return:
GradientBoostedTreesModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import GradientBoostedTrees
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>>
>>> model = GradientBoostedTrees.trainClassifier(sc.parallelize(data), {}, numIterations=10)
>>> model.numTrees()
10
>>> model.totalNumNodes()
30
>>> print(model) # it already has newline
TreeEnsembleModel classifier with 10 trees
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[2.0], [0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "classification", categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins)
@classmethod
@since("1.3.0")
def trainRegressor(cls, data, categoricalFeaturesInfo,
loss="leastSquaresError", numIterations=100, learningRate=0.1, maxDepth=3,
maxBins=32):
"""
Train a gradient-boosted trees model for regression.
:param data:
Training dataset: RDD of LabeledPoint. Labels are real numbers.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param loss:
Loss function used for minimization during gradient boosting.
Supported values: "logLoss", "leastSquaresError",
"leastAbsoluteError".
(default: "leastSquaresError")
:param numIterations:
Number of iterations of boosting.
(default: 100)
:param learningRate:
Learning rate for shrinking the contribution of each estimator.
The learning rate should be in the interval (0, 1].
(default: 0.1)
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 3)
:param maxBins:
Maximum number of bins used for splitting features. DecisionTree
requires maxBins >= max categories.
(default: 32)
:return:
GradientBoostedTreesModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import GradientBoostedTrees
>>> from pyspark.mllib.linalg import SparseVector
>>>
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>>
>>> data = sc.parallelize(sparse_data)
>>> model = GradientBoostedTrees.trainRegressor(data, {}, numIterations=10)
>>> model.numTrees()
10
>>> model.totalNumNodes()
12
>>> model.predict(SparseVector(2, {1: 1.0}))
1.0
>>> model.predict(SparseVector(2, {0: 1.0}))
0.0
>>> rdd = sc.parallelize([[0.0, 1.0], [1.0, 0.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
"""
return cls._train(data, "regression", categoricalFeaturesInfo,
loss, numIterations, learningRate, maxDepth, maxBins)
def _test():
import doctest
globs = globals().copy()
from pyspark.sql import SparkSession
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.tree tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
import sys
import paddle.compat as cpt
from op_test import OpTest
from math import sqrt
from math import floor
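# Tolerance-based float comparisons (epsilon = 1e-4): gt_e is ">=", gt is a strict
# ">", and lt_e is "<=", all allowing for small floating-point error.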
def gt_e(a, b):
return a > b or abs(a - b) < 1e-4
def gt(a, b):
return (a - b) > 1e-4
def lt_e(a, b):
return a < b or abs(a - b) < 1e-4
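# Point-in-quadrilateral test: first checks whether (x, y) lies on one of the four
# edges, then falls back to a ray-casting (crossing-number) test along +x; an odd
# number of crossings means the point is inside the ROI.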
def in_quad(x, y, roi_x, roi_y):
# check if (x, y) is in the boundary of roi
for i in range(4):
xs = roi_x[i]
ys = roi_y[i]
xe = roi_x[(i + 1) % 4]
ye = roi_y[(i + 1) % 4]
if abs(ys - ye) < 1e-4:
if abs(y - ys) < 1e-4 and abs(y - ye) < 1e-4 and gt_e(
x, min(xs, xe)) and lt_e(x, max(xs, xe)):
return True
else:
intersec_x = (y - ys) * (xe - xs) / (ye - ys) + xs
if abs(intersec_x - x) < 1e-4 and gt_e(y, min(ys, ye)) and lt_e(
y, max(ys, ye)):
return True
n_cross = 0
for i in range(4):
xs = roi_x[i]
ys = roi_y[i]
xe = roi_x[(i + 1) % 4]
ye = roi_y[(i + 1) % 4]
if abs(ys - ye) < 1e-4:
continue
if lt_e(y, min(ys, ye)) or gt(y, max(ys, ye)):
continue
intersec_x = (y - ys) * (xe - xs) / (ye - ys) + xs
if abs(intersec_x - x) < 1e-4:
return True
if gt(intersec_x, x):
n_cross += 1
return (n_cross % 2 == 1)
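# Builds the 3x3 perspective transform (flattened row-major into a length-9 array)
# that maps output-grid coordinates onto the ROI quadrilateral. The effective output
# width is normalized using the ROI's aspect ratio, estimated from the average
# lengths of opposite edges, and capped at transformed_width.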
def get_transform_matrix(transformed_width, transformed_height, roi_x, roi_y):
x0 = roi_x[0]
x1 = roi_x[1]
x2 = roi_x[2]
x3 = roi_x[3]
y0 = roi_y[0]
y1 = roi_y[1]
y2 = roi_y[2]
y3 = roi_y[3]
len1 = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1))
len2 = sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))
len3 = sqrt((x2 - x3) * (x2 - x3) + (y2 - y3) * (y2 - y3))
len4 = sqrt((x3 - x0) * (x3 - x0) + (y3 - y0) * (y3 - y0))
estimated_height = (len2 + len4) / 2.0
estimated_width = (len1 + len3) / 2.0
normalized_height = transformed_height
normalized_width = round(estimated_width *
(normalized_height - 1) / estimated_height) + 1
normalized_width = min(normalized_width, transformed_width)
dx1 = x1 - x2
dx2 = x3 - x2
dx3 = x0 - x1 + x2 - x3
dy1 = y1 - y2
dy2 = y3 - y2
dy3 = y0 - y1 + y2 - y3
matrix = np.zeros([9])
matrix[6] = (dx3 * dy2 - dx2 * dy3) / (dx1 * dy2 - dx2 * dy1) / (
normalized_width - 1)
matrix[7] = (dx1 * dy3 - dx3 * dy1) / (dx1 * dy2 - dx2 * dy1) / (
normalized_height - 1)
matrix[8] = 1
matrix[3] = (y1 - y0 + matrix[6] *
(normalized_width - 1) * y1) / (normalized_width - 1)
matrix[4] = (y3 - y0 + matrix[7] *
(normalized_height - 1) * y3) / (normalized_height - 1)
matrix[5] = y0
matrix[0] = (x1 - x0 + matrix[6] *
(normalized_width - 1) * x1) / (normalized_width - 1)
matrix[1] = (x3 - x0 + matrix[7] *
(normalized_height - 1) * x3) / (normalized_height - 1)
matrix[2] = x0
return matrix
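# Applies the transform in homogeneous coordinates: (u, v, w) = M * (out_w, out_h, 1),
# then returns the source location (u / w, v / w) in the input feature map.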
def get_source_coords(matrix, out_w, out_h):
u = matrix[0] * out_w + matrix[1] * out_h + matrix[2]
v = matrix[3] * out_w + matrix[4] * out_h + matrix[5]
w = matrix[6] * out_w + matrix[7] * out_h + matrix[8]
in_w = u / w
in_h = v / w
return in_w, in_h
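# Bilinearly interpolates in_data[in_n][in_c] at the fractional location (in_w, in_h),
# returning 0.0 when the point falls more than half a pixel outside the feature map.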
def bilinear_interpolate(in_data, in_n, in_c, in_w, in_h):
batch_size = in_data.shape[0]
channels = in_data.shape[1]
height = in_data.shape[2]
width = in_data.shape[3]
if gt(-0.5, in_w) or gt(in_w, width - 0.5) or gt(-0.5, in_h) or gt(
in_h, height - 0.5):
return 0.0
if gt(0, in_w):
in_w = 0
if gt(0, in_h):
in_h = 0
in_w_floor = floor(in_w)
in_h_floor = floor(in_h)
if gt_e(in_w_floor, width - 1):
in_w_ceil = width - 1
in_w_floor = width - 1
in_w = in_w_floor
else:
in_w_ceil = in_w_floor + 1
if gt_e(in_h_floor, height - 1):
in_h_ceil = height - 1
in_h_floor = height - 1
in_h = in_h_floor
else:
in_h_ceil = in_h_floor + 1
w_floor = in_w - in_w_floor
h_floor = in_h - in_h_floor
w_ceil = 1 - w_floor
h_ceil = 1 - h_floor
v1 = in_data[in_n][in_c][int(in_h_floor)][int(in_w_floor)]
v2 = in_data[in_n][in_c][int(in_h_ceil)][int(in_w_floor)]
v3 = in_data[in_n][in_c][int(in_h_ceil)][int(in_w_ceil)]
v4 = in_data[in_n][in_c][int(in_h_floor)][int(in_w_ceil)]
w1 = w_ceil * h_ceil
w2 = w_ceil * h_floor
w3 = w_floor * h_floor
w4 = w_floor * h_ceil
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
return val
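# Converts a level-of-detail (LoD) list of per-image ROI counts into cumulative
# offsets, e.g. [1, 2] -> [0, 1, 3].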
def lod_convert(lod):
ret = [0]
for count in lod:
ret.append(ret[-1] + count)
return ret
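# NumPy reference implementation of roi_perspective_transform: for each ROI it builds
# the perspective matrix, maps every output pixel back into the input image, and
# bilinearly interpolates when the source point lies inside the ROI quad (0 otherwise).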
def roi_transform(in_data, rois, rois_lod, transformed_height,
transformed_width, spatial_scale):
channels = in_data.shape[1]
in_height = in_data.shape[2]
in_width = in_data.shape[3]
rois_num = rois.shape[0]
roi2image = [0] * rois_num
rois_lod = lod_convert(rois_lod[0])
for i in range(len(rois_lod) - 1):
for j in range(rois_lod[i], rois_lod[i + 1]):
roi2image[j] = i
out = np.zeros([rois_num, channels, transformed_height, transformed_width])
for n in range(rois_num):
roi_x = []
roi_y = []
for k in range(4):
roi_x.append(rois[n][2 * k] * spatial_scale)
roi_y.append(rois[n][2 * k + 1] * spatial_scale)
image_id = roi2image[n]
transform_matrix = get_transform_matrix(
transformed_width, transformed_height, roi_x, roi_y)
for c in range(channels):
for out_h in range(transformed_height):
for out_w in range(transformed_width):
in_w, in_h = get_source_coords(transform_matrix, out_w,
out_h)
if in_quad(in_w, in_h, roi_x, roi_y) and gt_e(
in_w, -0.5) and lt_e(in_w, in_width - 0.5) and gt_e(
in_h, -0.5) and lt_e(in_h, in_height - 0.5):
out[n][c][out_h][out_w] = bilinear_interpolate(
in_data, image_id, c, in_w, in_h)
else:
out[n][c][out_h][out_w] = 0.0
return out.astype("float32")
class TestROIPerspectiveTransformOp(OpTest):
def set_data(self):
self.init_test_case()
self.make_rois()
self.inputs = {'X': self.x, 'ROIs': (self.rois, self.rois_lod)}
self.attrs = {
'spatial_scale': self.spatial_scale,
'transformed_height': self.transformed_height,
'transformed_width': self.transformed_width
}
out = roi_transform(self.x, self.rois, self.rois_lod,
self.transformed_height, self.transformed_width,
self.spatial_scale)
self.outputs = {'Out': out}
def init_test_case(self):
self.batch_size = 2
self.channels = 2
self.height = 8
self.width = 8
# n, c, h, w
self.x_dim = (self.batch_size, self.channels, self.height, self.width)
self.spatial_scale = 1.0 / 2.0
self.transformed_height = 2
self.transformed_width = 3
self.x = np.random.random(self.x_dim).astype('float32')
def make_rois(self):
rois = []
self.rois_lod = [[]]
for bno in range(self.batch_size):
self.rois_lod[0].append(bno + 1)
for i in range(bno + 1):
x1 = np.random.randint(
0,
self.width // self.spatial_scale - self.transformed_width)
y1 = np.random.randint(
0,
self.height // self.spatial_scale - self.transformed_height)
x2 = np.random.randint(x1 + self.transformed_width,
self.width // self.spatial_scale)
y2 = np.random.randint(
0,
self.height // self.spatial_scale - self.transformed_height)
x3 = np.random.randint(x1 + self.transformed_width,
self.width // self.spatial_scale)
y3 = np.random.randint(y1 + self.transformed_height,
self.height // self.spatial_scale)
x4 = np.random.randint(
0,
self.width // self.spatial_scale - self.transformed_width)
y4 = np.random.randint(y1 + self.transformed_height,
self.height // self.spatial_scale)
roi = [x1, y1, x2, y2, x3, y3, x4, y4]
rois.append(roi)
self.rois_num = len(rois)
self.rois = np.array(rois).astype("float32")
def setUp(self):
self.op_type = "roi_perspective_transform"
self.set_data()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out')
if __name__ == '__main__':
unittest.main()
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LegendOption'
db.create_table(u'profiles_legendoption', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['profiles.Indicator'])),
('bin_type', self.gf('django.db.models.fields.CharField')(max_length=255)),
('bin_options', self.gf('django.db.models.fields.TextField')(default='')),
))
db.send_create_signal(u'profiles', ['LegendOption'])
def backwards(self, orm):
# Deleting model 'LegendOption'
db.delete_table(u'profiles_legendoption')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 4, 13, 42, 53, 389661)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 11, 4, 13, 42, 53, 389253)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.indicator': {
'Meta': {'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
import requests
from bs4 import BeautifulSoup
from flask import Flask, request, render_template, jsonify, abort, make_response
import config
import decimal
import json
import re
import os
import time
import datetime
from flask_caching import Cache
app = Flask(__name__)
app.config.from_object('config')
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
the_api_key = config.api_key
#####database functions
from functions import db_insert, db_select
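# db_insert and db_select come from the project-local functions module; throughout
# this file their return value is treated as a DB cursor (results read via fetchall()).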
###############Check All Stock
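# Looks up every store mapped to this release in store_mappings, calls each store's
# /scrape/<store>/release/<id> endpoint (plus Juno, which uses the release id
# directly), and returns the combined stock information as a JSON list.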
@app.route('/api/v1.0/<int:api_key>/stock/release/<string:release_id>', methods=['GET'])
@cache.cached(timeout=180)
def get_stock(api_key, release_id):
if str(api_key) != the_api_key:
abort(401)
#######get mappings
try:
query = db_select('SELECT * FROM store_mappings WHERE release_id=%s', (release_id,))
except Exception as e:
return str(e)
details = []
data = query.fetchall()
for row in data:
store = row[2]
store_release_id = row[5]
api_url = 'https://api.soundshelter.net/api/v1.0/%s/scrape/%s/release/%s' % (api_key, store, store_release_id)
details.append({'store': store, 'store_release_id': store_release_id, 'api_url': api_url})
api_url = 'https://api.soundshelter.net/api/v1.0/%s/scrape/%s/release/%s' % (api_key, 'juno', release_id)
details.append({'store': 'juno', 'store_release_id': release_id, 'api_url': api_url})
stock_output = []
for release in details:
check_url = release['api_url']
print(check_url)
try:
r = requests.get(check_url, timeout=5)
except Exception as e:
print('Timed out on ' + check_url)
continue
try:
stock_output.append(json.loads(r.text))
except Exception as e:
print(str(e) + ' - failed to parse JSON from ' + check_url)
return jsonify(stock_output)
###############Juno
@app.route('/api/v1.0/<int:api_key>/scrape/juno/release/<int:release_id>', methods=['GET'])
# cached for 60 minutes
## @cache.cached(timeout=3600)
def scrape_juno(api_key, release_id):
if str(api_key) != the_api_key:
abort(401)
juno_url = 'https://www.juno.co.uk/products/' + str(release_id) + '-01'
r = requests.get(juno_url, timeout=5)
soup = BeautifulSoup(r.text, "lxml")
pricing_html = soup.find("div", "product-pricing")
stock = pricing_html.find("em").text
price = pricing_html.find("span", "product-price").text.replace('\n', '').replace('\t', '')
# keep the price as text and replace the currency symbols with ISO codes
price = price.replace(u'\u20ac', 'EUR')
price = price.replace(u'\u00a3', 'GBP')
cart_url = 'https://www.juno.co.uk/cart/add/' + str(release_id) + '/01/?ref=soundshelter'
if 'Out of stock' in stock:
stock = 'false'
else:
stock = 'true'
return jsonify({'store': 'juno', 'price': price, 'in_stock': stock, 'cart_url': cart_url, 'location': 'UK'}), 201
##########hardwax
#####this goes to a label or artist page and returns all releases on that page
@app.route('/api/v1.0/<int:api_key>/scrape/hardwax/<string:type_of_search>/<string:query>/<string:title>',
methods=['GET'])
# cached for 60 minutes
## @cache.cached(timeout=3600)
def define_hard_wax_search_url(api_key, type_of_search, query, title):
if str(api_key) != the_api_key:
abort(401)
if type_of_search == 'artist':
type_of_search = 'act'
title = str(title)
base_url = 'https://hardwax.com/' + str(type_of_search) + '/' + str(query.lower().replace(' ', '-'))
x = get_hard_wax(base_url)
return x
@app.route('/api/v1.0/<int:api_key>/scrape/hardwax/new', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def define_hard_wax_new_url(api_key):
if str(api_key) != the_api_key:
abort(401)
base_url = 'https://hardwax.com/this-week/?paginate_by=500'
x = get_hard_wax(base_url)
return x
@app.route('/api/v1.0/<int:api_key>/scrape/hardwax/bis', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def define_hard_wax_bis_url(api_key):
if str(api_key) != the_api_key:
abort(401)
base_url = 'https://hardwax.com/back-in-stock/?paginate_by=500'
x = get_hard_wax(base_url)
return x
@app.route('/api/v1.0/<int:api_key>/scrape/hardwax/<string:term>', methods=['GET'])
# cached for 60 minutes
## @cache.cached(timeout=3600)
def define_hard_wax_term_url(api_key, term):
if str(api_key) != the_api_key:
abort(401)
base_url = 'https://hardwax.com/' + term + '/?paginate_by=500'
x = get_hard_wax(base_url)
return x
@app.route('/api/v1.0/<int:api_key>/scrape/hardwax/release/<string:hardwax_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_hard_wax_release(api_key, hardwax_id):
if str(api_key) != the_api_key:
abort(401)
base_url = 'https://hardwax.com/' + str(hardwax_id)
####now get the HTML
try:
r = requests.get(base_url, timeout=5)
except Exception as e:
return "Failed to request the Hardwax URL " + base_url, 405
soup = BeautifulSoup(r.text, "lxml")
stock_details = soup.find("div", "add_order").text
cart_url = 'https://hardwax.com/basket/add/' + hardwax_id + '?utm_source=soundshelter.net'
if 'out of stock' in stock_details:
return jsonify({'store': 'hardwax', 'in_stock': 'false', 'cart_url': cart_url, 'location': 'DE', 'price': ''})
else:
return jsonify({'store': 'hardwax', 'in_stock': 'true', 'cart_url': cart_url, 'location': 'DE', 'price': ''})
######scrapes everything on a page
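# Parses every product listing on a Hardwax page, extracts the label, artist, title
# and Hardwax release URL/id, and upserts a row into store_mappings by matching the
# release against releases_all on label and title.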
def get_hard_wax(base_url):
####now get the HTML
try:
r = requests.get(base_url, timeout=5)
except Exception as e:
return "Failed to request the Hardwax URL " + base_url, 405
soup = BeautifulSoup(r.text, "lxml")
for product in soup.find_all("div", "listing"):
details = str()
label_html = str()
label = str()
label_url = str()
artist_title = str()
split_a_t = str()
artist = str()
title = str()
release_url = str()
details = product.find("div", "textblock")
label_html = details.find("div", "linesmall")
label = label_html.find("a").text.encode('utf-8')
label_url = label_html.findAll("a")[0]['href']
artist_title = details.find("div", "linebig").text.encode('utf-8')
split_a_t = artist_title.split(":\xc2\xa0")
artist = split_a_t[0]
title = split_a_t[1]
release_url = label_html.findAll("a")[1]['href']
split_release_url = release_url.split('/')
store_release_id = str(split_release_url[1])
print(store_release_id)
if len(store_release_id) < 1:
print("Didn't get the store id - skip")
continue
if len(label) < 3 or len(title) < 3:
print('skipping ' + title + ' or ' + label + ' as less than 3 characters')
continue
# sql = ('SELECT id FROM releases_all WHERE label_no_country LIKE %s AND title LIKE %s') % ('%' + label + '%','%' + title + '%')
try:
query = db_insert(
'INSERT INTO store_mappings (release_id,store,store_url,unique_key,store_release_id, date) SELECT id,%s,%s, md5(concat(id,%s)),%s,now() FROM releases_all WHERE label_no_country LIKE %s AND title LIKE %s ON DUPLICATE KEY UPDATE store_url=values(store_url),store_release_id=values(store_release_id)',
('hardwax', release_url, 'hardwax', store_release_id, label + '%', '%' + title + '%'))
data = query.fetchall()
print(query, data)
except Exception as e:
print(str(e))
continue
return base_url, 201
####################HHV
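# Looks up the stored HHV API URL and deeplink for this release, asks the HHV JSON
# API whether the item is sold out, and falls back to scraping the product page when
# the JSON cannot be fetched or parsed.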
@app.route('/api/v1.0/<int:api_key>/scrape/hhv/release/<string:hhv_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_hhv_release(api_key, hhv_id):
if str(api_key) != the_api_key:
abort(401)
##get the API URL
print(hhv_id)
try:
get_api = db_select('''SELECT api_url,deeplink
FROM hhv_releases h
JOIN store_mappings s
ON h.product_id=s.store_release_id
WHERE s.store=%s
AND s.store_release_id=%s
LIMIT 0,1''', ('hhv', hhv_id,))
except Exception as e:
print(str(e))
return jsonify(
{'store': 'hhv', 'in_stock': 'false', 'cart_url': 'http://bit.ly/ss_hhv?nodbquery', 'location': 'DE', 'price': ''})
data = get_api.fetchall()
api_url = ''
deeplink = 'http://bit.ly/ss_hhv'
for row in data:
api_url = row[0].replace('cid=1655283590&', '').replace('http://', 'https://')
deeplink = row[1].replace('de/artikel', 'en/item') # translates site to english
if api_url == '':
print('No API URL found')
return jsonify(
{'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink + '?noapiurl', 'location': 'DE', 'price': ''})
print(api_url)
####now get the HTML
try:
with requests.session() as s:
headers = {
'User-Agent': 'soundshelter.net - Webgains affiliate',
'From': 'info@soundshelter.net' # This is another valid field
}
r = s.get(api_url, timeout=5, allow_redirects=True, headers=headers)
except Exception as e:
print("Failed to request the HHV URL " + api_url, 405)
return jsonify({'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink + '?notgrabhhvurl', 'location': 'DE',
'price': ''})
try:
our_json = json.loads(r.text)
except Exception as e:
print('Failed to load JSON')
# We could now do a site scrape as a fallback
try:
headers = {
'User-Agent': 'soundshelter.net - Webgains affiliate',
'From': 'info@soundshelter.net' # This is another valid field
}
r = requests.get(deeplink, timeout=5, headers=headers)
except:
return jsonify(
{'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink + '?notgrabsite', 'location': 'DE',
'price': ''})
soup = BeautifulSoup(r.text, "lxml")
try:
stock_details = soup.find("a", "add_to_cart").text
except:
return jsonify({'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink + '?soupfail', 'location': 'DE',
'price': ''})
if 'Add to Cart' in stock_details:
return jsonify({'store': 'hhv', 'in_stock': 'true', 'cart_url': deeplink + '?fromsitescr', 'location': 'DE',
'price': ''})
else:
return jsonify(
{'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink + '?fromsitescr', 'location': 'DE',
'price': ''})
try:
stock = str(our_json['page']['content'][1]['config']['item']['soldOut'])
except Exception as e:
print('Failed to get the stock from the JSON')
return jsonify(
{'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink + '?notgetstock', 'location': 'DE', 'price': ''})
if stock == 'False':
return jsonify({'store': 'hhv', 'in_stock': 'true', 'cart_url': deeplink, 'location': 'DE', 'price': ''})
else:
return jsonify({'store': 'hhv', 'in_stock': 'false', 'cart_url': deeplink, 'location': 'DE', 'price': ''})
#######deejay
@app.route('/api/v1.0/<int:api_key>/scrape/deejay/release/<string:deejay_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_deejay_release(api_key, deejay_id):
if str(api_key) != the_api_key:
abort(401)
start_time = time.time()
print(deejay_id)
deeplink = 'http://bit.ly/deejay_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s
LIMIT 1''', (deejay_id,))
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'deejay', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': '',
'duration': duration})
data = get_api.fetchall()
for row in data:
url = 'https://www.deejay.de/content.php?param=' + row[0]
print(url)
try:
r = requests.get(url, timeout=5)
except Exception as e:
error = "Couldnt grab the url " + str(e)
return jsonify(
{'store': 'deejay', 'in_stock': 'false', 'cart_url': url + '?notgrabsite', 'location': 'DE', 'price': ''})
soup = BeautifulSoup(r.text, "lxml")
stock = soup.find('span', 'first').text
if 'In Stock' not in stock:
output = json.dumps({'store': 'deejay', 'in_stock': 'false', 'cart_url': url, 'location': 'DE', 'price': ''})
return output
else:
output = json.dumps({'store': 'deejay', 'in_stock': 'true', 'cart_url': url, 'location': 'DE', 'price': ''})
return output
# earcandy
@app.route('/api/v1.0/<int:api_key>/scrape/earcandy/release/<string:earcandy_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_earcandy_release(api_key, earcandy_id):
if str(api_key) != the_api_key:
abort(401)
# first up get the earcandy URL
print(earcandy_id)
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s''', (earcandy_id,))
except Exception as e:
print(str(e))
return jsonify({'store': 'earcandy', 'in_stock': 'false', 'cart_url': 'http://bit.ly/ear_candy_ss?nodbquery'})
data = get_api.fetchall()
for row in data:
url = row[0]
try:
r = requests.get(url, timeout=5)
except Exception as e:
error = "Couldnt grab the url " + str(e)
return jsonify({'store': 'earcandy', 'in_stock': 'false', 'cart_url': 'http://bit.ly/ear_candy_ss?notgrabsite',
'location': 'USA', 'price': ''})
soup = BeautifulSoup(r.text, "lxml")
stock = soup.find('span', 'VariationProductInventory').text
try:
price = soup.find('span', {'class': 'VariationProductPrice'}).text
price = re.sub("\n|\r", "", price.encode('utf-8')).replace('$', 'USD')
print(price.strip().encode('utf-8', 'ignore'))
except:
price = ''
if 'Out of stock' in stock:
output = json.dumps(
{'store': 'earcandy', 'in_stock': 'false', 'cart_url': url, 'location': 'USA', 'price': price})
return output
else:
output = json.dumps(
{'store': 'earcandy', 'in_stock': 'true', 'cart_url': url, 'location': 'USA', 'price': price})
return output
# soundarcade
@app.route('/api/v1.0/<int:api_key>/scrape/soundarcade/release/<string:soundarcade_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_soundarcade_release(api_key, soundarcade_id):
start_time = time.time()
if str(api_key) != the_api_key:
return 401
print('Doing ' + soundarcade_id)
# first, get the soundarcade URL from the DB
print(soundarcade_id)
deeplink = 'http://bit.ly/soundarcade_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s''', (soundarcade_id,))
except Exception as e:
print(str(e))
return jsonify({'store': 'soundarcade', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': ''})
data = get_api.fetchall()
for row in data:
url = row[0]
print('Grabbing ' + url)
duration = ("%s seconds" % ((time.time() - start_time)))
try:
r = requests.get(url, timeout=3)
except Exception as e:
error = "Couldnt grab the url " + str(e)
return jsonify(
{'store': 'soundarcade', 'in_stock': 'false', 'cart_url': 'http://bit.ly/soundarcade_ss?notgrabsite',
'location': 'UK', 'price': ''})
# #close any popups
# from selenium import webdriver
# from selenium.webdriver.firefox.options import Options
# #chrome_driver_path = "/var/www/ssapi/selenium/chromedriver"
# #from pyvirtualdisplay import Display
# #display = Display(visible=0, size=(800, 600))
# #display.start()
# options = Options()
# options.add_argument('-headless')
# driver = webdriver.Firefox(log_path='/var/www/geckodriverlog/error.log',firefox_options=options)
# driver.get(url)
# # duration = ("%s seconds" % ((time.time() - start_time)))
# # return jsonify({'store':'soundarcade','in_stock':'false','cart_url':deeplink + '?firstreturn', 'price':'','duration':duration})
# # try:
# # alert = browser.switch_to_alert()
# # alert.accept()
# # print "alert accpted"
# # except:
# # print "no alert"
# #### need to remove a.announcement-bar
# element = driver.find_element_by_class_name('announcement-bar')
# driver.execute_script("""
# var element = arguments[0];
# element.parentNode.removeChild(element);
# """, element)
# html = driver.page_source
# driver.close()
# driver.quit()
# # display.sendstop()
# moved to regex parsing (the old BeautifulSoup approach is kept below, commented out)
try:
stock = re.findall('<span id="AddToCartText-product-template">([^<]+)', r.text)
except:
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'soundarcade', 'in_stock': 'false', 'cart_url': deeplink + '?noloadregex', 'price': '',
'duration': duration})
print(stock[0] if stock else 'no stock text found')
price = ''
duration = ("%s seconds" % ((time.time() - start_time)))
if not stock or 'Sold out' in stock[0]:
output = json.dumps(
{'store': 'soundarcade', 'in_stock': 'false', 'cart_url': url, 'location': 'UK', 'price': '',
'duration': duration})
return output
else:
output = json.dumps(
{'store': 'soundarcade', 'in_stock': 'true', 'cart_url': url, 'location': 'UK', 'price': price,
'duration': duration})
return output
# remove all instances of
# html = re.sub(r"notification-bar","", r.text)
# try:
# soup = BeautifulSoup(html, "lxml")
# except:
# duration = ("%s seconds" % ((time.time() - start_time)))
# return jsonify({'store':'soundarcade','in_stock':'false','cart_url':deeplink + '?souploadfailit', 'price':'','duration':duration})
# try:
# stock = soup.find('span',{'id':'AddToCartText-product-template'}).text
# except:
# duration = ("%s seconds" % ((time.time() - start_time)))
# return jsonify({'store':'soundarcade','in_stock':'false','cart_url':deeplink + '?findinsoupfail', 'price':'','duration':duration})
# try:
# price = soup.find('span',{'id':'ProductPrice-product-template'}).text
# price = re.sub("\n|\r", "", price.encode('utf-8')).replace(' ','').strip()
# price = price.replace(u'\u00a3','GBP')
# print(price.strip().encode('utf-8','ignore'))
# except:
# price = ''
# duration = ("%s seconds" % ((time.time() - start_time)))
# if 'Sold out' in stock:
# output = json.dumps({'store':'soundarcade','in_stock':'false','cart_url':url,'location':'UK', 'price':'','duration':duration})
# return output
# else:
# output = json.dumps({'store':'soundarcade','in_stock':'true','cart_url':url,'location':'UK', 'price': price,'duration':duration})
# return output
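# A small, self-contained illustration (an assumption, not called anywhere) of the
# regex used above for the Shopify "AddToCartText" span, using a made-up HTML snippet.
def _add_to_cart_regex_example():
    sample_html = '<span id="AddToCartText-product-template">Sold out</span>'
    matches = re.findall('<span id="AddToCartText-product-template">([^<]+)', sample_html)
    # matches == ['Sold out'], so the route above would report in_stock as 'false'
    return matches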
# unearthed
@app.route('/api/v1.0/<int:api_key>/scrape/unearthed/release/<string:unearthed_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_unearthed_release(api_key, unearthed_id):
start_time = time.time()
if str(api_key) != the_api_key:
return 401
# first, get the unearthed URL from the DB
print(unearthed_id)
deeplink = 'http://bit.ly/unearthed_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s
LIMIT 1''', (unearthed_id,))
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'unearthed', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': '',
'duration': duration})
data = get_api.fetchall()
for row in data:
url = row[0].replace('https://unearthedsounds.co.uk/', 'https://www.unearthedsounds.co.uk/')
try:
print(url)
r = requests.get(url, timeout=3, allow_redirects=True)
except Exception as e:
error = "Couldnt grab the url " + str(e)
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'unearthed', 'in_stock': 'false', 'cart_url': 'http://bit.ly/unearthed_ss?notgrabsite',
'location': 'UK', 'price': '', 'duration': duration})
return output
try:
soup = BeautifulSoup(r.text, "html.parser")
except Exception as e:
print(str(e))
output = json.dumps(
{'store': 'unearthed', 'in_stock': 'false', 'cart_url': deeplink + '?noloadsoup', 'location': 'UK',
'price': '', 'duration': duration})
return output
try:
stock = soup.find('div', {'class': 'purchase-section'}).text
except Exception as e:
print(str(e))
output = json.dumps(
{'store': 'unearthed', 'in_stock': 'false', 'cart_url': deeplink + '?noparsehtml', 'location': 'UK',
'price': '', 'duration': duration})
return output
price = ''
# return ("%s seconds" % ((time.time() - start_time)))
try:
price = soup.find('h2', {'id': 'price-preview'}).text
price = re.sub("\n|\r", "", price.encode('utf-8')).replace(' ', '').strip()
price = price.replace(u'\u00a3', 'GBP')
print(price.strip().encode('utf-8', 'ignore'))
except:
price = ''
# return ("%s seconds" % ((time.time() - start_time)))
if 'Sold Out' in stock:
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps({'store': 'unearthed', 'in_stock': 'false', 'cart_url': url, 'location': 'UK', 'price': '',
'duration': duration})
return output
else:
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'unearthed', 'in_stock': 'true', 'cart_url': url, 'location': 'UK', 'price': price,
'duration': duration})
return output
# then scrape that badlad
# smallblackdots
@app.route('/api/v1.0/<int:api_key>/scrape/smallblackdots/release/<string:smallblackdots_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_smallblackdots_release(api_key, smallblackdots_id):
start_time = time.time()
if str(api_key) != the_api_key:
return 401
# first, get the smallblackdots URL from the DB
print(smallblackdots_id)
deeplink = 'http://bit.ly/smallblackdots_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s
LIMIT 1''', (smallblackdots_id,))
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify(
{'store': 'smallblackdots', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': '',
'duration': duration})
data = get_api.fetchall()
for row in data:
url = row[0]
try:
print(url)
headers = {'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Sound Shelter'}
r = requests.get(url, timeout=2, allow_redirects=True, headers=headers)
except Exception as e:
error = "Couldnt grab the url " + str(e)
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'smallblackdots', 'in_stock': 'false', 'cart_url': deeplink + '?notgrabsite', 'location': 'UK',
'price': '', 'duration': duration})
return output
try:
soup = BeautifulSoup(r.text, "html.parser")
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'smallblackdots', 'in_stock': 'false', 'cart_url': deeplink + '?noloadsoup', 'location': 'UK',
'price': '', 'duration': duration})
return output
try:
price = 'EUR' + soup.find('span', {'class': 'woocommerce-Price-amount'}).text.replace(u'\u00a0', '').replace(
u'\u20ac', '').replace(',', '.')
print(price.strip().encode('utf-8', 'ignore'))
except:
price = ''
if soup.find('p', {'class': 'stock out-of-stock'}):
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'smallblackdots', 'in_stock': 'false', 'cart_url': url, 'location': 'EU', 'price': price,
'duration': duration})
return output
if soup.find('button', {'class': 'single_add_to_cart_button'}):
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'smallblackdots', 'in_stock': 'true', 'cart_url': url, 'location': 'EU', 'price': price,
'duration': duration})
return output
else:
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'smallblackdots', 'in_stock': 'false', 'cart_url': deeplink + '?nostockmatch', 'location': 'EU',
'price': price, 'duration': duration})
return output
# jpc
@app.route('/api/v1.0/<int:api_key>/scrape/jpc/release/<string:jpc_id>', methods=['GET'])
# cached for 60 minutes
# @cache.cached(timeout=3600)
def get_jpc_release(api_key, jpc_id):
start_time = time.time()
if str(api_key) != the_api_key:
return 401
# first, get the jpc URL from the DB
print(jpc_id)
deeplink = 'http://bit.ly/jpc_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s
LIMIT 1''', (jpc_id,))
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'jpc', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': '',
'duration': duration, 'location': 'DE'})
data = get_api.fetchall()
for row in data:
url = row[0]
try:
curl_url = url.replace('https://partner.jpc.de/go.cgi?pid=126&wmid=cc&cpid=1&subid=&target=', '')
print(curl_url)
headers = {'User-Agent': 'Sound Shelter - Affiliate'}
r = requests.get(curl_url, timeout=5, allow_redirects=True, headers=headers)
except Exception as e:
error = "Couldnt grab the url " + str(e)
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'jpc', 'in_stock': 'false', 'cart_url': deeplink + '?notgrabsite', 'location': 'EU', 'price': '',
'duration': duration})
return output
try:
soup = BeautifulSoup(r.text, "html.parser")
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'jpc', 'in_stock': 'false', 'cart_url': deeplink + '?noloadsoup', 'location': 'EU', 'price': '',
'duration': duration})
return output
try:
price = 'EUR' + soup.find('span', {'class': 'woocommerce-Price-amount'}).text.replace(u'\u00a0', '').replace(
u'\u20ac', '').replace(',', '.')
print(price.strip().encode('utf-8', 'ignore'))
except:
price = ''
if soup.find('span', {'class': 'fa-cart-plus'}):
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps({'store': 'jpc', 'in_stock': 'true', 'cart_url': url, 'location': 'EU', 'price': price,
'duration': duration})
return output
else:
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'jpc', 'in_stock': 'false', 'cart_url': deeplink + '?nostockmatch', 'location': 'EU',
'price': price, 'duration': duration})
return output
# bordello
# cached for 60 minutes
@app.route('/api/v1.0/<int:api_key>/scrape/bordello/release/<string:bordello_id>', methods=['GET'])
# # @cache.cached(timeout=3600)
def get_bordello_release(api_key, bordello_id):
start_time = time.time()
if str(api_key) != the_api_key:
return 401
# first, get the bordello URL from the DB
print(bordello_id)
deeplink = 'http://bit.ly/bordello_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s''', (bordello_id,))
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'bordello', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': '',
'duration': duration})
data = get_api.fetchall()
for row in data:
url = row[0]
try:
r = requests.get(url, timeout=5)
except Exception as e:
error = "Couldnt grab the url " + str(e)
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'bordello', 'in_stock': 'false', 'cart_url': 'http://bit.ly/bordello_ss?notgrabsite',
'location': 'NL', 'price': '', 'duration': duration})
soup = BeautifulSoup(r.text, "lxml")
stock = soup.find('p', {'class': 'price'}).text
if 'SOLD OUT' in stock or 'Upcoming' in stock:
price = ''
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps({'store': 'bordello', 'in_stock': 'false', 'cart_url': url, 'location': 'NL', 'price': '',
'duration': duration})
return output
try:
price = soup.find('p', {'class': 'price'}).text
price = re.sub("\n|\r", "", price.encode('utf-8')).replace(' ', '').strip()
price = price.replace(u'\u20ac\u00a0', 'EUR')
print(price.strip().encode('utf-8', 'ignore'))
except:
price = ''
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps({'store': 'bordello', 'in_stock': 'true', 'cart_url': url, 'location': 'NL', 'price': price,
'duration': duration})
return output
@app.route('/api/v1.0/<int:api_key>/scrape/sydx/release/<string:sydx_id>', methods=['GET'])
# @cache.cached(timeout=3600)
def get_sydx_release(api_key, sydx_id):
start_time = time.time()
if str(api_key) != the_api_key:
return 401
# first, get the sydx URL from the DB
print(sydx_id)
deeplink = 'http://bit.ly/sydx_ss'
try:
get_api = db_select('''SELECT store_url
FROM store_mappings
WHERE store_release_id=%s''', (sydx_id,))
except Exception as e:
print(str(e))
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify({'store': 'sydx', 'in_stock': 'false', 'cart_url': deeplink + '?nodbquery', 'price': '',
'duration': duration})
data = get_api.fetchall()
for row in data:
url = row[0]
try:
r = requests.get(url, timeout=5)
except Exception as e:
error = "Couldnt grab the url " + str(e)
duration = ("%s seconds" % ((time.time() - start_time)))
return jsonify(
{'store': 'sydx', 'in_stock': 'false', 'cart_url': 'http://bit.ly/sydx_ss?notgrabsite', 'location': 'AU',
'price': '', 'duration': duration})
soup = BeautifulSoup(r.text, "lxml")
stock = soup.find('p', {'class': 'stock'}).text
if 'Out of stock' in stock:
price = ''
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps({'store': 'sydx', 'in_stock': 'false', 'cart_url': url, 'location': 'AU', 'price': '',
'duration': duration})
return output
try:
price = soup.find('span', {'class': 'woocommerce-Price-amount amount'}).text
price = re.sub("\n|\r", "", price.encode('utf-8')).replace(' ', '').strip()
price = price.replace(u'\u20ac\u00a0', 'AUD')
print(price.strip().encode('utf-8', 'ignore'))
except:
price = ''
duration = ("%s seconds" % ((time.time() - start_time)))
output = json.dumps(
{'store': 'sydx', 'in_stock': 'true', 'cart_url': url, 'location': 'AU', 'price': price, 'duration': duration})
return output
|
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple cross-platform helper to create an RPM package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import fileinput
import os
import re
import shutil
import subprocess
import sys
from tempfile import mkdtemp
# pylint: disable=g-direct-third-party-import
from third_party.py import gflags
gflags.DEFINE_string('name', '', 'The name of the software being packaged.')
gflags.DEFINE_string('version', '',
'The version of the software being packaged.')
gflags.DEFINE_string('release', '',
'The release of the software being packaged.')
gflags.DEFINE_string('arch', '',
'The CPU architecture of the software being packaged.')
gflags.DEFINE_string('spec_file', '',
'The file containing the RPM specification.')
gflags.DEFINE_string('out_file', '',
'The destination to save the resulting RPM file to.')
gflags.DEFINE_boolean('debug', False, 'Print debug messages.')
# Setup to safely create a temporary directory and clean it up when done.
@contextlib.contextmanager
def Cd(newdir, cleanup=lambda: True):
"""Change the current working directory.
This will run the provided cleanup function when the context exits and the
previous working directory is restored.
Args:
newdir: The directory to change to. This must already exist.
cleanup: An optional cleanup function to be executed when the context exits.
Yields:
Nothing.
"""
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
cleanup()
@contextlib.contextmanager
def Tempdir():
"""Create a new temporary directory and change to it.
The temporary directory will be removed when the context exits.
Yields:
The full path of the temporary directory.
"""
dirpath = mkdtemp()
def Cleanup():
shutil.rmtree(dirpath)
with Cd(dirpath, Cleanup):
yield dirpath
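# A minimal usage sketch for the two context managers above (illustrative only, not
# used by the build flow): Tempdir() creates a scratch directory, chdirs into it via
# Cd(), and removes it again when the block exits.
#
#   with Tempdir() as scratch:
#       # cwd is now `scratch`; files created here are deleted on exit
#       open('example.txt', 'w').close()
#   # back in the previous cwd, and `scratch` no longer exists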
def GetFlagValue(flagvalue, strip=True):
"""Return the flag's value; if it starts with '@', read the value from the named file."""
if flagvalue:
if flagvalue[0] == '@':
with open(flagvalue[1:], 'r') as f:
flagvalue = f.read()
if strip:
return flagvalue.strip()
return flagvalue
WROTE_FILE_RE = re.compile(r'Wrote: (?P<rpm_path>.+)', re.MULTILINE)
def FindOutputFile(log):
"""Find the written file from the log information."""
m = WROTE_FILE_RE.search(log)
if m:
return m.group('rpm_path')
return None
def CopyAndRewrite(input_file, output_file, replacements=None):
"""Copies the given file and optionally rewrites with replacements.
Args:
input_file: The file to copy.
output_file: The file to write to.
replacements: A dictionary of replacements.
Keys are prefixes to scan for, values are the replacements to write after
the prefix.
"""
with open(output_file, 'w') as output:
for line in fileinput.input(input_file):
if replacements:
for prefix, text in replacements.items():
if line.startswith(prefix):
line = prefix + ' ' + text + '\n'
break
output.write(line)
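# Illustrative only: this is how RpmBuilder.SetupWorkdir below uses CopyAndRewrite to
# stamp the spec file; the file names here are assumptions.
#
#   CopyAndRewrite('pkg.spec.in', 'pkg.spec',
#                  replacements={'Version:': '1.2.3', 'Release:': '1'})
#   # any line starting with 'Version:' becomes 'Version: 1.2.3', and likewise for
#   # 'Release:'; all other lines are copied through unchanged.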
def Which(program):
"""Search for the given program in the PATH.
Args:
program: The program to search for.
Returns:
The full path to the program.
"""
def IsExe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
for path in os.environ['PATH'].split(os.pathsep):
filename = os.path.join(path, program)
if IsExe(filename):
return filename
return None
class NoRpmbuildFound(Exception):
pass
def FindRpmbuild():
path = Which('rpmbuild')
if path:
return path
else:
raise NoRpmbuildFound()
class RpmBuilder(object):
"""A helper class to manage building the RPM file."""
SOURCE_DIR = 'SOURCES'
BUILD_DIR = 'BUILD'
TEMP_DIR = 'TMP'
DIRS = [SOURCE_DIR, BUILD_DIR, TEMP_DIR]
def __init__(self, name, version, release, arch, debug):
self.name = name
self.version = GetFlagValue(version)
self.release = GetFlagValue(release)
self.arch = arch
self.debug = debug
self.files = []
self.rpmbuild_path = FindRpmbuild()
self.rpm_path = None
def AddFiles(self, paths, root=''):
"""Add a set of files to the current RPM.
If an item in paths is a directory, its files are recursively added.
Args:
paths: The files to add.
root: The root of the filesystem to search for files. Defaults to ''.
"""
for path in paths:
full_path = os.path.join(root, path)
if os.path.isdir(full_path):
self.AddFiles(os.listdir(full_path), full_path)
else:
self.files.append(full_path)
def SetupWorkdir(self, spec_file, original_dir):
"""Create the needed structure in the workdir."""
# Create directory structure.
for name in RpmBuilder.DIRS:
if not os.path.exists(name):
os.makedirs(name, 0o777)
# Copy the files.
for f in self.files:
dst_dir = os.path.join(RpmBuilder.BUILD_DIR, os.path.dirname(f))
if not os.path.exists(dst_dir):
os.makedirs(dst_dir, 0o777)
shutil.copy(os.path.join(original_dir, f), dst_dir)
# Copy the spec file, updating with the correct version.
spec_origin = os.path.join(original_dir, spec_file)
self.spec_file = os.path.basename(spec_file)
replacements = {}
if self.version:
replacements['Version:'] = self.version
if self.release:
replacements['Release:'] = self.release
CopyAndRewrite(spec_origin, self.spec_file, replacements)
def CallRpmBuild(self, dirname):
"""Call rpmbuild with the correct arguments."""
args = [
self.rpmbuild_path,
'--define',
'_topdir %s' % dirname,
'--define',
'_tmppath %s/TMP' % dirname,
'--bb',
'--buildroot',
os.path.join(dirname, 'BUILDROOT'),
self.spec_file,
]
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = p.communicate()[0]
if p.returncode == 0:
# Find the created file.
self.rpm_path = FindOutputFile(output)
if p.returncode != 0 or not self.rpm_path:
print('Error calling rpmbuild:')
print(output)
# Return the status.
return p.returncode
def SaveResult(self, out_file):
"""Save the result RPM out of the temporary working directory."""
if self.rpm_path:
shutil.copy(self.rpm_path, out_file)
if self.debug:
print('Saved RPM file to %s' % out_file)
else:
print('No RPM file created.')
def Build(self, spec_file, out_file):
"""Build the RPM described by the spec_file."""
if self.debug:
print('Building RPM for %s at %s' % (self.name, out_file))
original_dir = os.getcwd()
spec_file = os.path.join(original_dir, spec_file)
out_file = os.path.join(original_dir, out_file)
with Tempdir() as dirname:
self.SetupWorkdir(spec_file, original_dir)
status = self.CallRpmBuild(dirname)
self.SaveResult(out_file)
return status
def main(argv=()):
try:
builder = RpmBuilder(FLAGS.name, FLAGS.version, FLAGS.release, FLAGS.arch,
FLAGS.debug)
builder.AddFiles(argv[1:])
return builder.Build(FLAGS.spec_file, FLAGS.out_file)
except NoRpmbuildFound:
print('ERROR: rpmbuild is required but is not present in PATH')
return 1
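# Example invocation (the script and file names are assumptions; the flags are the ones
# defined above, and any remaining positional arguments are the files to package):
#
#   python make_rpm.py --name=mypkg --version=1.0 --release=1 --arch=x86_64 \
#       --spec_file=mypkg.spec --out_file=mypkg-1.0-1.x86_64.rpm file1 dir/file2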
if __name__ == '__main__':
FLAGS = gflags.FLAGS
main(FLAGS(sys.argv))
|
|
import argparse
import base64
from datetime import datetime
import time
import os
import sys
import select
import shutil
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
from PIL import Image
from flask import Flask
from io import BytesIO
from collections import defaultdict
doWithoutKeras = False  # if True, skip loading Keras and steer from the joystick instead
if not doWithoutKeras:
from keras.models import load_model
import h5py
from keras import __version__ as keras_version
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
# **************** Joystick stuff ****************
doDescriptiveLabels = True # display descriptive names instead of just magic numbers
doPruneEmptyStatus = True # if True, remove 0 values from the button/axis status dictionary
def processJoystickEvent(buffer):
# get the event type, and either the button or axis, depending on the event type
items = np.frombuffer(buffer, dtype=np.uint8, count=2, offset=6)
event = items[0]
buttonOrAxis = items[1]
# get the value of the button or joystick axis
value = np.frombuffer(buffer, dtype=np.int16, count=1, offset=4)[0]
# get the event timestamp in milliseconds (measured from an arbitrary, driver-defined origin)
time = np.frombuffer(buffer, dtype=np.uint32, count=1, offset=0)[0]
return ( int(event), int(buttonOrAxis), int(value), int(time) )
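# A small worked example (not called anywhere) of the 8-byte frame layout assumed by
# processJoystickEvent: bytes 0-3 timestamp (uint32), 4-5 value (int16), 6 event type
# (uint8), 7 button/axis number (uint8) -- i.e. the Linux js_event struct.
def _joystick_frame_example():
    import struct
    # axis-moved event (type 2) on axis 0, value -32767, at t = 123456 ms
    frame = struct.pack('<IhBB', 123456, -32767, 2, 0)
    return processJoystickEvent(frame)  # -> (2, 0, -32767, 123456)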
def descriptiveJoystickLabels():
""" descriptive versions of event and device numbers """
eventType = 'eventType'
deviceType = 'deviceType'
eventButtonChanged = { eventType: "button-changed", deviceType: "button" }
eventAxisMoved = { eventType: "axis-moved", deviceType: "axis" }
eventInitButton = { eventType: "initial-value", deviceType: "button" }
eventInitAxis = { eventType: "initial-axis", deviceType: "axis" }
eventUnknown = { eventType: "unknown-event", deviceType: "device" }
joystickEvents = defaultdict( lambda: eventUnknown )
joystickEvents[1] = eventButtonChanged
joystickEvents[2] = eventAxisMoved
joystickEvents[129] = eventInitButton
joystickEvents[130] = eventInitAxis
return ( joystickEvents, eventType, deviceType )
def captureJoystickEvents( joystick = 0, maximumEvents = 0, status = None ):
""" threadable Joystick polling process """
if doDescriptiveLabels:
joystickEvents, eventType, deviceType = descriptiveJoystickLabels()
with open('/dev/input/js{}'.format(joystick), 'rb') as js:
dataFrameSize, dataFrameCursor = 8, 0
buffer = np.zeros( shape=(dataFrameSize,), dtype=np.uint8 )
eventsBeforeQuitting = maximumEvents
while eventsBeforeQuitting > 0 or maximumEvents == 0:
buffer[ dataFrameCursor ] = np.uint8( ord( js.read(1) ) )
dataFrameCursor += 1
if dataFrameCursor >= dataFrameSize:
dataFrameCursor = 0
event, axis, value, time = processJoystickEvent( buffer[:dataFrameSize] )
if doDescriptiveLabels:
type, device = ( joystickEvents[event][eventType],
joystickEvents[event][deviceType] )
else:
type, device = event, "device{}-".format( event )
msg = "Joystick {} event [{}] on {}{}: value = {} at time = {}\n"
sys.stdout.write( msg.format( joystick, type, device, axis, value, time ) )
sys.stdout.flush()
if status is not None:
key = "js{}{}{}".format( joystick, device, axis)
status[ key ] = value
if doPruneEmptyStatus and status[ key ] == 0:
del status[ key ]
eventsBeforeQuitting -= 1
if False and ( event, axis, value ) == ( 1, 0, 1 ): # "pulled the trigger!"
break;
return
if True:
from concurrent.futures import ThreadPoolExecutor
joystickStatus = defaultdict(int)
executor = ThreadPoolExecutor( max_workers = 2 )
joystickThread0 = executor.submit( captureJoystickEvents, joystick = 0, status = joystickStatus )
joystickThread1 = executor.submit( captureJoystickEvents, joystick = 1, status = joystickStatus )
# **************** speed and error controllers ****************
class SimplePIDcontroller:
def __init__( self, Kp, Ki, Kd = 0.0, initialIntegral = 0.0 ):
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
self.set_point = 0.
self.error = 0.
self.integral = initialIntegral
self.priorTime = time.time()
self.priorError = 0.0
def set_desired(self, desired):
self.set_point = desired
def get_desired(self):
return float( self.set_point )
def update(self, measurement):
# proportional error
self.error = self.set_point - measurement
# integral error
self.integral += self.error
# derivative error
delta = 0.0
if self.Kd > 0:
deltaError = self.error - self.priorError
deltaTime = time.time() - self.priorTime
if deltaTime > 0:
delta = deltaError / deltaTime
# prepare for future derivatives
self.priorError = self.error
self.priorTime = time.time()
return self.Kp * self.error + self.Ki * self.integral + self.Kd * delta
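# Illustrative sketch (not called by the driving loop): stepping the PID controller
# above against a fixed measurement shows the proportional + accumulating integral
# behaviour used for the throttle and steering corrections below.
def _pid_example():
    pid = SimplePIDcontroller(Kp=0.5, Ki=0.1)
    pid.set_desired(10.0)
    outputs = [pid.update(8.0) for _ in range(3)]
    # error is 2.0 each step; the integral grows by 2.0 per step, so the outputs are
    # 0.5*2 + 0.1*2, 0.5*2 + 0.1*4, 0.5*2 + 0.1*6 = 1.2, 1.4, 1.6
    return outputs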
errorController = SimplePIDcontroller( 0.45, 0.00030, 0.15 ) # 0.55, 0.0005, 0.10
errorController.set_desired( 0.00 )
speedController = SimplePIDcontroller( 0.1, 0.0060, 0.15 ) # 0.1, 0.008, 0.00
set_speed = 13.0 # 9.0 got around complex track, one weird reversal, let's try 11.0
speedController.set_desired(set_speed)
steeringAngles = [0,] * 9 # rolling average accumulator
@sio.on('telemetry')
def telemetry(sid, data):
if data:
global steeringAngles
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
print( "telemetry angle:", steering_angle, "throttle:", throttle, "speed:", speed )
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
if doWithoutKeras:
steering_angle = joystickStatus["js1axis0"] / 32768
print( "override angle:", steering_angle )
else:
trigger = joystickStatus["js1button0"]
if trigger > 0:
# while the joystick trigger is down, control the run manually
steering_angle = joystickStatus["js1axis0"] / 32768
print( "override angle:", steering_angle )
else:
img_shape = (160,320,3)
margin = 100 # reduce the total width of the image by this amount
left = int(margin / 2)
right = img_shape[1] - left
topMargin = 55
bottomMargin = 25
inferredError = float(
model.predict(image_array[None, topMargin:-bottomMargin, left:right, :], batch_size=1))
steering_angle = errorController.update( inferredError / (right - left) )
steering_angle = max( min( steering_angle, 1.0 ), -1.0 )
print( "inferred error:", inferredError, "angle:", steering_angle )
if float( speed ) > ( set_speed * 1.5 ):
# brake with a negative throttle
throttle = float( -0.25 * ( ( float( speed ) / set_speed ) - 1.0 ) )
throttle = min( max( throttle, -0.9 ), -0.0005 )
brake = 1.00
else:
throttle = speedController.update(float(speed))
throttle = max( min( throttle, 0.95 ), 0.0005 )
brake = 0.00
# calculate short-window rolling average of the inferred steering angles
# this is the equivalent of giving a few prior frames a vote on the current angle,
# so as to smooth out outliers in the inferences
steeringAngles = steeringAngles[1:] + [steering_angle]
steering_angle = sum(steeringAngles) / len(steeringAngles)
print( "control angle:", steering_angle, "throttle:", throttle, "brake:", brake )
send_control( steering_angle, throttle )
# save frame
if args.image_folder != '':
timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
image_filename = os.path.join(args.image_folder, timestamp)
image.save('{}.jpg'.format(image_filename))
else:
# NOTE: DON'T EDIT THIS.
sio.emit('manual', data={}, skip_sid=True)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit(
"steer",
data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
},
skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument(
'model',
type=str,
help='Path to model h5 file. Model should be on the same path.'
)
parser.add_argument(
'image_folder',
type=str,
nargs='?',
default='',
help='Path to image folder. This is where the images from the run will be saved.'
)
args = parser.parse_args()
# check that model Keras version is same as local Keras version
if doWithoutKeras: # skip Keras for now
model = None
else:
f = h5py.File(args.model, mode='r')
model_version = f.attrs.get('keras_version')
keras_version = str(keras_version).encode('utf8')
if model_version != keras_version:
print('You are using Keras version ', keras_version,
', but the model was built using ', model_version)
model = load_model(args.model)
if args.image_folder != '':
print("Creating image folder at {}".format(args.image_folder))
if not os.path.exists(args.image_folder):
os.makedirs(args.image_folder)
else:
shutil.rmtree(args.image_folder)
os.makedirs(args.image_folder)
print("RECORDING THIS RUN ...")
else:
print("NOT RECORDING THIS RUN ...")
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
|
|
import mock
import warnings
from django.contrib.sites.models import Site
from django.core.urlresolvers import NoReverseMatch, set_urlconf
from django.test import TestCase
from django.template import Context, Template
try:
from django.test.client import RequestFactory
except ImportError:
from subdomains.compat.requestfactory import RequestFactory # noqa
try:
from django.test.utils import override_settings
except ImportError:
from subdomains.compat.tests import override_settings # noqa
from subdomains.middleware import (SubdomainMiddleware,
SubdomainURLRoutingMiddleware)
from subdomains.utils import reverse, urljoin
def prefix_values(dictionary, prefix):
return dict((key, '%s.%s' % (prefix, value))
for key, value in dictionary.iteritems())
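# For example (the values, not the keys, get prefixed):
#   prefix_values({'api': 'api', None: 'marketing'}, 'subdomains.tests.urls')
#   == {'api': 'subdomains.tests.urls.api', None: 'subdomains.tests.urls.marketing'}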
def secure(request):
request.is_secure = lambda: True
return request
class SubdomainTestMixin(object):
DOMAIN = 'example.com'
URL_MODULE_PATH = 'subdomains.tests.urls'
def setUp(self):
super(SubdomainTestMixin, self).setUp()
self.site = Site.objects.get_current()
self.site.domain = self.DOMAIN
self.site.save()
@override_settings(
DEFAULT_URL_SCHEME='http',
ROOT_URLCONF='%s.application' % URL_MODULE_PATH,
SUBDOMAIN_URLCONFS=prefix_values({
None: 'marketing',
'api': 'api',
'www': 'marketing',
}, prefix=URL_MODULE_PATH),
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'subdomains.middleware.SubdomainURLRoutingMiddleware',
))
def run(self, *args, **kwargs):
super(SubdomainTestMixin, self).run(*args, **kwargs)
def get_path_to_urlconf(self, name):
"""
Returns the full path to the given urlconf.
"""
return '.'.join((self.URL_MODULE_PATH, name))
def get_host_for_subdomain(self, subdomain=None):
"""
Returns the hostname for the provided subdomain.
"""
if subdomain is not None:
host = '%s.%s' % (subdomain, self.site.domain)
else:
host = '%s' % self.site.domain
return host
class SubdomainMiddlewareTestCase(SubdomainTestMixin, TestCase):
def setUp(self):
super(SubdomainMiddlewareTestCase, self).setUp()
self.middleware = SubdomainMiddleware()
def test_subdomain_attribute(self):
def subdomain(subdomain):
"""
Returns the subdomain associated with the request by the middleware
for the given subdomain.
"""
host = self.get_host_for_subdomain(subdomain)
request = RequestFactory().get('/', HTTP_HOST=host)
self.middleware.process_request(request)
return request.subdomain
self.assertEqual(subdomain(None), None)
self.assertEqual(subdomain('www'), 'www')
self.assertEqual(subdomain('www.subdomain'), 'www.subdomain')
self.assertEqual(subdomain('subdomain'), 'subdomain')
self.assertEqual(subdomain('another.subdomain'), 'another.subdomain')
def test_www_domain(self):
def host(host):
"""
Returns the subdomain for the provided HTTP Host.
"""
request = RequestFactory().get('/', HTTP_HOST=host)
self.middleware.process_request(request)
return request.subdomain
self.site.domain = 'www.%s' % self.DOMAIN
self.site.save()
with override_settings(REMOVE_WWW_FROM_DOMAIN=False):
self.assertEqual(host('www.%s' % self.DOMAIN), None)
# Squelch the subdomain warning for cleaner test output, since we
# already know that this is an invalid subdomain.
with warnings.catch_warnings(record=True) as warnlist:
self.assertEqual(host('www.subdomain.%s' % self.DOMAIN), None)
self.assertEqual(host('subdomain.%s' % self.DOMAIN), None)
# Trick pyflakes into not warning us about variable usage.
del warnlist
self.assertEqual(host('subdomain.www.%s' % self.DOMAIN),
'subdomain')
self.assertEqual(host('www.subdomain.www.%s' % self.DOMAIN),
'www.subdomain')
with override_settings(REMOVE_WWW_FROM_DOMAIN=True):
self.assertEqual(host('www.%s' % self.DOMAIN), 'www')
self.assertEqual(host('subdomain.%s' % self.DOMAIN), 'subdomain')
self.assertEqual(host('subdomain.www.%s' % self.DOMAIN),
'subdomain.www')
def test_case_insensitive_subdomain(self):
host = 'WWW.%s' % self.DOMAIN
request = RequestFactory().get('/', HTTP_HOST=host)
self.middleware.process_request(request)
self.assertEqual(request.subdomain, 'www')
host = 'www.%s' % self.DOMAIN.upper()
request = RequestFactory().get('/', HTTP_HOST=host)
self.middleware.process_request(request)
self.assertEqual(request.subdomain, 'www')
class SubdomainURLRoutingTestCase(SubdomainTestMixin, TestCase):
def setUp(self):
super(SubdomainURLRoutingTestCase, self).setUp()
self.middleware = SubdomainURLRoutingMiddleware()
def test_url_routing(self):
def urlconf(subdomain):
"""
Returns the URLconf associated with this request.
"""
host = self.get_host_for_subdomain(subdomain)
request = RequestFactory().get('/', HTTP_HOST=host)
self.middleware.process_request(request)
return getattr(request, 'urlconf', None)
self.assertEqual(urlconf(None), self.get_path_to_urlconf('marketing'))
self.assertEqual(urlconf('www'), self.get_path_to_urlconf('marketing'))
self.assertEqual(urlconf('api'), self.get_path_to_urlconf('api'))
# Falls through to the actual ROOT_URLCONF.
self.assertEqual(urlconf('subdomain'), None)
def test_appends_slash(self):
for subdomain in (None, 'api', 'wildcard'):
host = self.get_host_for_subdomain(subdomain)
response = self.client.get('/example', HTTP_HOST=host)
self.assertEqual(response.status_code, 301)
self.assertEqual(response['Location'], 'http://%s/example/' % host)
class SubdomainURLReverseTestCase(SubdomainTestMixin, TestCase):
def test_url_join(self):
self.assertEqual(urljoin(self.DOMAIN, scheme='http'),
'http://%s' % self.DOMAIN)
self.assertEqual(urljoin(self.DOMAIN, scheme='https'),
'https://%s' % self.DOMAIN)
self.assertEqual(urljoin(self.DOMAIN, scheme='http', path='/example/'),
'http://%s/example/' % self.DOMAIN)
def test_implicit_reverse(self):
# Uses settings.SUBDOMAIN_URLCONFS[None], if it exists.
# Otherwise would perform the same behavior as `test_wildcard_reverse`.
self.assertEqual(reverse('home'), 'http://%s/' % self.DOMAIN)
def test_explicit_reverse(self):
# Uses the explicitly provided settings.SUBDOMAIN_URLCONFS[subdomain]
self.assertEqual(reverse('home', subdomain='api'),
'http://api.%s/' % self.DOMAIN)
self.assertEqual(reverse('view', subdomain='api'),
'http://api.%s/view/' % self.DOMAIN)
def test_wildcard_reverse(self):
# Falls through to settings.ROOT_URLCONF
subdomain = 'wildcard'
self.assertEqual(reverse('home', subdomain),
'http://%s.%s/' % (subdomain, self.DOMAIN))
self.assertEqual(reverse('view', subdomain),
'http://%s.%s/view/' % (subdomain, self.DOMAIN))
def test_reverse_subdomain_mismatch(self):
self.assertRaises(NoReverseMatch, lambda: reverse('view'))
def test_reverse_invalid_urlconf_argument(self):
self.assertRaises(TypeError,
lambda: reverse('home',
urlconf=self.get_path_to_urlconf('marketing')))
def test_reverse_with_request_same_domain(self):
request = RequestFactory().get('/', HTTP_HOST=self.DOMAIN)
self.assertEqual(reverse('home', request=request), '/')
def test_reverse_with_request_different_subdomain(self):
host = 'wildcard.%s' % self.DOMAIN
request = RequestFactory().get('/', HTTP_HOST=host)
self.assertEqual(reverse('home', request=request),
'http://%s/' % self.DOMAIN)
def test_reverse_with_request_same_subdomain(self):
subdomain = 'wildcard'
host = '%s.%s' % (subdomain, self.DOMAIN)
request = RequestFactory().get('/', HTTP_HOST=host)
self.assertEqual(reverse('home', request=request, subdomain=subdomain),
'/')
def test_reverse_with_request_protocol_relative(self):
request = RequestFactory().get('/', HTTP_HOST=self.DOMAIN)
self.assertEqual(reverse('home', scheme='', request=request), '/')
def test_reverse_with_request_secure_protocol_relative(self):
request = secure(RequestFactory().get('/', HTTP_HOST=self.DOMAIN))
self.assertEqual(reverse('home', scheme='', request=request), '/')
def test_reverse_with_request_protocol_unsecure_to_secure(self):
request = RequestFactory().get('/', HTTP_HOST=self.DOMAIN)
self.assertEqual(reverse('home', scheme='https', request=request),
'https://%s/' % self.DOMAIN)
def test_reverse_with_request_protocol_secure_to_default(self):
request = secure(RequestFactory().get('/', HTTP_HOST=self.DOMAIN))
self.assertEqual(reverse('home', request=request),
'http://%s/' % self.DOMAIN)
def test_reverse_with_request_protocol_secure_to_unsecure(self):
request = secure(RequestFactory().get('/', HTTP_HOST=self.DOMAIN))
self.assertEqual(reverse('home', scheme='http', request=request),
'http://%s/' % self.DOMAIN)
def test_using_not_default_urlconf(self):
# Ensure that changing the currently active URLconf to something other
# than the default still resolves wildcard subdomains correctly.
set_urlconf(self.get_path_to_urlconf('api'))
subdomain = 'wildcard'
# This will raise NoReverseMatch if we're using the wrong URLconf for
# the provided subdomain.
self.assertEqual(reverse('application', subdomain=subdomain),
'http://%s.%s/application/' % (subdomain, self.DOMAIN))
class SubdomainTemplateTagTestCase(SubdomainTestMixin, TestCase):
def make_template(self, template):
return Template('{% load subdomainurls %}' + template)
def test_without_subdomain(self):
defaults = {'view': 'home'}
template = self.make_template('{% url view %}')
context = Context(defaults)
rendered = template.render(context).strip()
self.assertEqual(rendered, 'http://%s/' % self.DOMAIN)
def test_with_subdomain(self):
defaults = {'view': 'home'}
template = self.make_template('{% url view subdomain=subdomain %}')
for subdomain in ('www', 'api', 'wildcard'):
context = Context(dict(defaults, subdomain=subdomain))
rendered = template.render(context).strip()
self.assertEqual(rendered,
'http://%s.%s/' % (subdomain, self.DOMAIN))
def test_no_reverse(self):
template = self.make_template('{% url view subdomain=subdomain %}')
context = Context({'view': '__invalid__'})
self.assertRaises(NoReverseMatch, lambda: template.render(context))
def test_implied_subdomain_from_request(self):
template = self.make_template('{% url view %}')
defaults = {'view': 'home'}
request = mock.Mock()
request.subdomain = None
context = Context(dict(defaults, request=request))
rendered = template.render(context).strip()
self.assertEqual(rendered, 'http://%s/' % self.DOMAIN)
for subdomain in ('www', 'api', 'wildcard'):
request = mock.Mock()
request.subdomain = subdomain
context = Context(dict(defaults, request=request))
rendered = template.render(context).strip()
self.assertEqual(rendered,
'http://%s.%s/' % (subdomain, self.DOMAIN))
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexc
from neutron.common import log
from neutron.plugins.common import constants
from neutron import quota
from neutron.services import service_base
from oslo_config import cfg
from oslo_log import log as logging
import six
import gbpservice.neutron.extensions
from gbpservice.neutron.services.servicechain.common import constants as scc
# The code below monkey patches key Neutron modules. This is needed for the GBP
# service to load correctly: the GBP extensions' path is added to Neutron's so
# that it is found at extension scanning time.
extensions.append_api_extensions_path(gbpservice.neutron.extensions.__path__)
constants.SERVICECHAIN = "SERVICECHAIN"
constants.COMMON_PREFIXES["SERVICECHAIN"] = "/servicechain"
LOG = logging.getLogger(__name__)
# Service Chain Exceptions
class ServiceProfileNotFound(nexc.NotFound):
message = _("ServiceProfile %(profile_id)s could not be found")
class ServiceProfileInUse(nexc.NotFound):
message = _("Unable to complete operation, ServiceProfile "
"%(profile_id)s is in use")
class ServiceChainNodeNotFound(nexc.NotFound):
message = _("ServiceChainNode %(sc_node_id)s could not be found")
class ServiceChainSpecNotFound(nexc.NotFound):
message = _("ServiceChainSpec %(sc_spec_id)s could not be found")
class ServiceChainInstanceNotFound(nexc.NotFound):
message = _("ServiceChainInstance %(sc_instance_id)s could not be found")
class ServiceChainNodeInUse(nexc.InUse):
message = _("Unable to complete operation, ServiceChainNode "
"%(node_id)s is in use")
class ServiceChainSpecInUse(nexc.InUse):
message = _("Unable to complete operation, ServiceChainSpec "
"%(spec_id)s is in use")
class ServiceTypeNotFound(nexc.NotFound):
message = _("ServiceType %(service_type_id) could not be found")
class ServiceTypeNotSupported(nexc.NotFound):
message = _("ServiceType %(service_type_id) not supported")
class PortNotFound(nexc.NotFound):
message = _("Port %(port_id)s could not be found")
def _validate_str_list(data, valid_values=None):
if not isinstance(data, list):
msg = _("'%s' is not a list") % data
LOG.debug(msg)
return msg
for item in data:
msg = attr._validate_string(item)
if msg:
LOG.debug(msg)
return msg
if len(set(data)) != len(data):
msg = _("Duplicate items in the list: '%s'") % ', '.join(data)
LOG.debug(msg)
return msg
attr.validators['type:string_list'] = _validate_str_list
SERVICECHAIN_NODES = 'servicechain_nodes'
SERVICECHAIN_SPECS = 'servicechain_specs'
SERVICECHAIN_INSTANCES = 'servicechain_instances'
SERVICE_PROFILES = 'service_profiles'
RESOURCE_ATTRIBUTE_MAP = {
SERVICECHAIN_NODES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None}, 'default': '',
'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'service_type': {'allow_post': True, 'allow_put': False,
'validate': {'type:string_or_none': None},
'is_visible': True, 'default': None},
'service_profile_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None},
'config': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'required': True, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},
SERVICECHAIN_SPECS: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'nodes': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': attr.convert_none_to_empty_list,
'default': None, 'is_visible': True,
'required': True},
'config_param_names': {'allow_post': False, 'allow_put': False,
'validate': {'type:string_list': None},
'default': [], 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
},
SERVICECHAIN_INSTANCES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
'servicechain_specs': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_list': None},
'convert_to': attr.convert_none_to_empty_list,
'default': None, 'is_visible': True,
'required': True},
'provider_ptg_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'consumer_ptg_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'management_ptg_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'classifier_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:uuid_or_none': None},
'is_visible': True, 'default': None,
'required': True},
'config_param_values': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'default': "", 'is_visible': True},
},
SERVICE_PROFILES: {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None}, 'is_visible': True,
'primary_key': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True, 'is_visible': True},
attr.SHARED: {'allow_post': True, 'allow_put': True,
'default': False, 'convert_to': attr.convert_to_boolean,
'is_visible': True, 'required_by_policy': True,
'enforce_policy': True},
'vendor': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'insertion_mode': {'allow_post': True, 'allow_put': True,
'validate': {'type:values':
scc.VALID_INSERTION_MODES},
'is_visible': True, 'default': None},
'service_type': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'required': True},
'service_flavor': {'allow_post': True, 'allow_put': True,
'validate': {'type:string_or_none': None},
'is_visible': True, 'default': None},
},
}
service_chain_quota_opts = [
cfg.IntOpt('quota_servicechain_node',
default=-1,
help=_('Number of Service Chain Nodes allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_servicechain_spec',
default=-1,
help=_('Number of Service Chain Specs allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_servicechain_instance',
default=-1,
help=_('Number of Service Chain Instances allowed per tenant. '
'A negative value means unlimited.')),
cfg.IntOpt('quota_service_profile',
default=-1,
help=_('Number of Service Profiles allowed per tenant. '
'A negative value means unlimited.')),
]
cfg.CONF.register_opts(service_chain_quota_opts, 'QUOTAS')
class Servicechain(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Service Chain Abstraction"
@classmethod
def get_alias(cls):
return "servicechain"
@classmethod
def get_description(cls):
return "Extension for Service Chain Abstraction"
@classmethod
def get_namespace(cls):
return "http://wiki.openstack.org/neutron/sc/v2.0/"
@classmethod
def get_updated(cls):
return "2014-08-03T12:00:00-00:00"
@classmethod
def get_resources(cls):
plural_mappings = resource_helper.build_plural_mappings(
{}, RESOURCE_ATTRIBUTE_MAP)
attr.PLURALS.update(plural_mappings)
for resource_name in ['servicechain_node', 'servicechain_spec',
'servicechain_instance', 'service_profile']:
quota.QUOTAS.register_resource_by_name(resource_name)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.SERVICECHAIN)
@classmethod
def get_plugin_interface(cls):
return ServiceChainPluginBase
def update_attributes_map(self, attributes):
super(Servicechain, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class ServiceChainPluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return constants.SERVICECHAIN
def get_plugin_type(self):
return constants.SERVICECHAIN
def get_plugin_description(self):
return 'Service Chain plugin'
def update_chains_pt_added(self, context, policy_target):
""" Auto scaling function.
Override this method to react to policy target creation.
"""
pass
def update_chains_pt_removed(self, context, policy_target):
""" Auto scaling function.
Override this method to react to policy target deletion.
"""
pass
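# Illustrative only (an assumption, not part of this interface): a concrete plugin
# could override the two hooks above to rescale running chains, for example:
#
#   def update_chains_pt_added(self, context, policy_target):
#       instances = self.get_servicechain_instances(
#           context,
#           filters={'provider_ptg_id': [policy_target['policy_target_group_id']]})
#       for instance in instances:
#           self._scale_out_instance(context, instance)  # hypothetical helper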
@abc.abstractmethod
@log.log
def get_servicechain_nodes(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
@log.log
def get_servicechain_node(self, context, servicechain_node_id,
fields=None):
pass
@abc.abstractmethod
@log.log
def create_servicechain_node(self, context, servicechain_node):
pass
@abc.abstractmethod
@log.log
def update_servicechain_node(self, context, servicechain_node_id,
servicechain_node):
pass
@abc.abstractmethod
@log.log
def delete_servicechain_node(self, context, servicechain_node_id):
pass
@abc.abstractmethod
@log.log
def get_servicechain_specs(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
@log.log
def get_servicechain_spec(self, context, servicechain_spec_id,
fields=None):
pass
@abc.abstractmethod
@log.log
def create_servicechain_spec(self, context, servicechain_spec):
pass
@abc.abstractmethod
@log.log
def update_servicechain_spec(self, context, servicechain_spec_id,
servicechain_spec):
pass
@abc.abstractmethod
@log.log
def delete_servicechain_spec(self, context, servicechain_spec_id):
pass
@abc.abstractmethod
@log.log
def get_servicechain_instances(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
@log.log
def get_servicechain_instance(self, context, servicechain_instance_id,
fields=None):
pass
@abc.abstractmethod
@log.log
def create_servicechain_instance(self, context, servicechain_instance_id):
pass
@abc.abstractmethod
@log.log
def update_servicechain_instance(self, context, servicechain_instance_id,
servicechain_instance):
pass
@abc.abstractmethod
@log.log
def delete_servicechain_instance(self, context, servicechain_instance_id):
pass
@abc.abstractmethod
@log.log
def create_service_profile(self, context, service_profile):
pass
@abc.abstractmethod
@log.log
def update_service_profile(self, context, service_profile_id,
service_profile):
pass
@abc.abstractmethod
@log.log
def delete_service_profile(self, context, service_profile_id):
pass
@abc.abstractmethod
@log.log
def get_service_profile(self, context, service_profile_id, fields=None):
pass
@abc.abstractmethod
@log.log
def get_service_profiles(self, context, filters=None, fields=None):
pass
|
|
# -*- coding: utf-8 -*-
"""
Objective: create an airfoil with a leading edge restriction, same upper length
restriction, orthogonal upper spars and constant thicknesses in four places
Created on Mon Oct 17 10:36:34 2016
@author: Pedro
"""
from __future__ import print_function
import os
import math
import numpy as np
from numpy.linalg import inv
import warnings
from scipy.integrate import quad
from scipy import optimize
from scipy.optimize import fsolve
from aeropy.airfoil_module import CST
from aeropy.CST.module_2D import *
# As a quick trick, to morph the upper surface I simply mirror the image with respect to x
inverted = False
# Defines whether morphing is backwards or forwards
morphing_direction = 'forwards'
def calculate_c_baseline(c_L, Au_C, Au_L, deltaz, l_LE=0, eps_LE=0, psi_P_u1=0):
"""Equations in the New_CST.pdf. Calculates the upper chord in order for
the cruise and landing airfoils to have the same length."""
def integrand(psi, Au, delta_xi):
return np.sqrt(1 + dxi_u(psi, Au, delta_xi)**2)
def f(c_C):
"""Function dependent of c_C and that outputs c_C."""
y_C, err = quad(integrand, 0, 1, args=(Au_C, deltaz/c_C))
y_L, err = quad(integrand, psi_P_u1, 1, args=(Au_L, deltaz/c_L))
y_LE, err = quad(integrand, 0, psi_P_u1, args=(Au_L, deltaz/c_L))
return c_L*((1-eps_LE)*(l_LE+y_LE)+y_L)/y_C
c_C = optimize.fixed_point(f, [c_L])
# In case the calculated chord is really close to the original, but the
# algorithm was not able to make them equal
if abs(c_L - c_C) < 1e-7:
return c_L
# The output is an array so it needs the extra [0]
return c_C[0]
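# The fixed point above enforces equal upper-surface arc length between the child
# (cruise) and parent (landing) airfoils, i.e. (reading it off f(c_C)):
#   c_C * int_0^1 sqrt(1 + dxi_u(psi; Au_C)^2) dpsi
#     = c_L * [ (1 - eps_LE) * (l_LE + int_0^psi_P_u1 sqrt(1 + dxi_u(psi; Au_L)^2) dpsi)
#               + int_psi_P_u1^1 sqrt(1 + dxi_u(psi; Au_L)^2) dpsi ]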
def calculate_psi_goal(psi_baseline, Au_baseline, Au_goal, deltaz,
c_baseline, c_goal, l_LE, eps_LE, psi_1):
"""Find the value for psi that has the same location w on the upper
surface of the goal as psi_baseline on the upper surface of the
baseline"""
def integrand(psi_baseline, Au, deltaz, c):
return c*np.sqrt(1 + dxi_u(psi_baseline, Au, deltaz/c)**2)
def equation(psi_goal, Au_goal, deltaz, c):
if psi_goal != psi_1:
L_baseline, err = quad(integrand, psi_1, psi_baseline, args=(Au_baseline, deltaz,
c_baseline))
else:
L_baseline = 0
L_LE, err = quad(integrand, 0, psi_1, args=(Au_baseline, deltaz,
c_baseline))
y, err = quad(integrand, 0, psi_goal, args=(Au_goal, deltaz, c))
return y - (1-eps_LE)*(L_LE+c_baseline*l_LE) - L_baseline
with warnings.catch_warnings():
warnings.simplefilter("ignore")
y = fsolve(equation, psi_baseline, args=(Au_goal, deltaz,
c_goal))
return y[0]
def calculate_A0_moving_LE(psi_baseline, psi_goal_0, Au_baseline, Au_goal, deltaz,
c_baseline, l_LE, eps_LE):
"""Find the value for A_P0^c that has the same arc length for the first bay
as for the parent."""
def integrand(psi_baseline, Al, deltaz, c):
return c*np.sqrt(1 + dxi_u(psi_baseline, Al, deltaz/c)**2)
def equation(A0, L_baseline, Au_goal, deltaz):
Au_goal[0] = A0
        # c_baseline and psi_baseline are the parent chord and spar locations
        c = calculate_c_baseline(c_baseline, Au_goal, Au_baseline, deltaz/c_baseline,
                                 l_LE, eps_LE, psi_baseline[0])
y, err = quad(integrand, 0, psi_goal_0, args=(Au_goal, deltaz, c))
print('y', y, y - (1-eps_LE)*L_baseline, A0, c)
return y - (1-eps_LE)*(L_baseline - c*l_LE)
L_baseline, err = quad(integrand, 0, psi_baseline[0], args=(Au_baseline, deltaz,
c_baseline))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
y = fsolve(equation, Au_goal[0], args=(L_baseline, Au_goal, deltaz))
return y[0]
def calculate_spar_direction(psi_baseline, Au_baseline, Au_goal, deltaz, c_goal,
                             l_LE=0, eps_LE=0, psi_spars=(0.,)):
    # Defaults correspond to the case without a leading-edge restriction.
"""Calculate the direction of the spar component based on a location
at the upper surface for the cruise airfoil."""
# Calculate cruise chord
c_baseline = calculate_c_baseline(c_goal, Au_baseline, Au_goal,
deltaz, l_LE, eps_LE, psi_spars[0])
    # Calculate psi at goal airfoil
psi_goal = calculate_psi_goal(psi_baseline, Au_baseline, Au_goal, deltaz,
c_baseline, c_goal, l_LE, eps_LE, psi_spars[0])
# non-normalized direction
s = np.zeros(2)
t = np.zeros(2)
# t_norm = np.sqrt(1 + (dxi_u(psi_goal, Au_goal[0], Au_goal[1], deltaz))**2)
cbeta = calculate_cbeta(psi_baseline, Au_baseline,
deltaz/c_baseline)
sbeta = np.sqrt(1-cbeta**2)
t[0] = 1
t[1] = dxi_u(psi_goal, Au_goal, deltaz/c_goal)
t_norm = np.sqrt(t[0]**2 + t[1]**2)
t = (1./t_norm)*t
# s[0] = t_norm*cbeta - dxi_u(psi_goal, Au_goal[0], Au_goal[1], deltaz)
# s[1] = 1
s[1] = t[1]*cbeta + t[0]*sbeta
s[0] = (cbeta - s[1]*t[1])/t[0]
return s
# ==============================================================================
# Calculate dependent shape function parameters
# ==============================================================================
def calculate_dependent_shape_coefficients(Au_C_1_to_n,
psi_spars, Au_P, Al_P, deltaz, c_P,
morphing='backwards', l_LE=0, eps_LE=0):
"""Calculate dependent shape coefficients for children configuration for a 4 order
Bernstein polynomial and return the children upper, lower shape
coefficients, children chord and spar thicknesses. _P denotes parent parameters"""
def calculate_AC_u0(AC_u0, constant_LE=True):
Au_C = [AC_u0] + Au_C_1_to_n
if constant_LE:
return np.sqrt(c_P/c_C)*Au_P[0]
else:
return calculate_A0_moving_LE(psi_spars, psi_lower_children[0], Au_P, Au_C, deltaz,
c_P, l_LE, eps_LE)
    # Bernstein polynomial
def K(r, n):
K = math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
return K
# Bernstein Polynomial order
# In case of leading edge radius constraint
n = len(Au_C_1_to_n)
    # Find upper shape coefficient through an iterative method since Au_0 is unknown
# via fixed point iteration
#AC_u0 = optimize.fixed_point(calculate_AC_u0, Au_P[0])
# print AC_u0
error = 9999
psi_lower_children = psi_spars
Au_C = [Au_P[0]] + Au_C_1_to_n # [Au_P[0]] +
former_chord = c_P
while error > 1e-5:
former_Au_C = []
for i in range(len(Au_C)):
former_Au_C.append(Au_C[i])
# Because the output is an array, need the extra [0]
error_A0 = 999
# Now that AC_u0 is known we can calculate the actual chord and AC_l0
# c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz/c_P, l_LE, eps_LE, psi_spars[0])
Au_C[0] = calculate_AC_u0(Au_C[0], constant_LE=False)
Al_C0 = Au_C[0]
c_C = calculate_c_baseline(c_P, Au_C, Au_P, deltaz/c_P, l_LE, eps_LE, psi_spars[0])
#Al_C0 = Au_C[0]
# print '0 lower shape coefficient: ',AC_l0
        # Calculate thicknesses and the tensor for the constraint linear system problem
spar_thicknesses = []
if morphing == 'forwards':
f = np.zeros((n, 1))
# psi/xi coordinates for lower surface of the children configuration
psi_lower_children = []
xi_lower_children = []
xi_upper_children = []
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_upper_children = []
for j in range(len(psi_spars)):
print(j)
psi_upper_children.append(calculate_psi_goal(psi_spars[j], Au_P, Au_C, deltaz,
c_P, c_C, l_LE, eps_LE, psi_spars[0]))
            # Calculate xi for the upper children. The lower surface does not matter here,
            # so it is given placeholder shape coefficients
xi_upper_children = CST(psi_upper_children, 1., deltasz=[
deltaz/2./c_C, deltaz/2./c_C], Al=Au_C, Au=Au_C)
xi_upper_children = xi_upper_children['u']
# print xi_upper_children
# Debugging section
# x = np.linspace(0,1)
# y = CST(x, 1., deltasz= [deltaz/2./c_C, deltaz/2./c_C], Al= Au_C, Au =Au_C)
# plt.plot(x,y['u'])
# plt.scatter(psi_upper_children, xi_upper_children)
# plt.grid()
# plt.show()
# BREAK
print(Au_P, Au_C, len(psi_spars), n)
for j in range(len(psi_spars)):
xi_parent = CST(psi_spars, 1., deltasz=[
deltaz/2./c_P, deltaz/2./c_P], Al=Al_P, Au=Au_P)
delta_j_P = xi_parent['u'][j]-xi_parent['l'][j]
t_j = c_P*(delta_j_P)
                # Calculate orientation for children
s_j = calculate_spar_direction(
psi_spars[j], Au_P, Au_C, deltaz, c_C, l_LE, eps_LE, psi_spars)
psi_l_j = psi_upper_children[j]-delta_j_P/c_C*s_j[0]
xi_l_j = xi_upper_children[j]-delta_j_P/c_C*s_j[1]
spar_thicknesses.append(t_j)
psi_lower_children.append(psi_l_j)
xi_lower_children.append(xi_l_j)
f[j] = (2*xi_l_j + psi_l_j*deltaz/c_C) / \
(2*(psi_l_j**0.5)*(psi_l_j-1)) - Al_C0*(1-psi_l_j)**n
F = np.zeros((n, n))
# j is the row dimension and i the column dimension in this case
for j in range(n):
for i in range(n):
                    # Because Python counting starts at 0, add 1 to stay
                    # consistent with the equations
r = i + 1
F[j][i] = K(r, n)*(psi_lower_children[j]**r)*(1-psi_lower_children[j])**(n-r)
print(F)
print(f)
A_lower = np.dot(inv(F), f)
print('result', A_lower)
Al_C = [Al_C0]
for i in range(len(A_lower)):
Al_C.append(A_lower[i][0]) # extra [0] is necessary because of array
error_denominator = 0
print('before', former_Au_C, Au_C)
for i in range(len(Au_C)):
error_denominator += Au_C[i]**2
error = 0
for i in range(len(Al_C)):
error += (former_Au_C[i] - Au_C[i])**2/error_denominator
error = math.sqrt(error)
# error = abs(c_C-former_chord)/c_C
# AC_u0 = calculate_AC_u0(AC_u0, constant_LE=False)
print(error, Al_C, Au_C)
# former_chord = c_C
return Au_C, Al_C, c_C, spar_thicknesses
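# Hedged usage sketch (not part of the original script): placeholder parent and
# child coefficients, spar locations and chord illustrating a 'forwards' call to
# calculate_dependent_shape_coefficients; _example_dependent_coefficients is a
# hypothetical helper and is never executed here.
def _example_dependent_coefficients():
    Au_P = [0.17, 0.16, 0.15, 0.14, 0.13]   # parent upper coefficients
    Al_P = [0.17, 0.15, 0.13, 0.12, 0.11]   # parent lower coefficients
    Au_C_1_to_n = [0.18, 0.17, 0.15, 0.14]  # child upper coefficients A_1..A_n
    psi_spars = [0.2, 0.4, 0.6, 0.8]        # non-dimensional spar locations
    return calculate_dependent_shape_coefficients(
        Au_C_1_to_n, psi_spars, Au_P, Al_P, deltaz=0.002, c_P=1.0,
        morphing='forwards')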
def calculate_shape_coefficients_tracing(A0, x, y, N1, N2, chord=1., EndThickness=0):
"""
inputs:
- tip_displacement: {'x': value, 'y': value}
- other_points: {'x': value, 'y': value}
- A0: float value for first shape coefficient. Usually related to a constraint.
"""
# Bersntein Polynomial
def K(r, n):
K = math.factorial(n)/(math.factorial(r)*math.factorial(n-r))
return K
n = len(x)
print(x)
Psi = np.array(x)/chord
Xi = np.array(y)/chord
EndThickness = EndThickness/chord
T = np.zeros((n, n))
t = np.zeros((n, 1))
for j in range(1, n+1):
jj = j - 1
for i in range(1, n+1):
ii = i - 1
T[jj][ii] = K(i, n) * Psi[jj]**i * (1-Psi[jj])**(n-i)
print(Xi[jj], EndThickness, Psi[jj], A0, Psi[jj]**N1*(1-Psi[jj])**N2)
t[jj] = (Xi[jj] - Psi[jj]*EndThickness)/(Psi[jj]**N1*(1-Psi[jj])**N2) - A0*(1-Psi[jj])**n
# Calculate the inverse
A = np.dot(inv(T), t)
A = [A0] + list(A.transpose()[0])
return A
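# Hedged usage sketch (not part of the original script): fit the remaining CST
# coefficients so the curve passes through a few traced points, given a known
# first coefficient A0. The points and exponents are placeholders and
# _example_tracing is a hypothetical helper that is never executed here.
def _example_tracing():
    x = [0.2, 0.5, 0.8]
    y = [0.05, 0.07, 0.04]
    return calculate_shape_coefficients_tracing(A0=0.2, x=x, y=y,
                                                N1=0.5, N2=1.0, chord=1.0)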
def calculate_strains(Au_P, Al_P, c_P, Au_C, Al_C, c_C, deltaz, psi_spars, spar_thicknesses,
                      l_LE=0, eps_LE=0):
    # Calculate psi_flats (non-dimensional location of the intersection of
    # the spars with the lower surface)
psi_flats = []
for j in range(len(psi_spars)):
psi_parent_j = psi_spars[j]
# Calculate psi at landing
# psi_baseline, Au_baseline, Au_goal, deltaz, c_baseline, c_goal
psi_children_j = calculate_psi_goal(
psi_parent_j, Au_P, Au_C, deltaz, c_P, c_C, l_LE, eps_LE, psi_spars[0])
x_children_j = psi_children_j*c_C
s = calculate_spar_direction(psi_spars[j], Au_P, Au_C, deltaz, c_C, l_LE, eps_LE, psi_spars)
psi_flats.append(x_children_j - spar_thicknesses[j]*s[0])
# Calculate initial lengths
initial_lengths = []
psi_list = [0.] + psi_spars + [c_P]
for i in range(len(psi_list)-1):
initial_lengths.append(calculate_arc_length(psi_list[i], psi_list[i+1], Al_P, deltaz, c_P))
# Calculate final lengths
final_lengths = []
psi_list = [0.] + psi_flats + [c_C] # In P configuration
for i in range(len(psi_list)-1):
final_lengths.append(calculate_arc_length(
psi_list[i]*c_P/c_C, psi_list[i+1]*c_P/c_C, Al_C, deltaz, c_C))
# Calculate strains
strains = []
for i in range(len(final_lengths)):
strains.append((final_lengths[i]-initial_lengths[i])/initial_lengths[i])
av_strain = (sum(final_lengths)-sum(initial_lengths))/sum(initial_lengths)
# for i in range(len(strains)):
# print 'Initial length: ' + str(initial_lengths[i]) + ', final length: ' + str(final_lengths[i]) + ', strains: ' + str(strains[i])
return strains, av_strain
def plot_airfoil(AC, psi_spars, c_L, deltaz, Au_L, Al_L, image='plot',
iteration=0, return_coordinates=True, dir='current'):
import matplotlib.pyplot as plt
plt.figure()
n = len(Au_L) - 1
Au_C, Al_C, c_C, spar_thicknesses = calculate_dependent_shape_coefficients(
AC,
psi_spars, Au_L, Al_L,
deltaz, c_L, morphing=morphing_direction)
# ==============================================================================
# Plot results
# ==============================================================================
np.set_printoptions(precision=20)
x = np.linspace(0, c_C, 1000)
y = CST(x, c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
plt.plot(x, y['u'], 'b', label='Children')
plt.plot(x, y['l'], '-b', label=None)
# store variables in case return_coordinates is True
x = list(x[::-1]) + list(x[1:])
y = list(y['u'][::-1]) + list(y['l'][1:])
children_coordinates = {'x': x, 'y': y}
x = np.linspace(0, c_L, 1000)
y = CST(x, c_L, deltasz=[deltaz/2., deltaz/2.], Al=Al_L, Au=Au_L)
plt.plot(x, y['u'], 'r--', label='Parent')
plt.plot(x, y['l'], 'r--', label=None)
y_limits = y
for i in range(len(psi_spars)):
psi_i = psi_spars[i]
# Calculate psi at landing
psi_goal_i = calculate_psi_goal(psi_i, Au_C, Au_L, deltaz, c_C, c_L)
x_goal_i = psi_goal_i*c_L
# Calculate xi at landing
temp = CST(x_goal_i, c_L, [deltaz/2., deltaz/2.], Al=Al_L, Au=Au_L)
y_goal_i = temp['u']
# calculate spar direction
s = calculate_spar_direction(psi_i, Au_C, Au_L, deltaz, c_L)
plt.plot([x_goal_i, x_goal_i - spar_thicknesses[i]*s[0]],
[y_goal_i, y_goal_i - spar_thicknesses[i]*s[1]], 'r--')
y = CST(np.array([psi_i*c_C]), c_C, deltasz=[deltaz/2., deltaz/2.], Al=Al_C, Au=Au_C)
plt.plot([psi_i*c_C, psi_i*c_C], [y['u'], y['u']-spar_thicknesses[i]], 'b', label=None)
    plt.xlabel(r'$\psi$', fontsize=16)
plt.ylabel(r'$\xi$', fontsize=16)
plt.grid()
plt.legend(loc="upper right")
plt.gca().set_aspect('equal', adjustable='box')
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, 2*y2))
# plt.axis([-0.005, c_L+0.005, min(y_limits['l'])-0.005, max(y_limits['l'])+0.01])
if image == 'plot':
plt.show()
elif image == 'save':
if dir == 'current':
plt.savefig('%03i.png' % (iteration), bbox_inches='tight')
else:
cwd = os.getcwd()
directory = os.path.join(cwd, dir)
if not os.path.exists(directory):
os.makedirs(directory)
filename = os.path.join(directory, '%05i.png' % (iteration))
plt.savefig(filename, bbox_inches='tight')
if return_coordinates:
return children_coordinates
|
|
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
This module describes fuel use with considerations of unit commitment
and incremental heat rates using piecewise linear expressions. If you
want to use this module directly in a list of switch modules (instead of
including the package project.unitcommit), you will also need to include
the module operations.unitcommit.commit
If you haven't worked with incremental heat rates before, you may want
to start by reading a background document on incremental heat rates such
as: http://www.energy.ca.gov/papers/98-04-07_HEATRATE.PDF
Incremental heat rates are a way of approximating an "input-output
curve" (heat input vs electricity output) with a series of line
segments. These curves are typically drawn with electricity output on
the x-axis (Power, MW) and fuel use rates on the y-axis (MMBTU/h). These
curves are drawn from the minimum to maximum power output levels for a
given generator, and most generators cannot run at 0 output. The slope
of each line segment is the incremental heat rate at that point in units
of MMBTU/MWh.
Data for incremental heat rates is typically formatted in a heterogeneous
manner. The first data point is the first point on the curve - the
minimum loading level (MW) and its corresponding fuel use rate
(MMBTU/h). Subsequent data points provide further loading levels in MW
and slopes, or incremental heat rates in MMBTU/MWh. This format was
designed to make certain economic calculations easy, not to draw input-
output curves, but you can calculate subsequent points on the curve from
this information.
Fuel requirements for most generators can be approximated very well with
simple models of a single line segment, but the gold standard is to use
several line segments that have increasing slopes. In the future, we may
include a simpler model that uses a single line segment, but we are just
implementing the complex piecewise linear form initially to satisfy key
stakeholders.
There are two basic ways to model a piecewise linear relationship like
this in linear programming. The first approach (which we don't use in
this module) is to divide the energy production variable into several
subvariables (one for each line segment), and put an upper bound on each
subvariable so that it can't exceed the width of the segment. The total
energy production is the sum of the sub-variables, and the total fuel
consumption is: Fuel = line0_intercept + E0*incremental_heat_rate0 +
E1*incremental_heat_rate1 + ... As long as each incremental_heat_rate is
larger than the one before it, then the optimization will ensure that E1
remains at 0 until E0 is at its upper limit, which ensures consistent
results. This tiered decision method is used in the fuel_markets module,
but is not used here.
This module uses the second approach which is to make FuelUse into a
decision variable that must be greater than or equal to each of the
lines. As long as fuel has a cost associated with it, a cost-minimizing
optimization will push fuel use down until it touches one of the line segments.
This method also requires that incremental heat rates increase with
energy production so that the lines collectively form a convex boundary
for fuel use.
"""
import os
from pyomo.environ import *
import csv
from switch_mod.utilities import approx_equal
dependencies = 'switch_mod.timescales', 'switch_mod.load_zones',\
'switch_mod.financials.minimize_cost', 'switch_mod.energy_sources', \
'switch_mod.investment.proj_build', 'switch_mod.operations.proj_dispatch',\
'switch_mod.operations.unitcommit.commit'
def define_components(mod):
"""
This function adds components to a Pyomo abstract model object to
describe fuel consumption in the context of unit commitment. Unless
otherwise stated, all power capacity is specified in units of MW and
all sets and parameters are mandatory.
Typically incremental heat rates tables specify "blocks" where each
block includes power output in MW and heat requirements in MMBTU/hr
to move from the prior block to the current block. If you plot these
points and connect the dots, you have a piecewise linear function
that goes from at least minimum loading level to maximum loading
    level. Data is read in that format, then processed to describe
the individual line segments.
GEN_FUEL_USE_SEGMENTS[g in GEN_TECH_WITH_FUEL] is a set of line segments
that collectively describe fuel requirements for a given generation
technology. Each element of this set is a tuple of (y-intercept,
slope) where the y-intercept is in units of MMBTU/(hr * MW-capacity)
and slope is incremental heat rate in units of MMBTU / MWh-energy.
We normalize the y-intercept by capacity so that we can scale it to
arbitrary sizes of generation, or stacks of individual generation
units. This code can be used in conjunction with discrete unit sizes
    but is not dependent on them. This set is optional.
PROJ_FUEL_USE_SEGMENTS[proj in FUEL_BASED_PROJECTS] is the same as
GEN_FUEL_USE_SEGMENTS but scoped to projects. This set is optional
and will default to GEN_FUEL_USE_SEGMENTS if that is available;
otherwise it will default to an intercept of 0 and a slope of its
full load heat rate.
"""
mod.PROJ_FUEL_USE_SEGMENTS = Set(
mod.FUEL_BASED_PROJECTS,
dimen=2)
# Use BuildAction to populate a set's default values.
def PROJ_FUEL_USE_SEGMENTS_default_rule(m, pr):
if pr not in m.PROJ_FUEL_USE_SEGMENTS:
heat_rate = m.proj_full_load_heat_rate[pr]
m.PROJ_FUEL_USE_SEGMENTS[pr] = [(0, heat_rate)]
mod.PROJ_FUEL_USE_SEGMENTS_default = BuildAction(
mod.FUEL_BASED_PROJECTS,
rule=PROJ_FUEL_USE_SEGMENTS_default_rule)
mod.PROJ_DISP_FUEL_PIECEWISE_CONS_SET = Set(
dimen=4,
initialize=lambda m: [
(proj, t, intercept, slope)
for (proj, t) in m.PROJ_WITH_FUEL_DISPATCH_POINTS
for (intercept, slope) in m.PROJ_FUEL_USE_SEGMENTS[proj]
]
)
mod.ProjFuelUseRate_Calculate = Constraint(
mod.PROJ_DISP_FUEL_PIECEWISE_CONS_SET,
rule=lambda m, pr, t, intercept, incremental_heat_rate: (
sum(m.ProjFuelUseRate[pr, t, f] for f in m.PROJ_FUELS[pr]) >=
# Do the startup
m.Startup[pr, t] * m.proj_startup_fuel[pr] / m.tp_duration_hrs[t] +
intercept * m.CommitProject[pr, t] +
incremental_heat_rate * m.DispatchProj[pr, t]))
# TODO: switch to defining heat rates as a collection of (output_mw, fuel_mmbtu_per_h) points;
# read those directly as normal sets, then derive the project heat rate curves from those
# within define_components.
# This will simplify data preparation (the current format is hard to produce from any
# normalized database) and the import code and help the readability of this file.
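# Illustrative check (not part of the original module): evaluate the lower bound
# that a single (intercept, slope) segment places on fuel use, mirroring the
# ProjFuelUseRate_Calculate constraint above. All numbers are made up and
# _example_segment_bound is a hypothetical helper.
def _example_segment_bound(intercept=2.0, slope=9.0, committed_mw=100.0,
                           dispatch_mw=60.0, startup_fuel_mmbtu=0.0, tp_hours=1.0):
    # intercept is in MMBTU/(h * MW-capacity) and slope is the incremental heat
    # rate in MMBTU/MWh, so the returned bound is in MMBTU/h.
    return (startup_fuel_mmbtu / tp_hours +
            intercept * committed_mw +
            slope * dispatch_mw)  # 2*100 + 9*60 = 740.0 with the defaults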
def load_inputs(mod, switch_data, inputs_dir):
"""
Import data to support modeling fuel use under partial loading
conditions with piecewise linear incremental heat rates.
These files are formatted differently than most to match the
standard format of incremental heat rates. This format is peculiar
    because the data records that describe a fuel use curve are formatted in
    two distinct ways. The first record is the first point on the curve,
but all subsequent records are slopes and x-domain for each line
segment. For a given generation technology or project, the relevant
data should be formatted like so:
power_start_mw power_end_mw ihr fuel_use_rate
min_load . . value
min_load mid_load1 value .
mid_load1 max_load value .
The first row provides the first point on the input/output curve.
Literal dots should be included to indicate blanks.
The column fuel_use_rate is in units of MMBTU/h.
    Subsequent rows provide the domain and slope of each line segment.
The column ihr indicates incremental heat rate in MMBTU/MWh.
Any number of line segments will be accepted.
All text should be replaced with actual numerical values.
I chose this format to a) be relatively consistent with standard
data that is easiest to find, b) make it difficult to misinterpret
the meaning of the data, and c) allow all of the standard data to be
included in a single file.
The following files are optional. If no representative data is
provided for a generation technology, it will default to a single
line segment with an intercept of 0 and a slope equal to the full
    load heat rate. If no specific data is provided for a project, it
will default to its generation technology.
proj_inc_heat_rates.tab
project, power_start_mw, power_end_mw,
incremental_heat_rate_mbtu_per_mwhr, fuel_use_rate_mmbtu_per_h
"""
path = os.path.join(inputs_dir, 'proj_inc_heat_rates.tab')
if os.path.isfile(path):
(fuel_rate_segments, min_load, full_hr) = _parse_inc_heat_rate_file(
path, id_column="project")
# Check implied minimum loading level for consistency with
# proj_min_load_fraction if proj_min_load_fraction was provided. If
# proj_min_load_fraction wasn't provided, set it to implied minimum
# loading level.
for pr in min_load:
if 'proj_min_load_fraction' not in switch_data.data():
switch_data.data()['proj_min_load_fraction'] = {}
dp_dict = switch_data.data(name='proj_min_load_fraction')
if pr in dp_dict:
min_load_dat = dp_dict[pr]
if not approx_equal(min_load[pr], min_load_dat):
raise ValueError((
"proj_min_load_fraction is inconsistant with " +
"incremental heat rate data for project " +
"{}.").format(pr))
else:
dp_dict[pr] = min_load[pr]
# Same thing, but for full load heat rate.
for pr in full_hr:
if 'proj_full_load_heat_rate' not in switch_data.data():
switch_data.data()['proj_full_load_heat_rate'] = {}
dp_dict = switch_data.data(name='proj_full_load_heat_rate')
if pr in dp_dict:
full_hr_dat = dp_dict[pr]
if abs((full_hr[pr] - full_hr_dat) / full_hr_dat) > 0.01:
raise ValueError((
"proj_full_load_heat_rate is inconsistant with " +
"incremental heat rate data for project " +
"{}.").format(pr))
else:
dp_dict[pr] = full_hr[pr]
# Copy parsed data into the data portal.
switch_data.data()['PROJ_FUEL_USE_SEGMENTS'] = fuel_rate_segments
def _parse_inc_heat_rate_file(path, id_column):
"""
Parse tabular incremental heat rate data, calculate a series of
lines that describe each segment, and perform various error checks.
"""
# fuel_rate_points[unit] = {min_power: fuel_use_rate}
fuel_rate_points = {}
# fuel_rate_segments[unit] = [(intercept1, slope1), (int2, slope2)...]
# Stores the description of each linear segment of a fuel rate curve.
fuel_rate_segments = {}
# ihr_dat stores incremental heat rate records as a list for each unit
ihr_dat = {}
# min_cap_factor[unit] and full_load_hr[unit] are for error checking.
min_cap_factor = {}
full_load_hr = {}
    # Parse the file and stuff the data into dictionaries indexed by unit.
with open(path, 'rb') as hr_file:
dat = list(csv.DictReader(hr_file, delimiter='\t'))
for row in dat:
u = row[id_column]
p1 = float(row['power_start_mw'])
p2 = row['power_end_mw']
ihr = row['incremental_heat_rate_mbtu_per_mwhr']
fr = row['fuel_use_rate_mmbtu_per_h']
# Does this row give the first point?
if(p2 == '.' and ihr == '.'):
fr = float(fr)
if(u in fuel_rate_points):
raise ValueError(
"Error processing incremental heat rates for " +
u + " in " + path + ". More than one row has " +
"a fuel use rate specified.")
fuel_rate_points[u] = {p1: fr}
# Does this row give a line segment?
elif(fr == '.'):
p2 = float(p2)
ihr = float(ihr)
if(u not in ihr_dat):
ihr_dat[u] = []
ihr_dat[u].append((p1, p2, ihr))
# Throw an error if the row's format is not recognized.
else:
raise ValueError(
"Error processing incremental heat rates for row " +
u + " in " + path + ". Row format not recognized for " +
"row " + str(row) + ". See documentation for acceptable " +
"formats.")
# Make sure that each project that has incremental heat rates defined
# also has a starting point defined.
missing_starts = [k for k in ihr_dat if k not in fuel_rate_points]
if missing_starts:
raise ValueError(
'No starting point(s) are defined for incremental heat rate curves '
'for the following technologies: {}'.format(','.join(missing_starts)))
# Construct a convex combination of lines describing a fuel use
# curve for each representative unit "u".
for u, fr_points in fuel_rate_points.items():
if u not in ihr_dat:
# no heat rate segments specified; plant can only be off or on at full power
# create a dummy curve at full heat rate
output, fuel = fr_points.items()[0]
fuel_rate_segments[u] = [(0.0, fuel / output)]
min_cap_factor[u] = 1.0
full_load_hr[u] = fuel / output
continue
fuel_rate_segments[u] = []
# Sort the line segments by their domains.
ihr_dat[u].sort()
# Assume that the maximum power output is the rated capacity.
(junk, capacity, junk) = ihr_dat[u][len(ihr_dat[u])-1]
# Retrieve the first incremental heat rate for error checking.
(min_power, junk, ihr_prev) = ihr_dat[u][0]
min_cap_factor[u] = min_power / capacity
# Process each line segment.
for (p_start, p_end, ihr) in ihr_dat[u]:
# Error check: This incremental heat rate cannot be less than
# the previous one.
if ihr_prev > ihr:
raise ValueError((
"Error processing incremental heat rates for " +
"{} in file {}. The incremental heat rate " +
"between power output levels {}-{} is less than " +
"that of the prior line segment.").format(
u, path, p_start, p_end))
# Error check: This segment needs to start at an existing point.
if p_start not in fr_points:
raise ValueError((
"Error processing incremental heat rates for " +
"{} in file {}. The incremental heat rate " +
"between power output levels {}-{} does not start at a " +
"previously defined point or line segment.").format(
u, path, p_start, p_end))
# Calculate the y-intercept then normalize it by the capacity.
intercept_norm = (fr_points[p_start] - ihr * p_start) / capacity
# Save the line segment's definition.
fuel_rate_segments[u].append((intercept_norm, ihr))
# Add a point for the end of the segment for the next iteration.
fr_points[p_end] = fr_points[p_start] + (p_end - p_start) * ihr
ihr_prev = ihr
# Calculate the max load heat rate for error checking
full_load_hr[u] = fr_points[capacity] / capacity
return (fuel_rate_segments, min_cap_factor, full_load_hr)
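# Usage sketch (hypothetical data, not part of the original module): write a tiny
# tab-separated table in the format documented in load_inputs and parse it. The
# file name, project name and numbers are made up.
def _example_parse_inc_heat_rates(path='example_inc_heat_rates.tab'):
    header = ('project\tpower_start_mw\tpower_end_mw\t'
              'incremental_heat_rate_mbtu_per_mwhr\tfuel_use_rate_mmbtu_per_h')
    rows = [header,
            'foo_plant\t20\t.\t.\t200',    # first point on the curve
            'foo_plant\t20\t50\t8.0\t.',   # first line segment
            'foo_plant\t50\t100\t9.5\t.']  # second line segment
    with open(path, 'w') as f:
        f.write('\n'.join(rows) + '\n')
    return _parse_inc_heat_rate_file(path, id_column='project')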
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
from st2actions.runners import pythonrunner
from st2actions.runners.pythonrunner import Action
from st2actions.container import service
from st2actions.runners.utils import get_action_class_instance
from st2common.constants.action import ACTION_OUTPUT_RESULT_DELIMITER
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED, LIVEACTION_STATUS_FAILED
from st2common.constants.pack import SYSTEM_PACK_NAME
from base import RunnerTestCase
import st2tests.base as tests_base
import st2tests.config as tests_config
PACAL_ROW_ACTION_PATH = os.path.join(tests_base.get_resources_path(), 'packs',
'pythonactions/actions/pascal_row.py')
# Note: the runner inherits parent args, which doesn't work with tests since tests pass
# additional unrecognized args
mock_sys = mock.Mock()
mock_sys.argv = []
@mock.patch('st2actions.runners.pythonrunner.sys', mock_sys)
class PythonRunnerTestCase(RunnerTestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_runner_creation(self):
runner = pythonrunner.get_runner()
self.assertTrue(runner is not None, 'Creation failed. No instance.')
self.assertEqual(type(runner), pythonrunner.PythonRunner, 'Creation failed. No instance.')
def test_simple_action(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(status, result, _) = runner.run({'row_index': 4})
self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
self.assertTrue(result is not None)
self.assertEqual(result['result'], [1, 4, 6, 4, 1])
def test_simple_action_fail(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(status, result, _) = runner.run({'row_index': '4'})
self.assertTrue(result is not None)
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
def test_simple_action_no_file(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = 'foo.py'
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(status, result, _) = runner.run({})
self.assertTrue(result is not None)
self.assertEqual(status, LIVEACTION_STATUS_FAILED)
def test_simple_action_no_entry_point(self):
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = ''
runner.container_service = service.RunnerContainerService()
expected_msg = 'Action .*? is missing entry_point attribute'
self.assertRaisesRegexp(Exception, expected_msg, runner.run, {})
@mock.patch('st2common.util.green.shell.subprocess.Popen')
def test_action_with_user_supplied_env_vars(self, mock_popen):
env_vars = {'key1': 'val1', 'key2': 'val2', 'PYTHONPATH': 'foobar'}
mock_process = mock.Mock()
mock_process.communicate.return_value = ('', '')
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {'env': env_vars}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, _, _) = runner.run({'row_index': 4})
_, call_kwargs = mock_popen.call_args
actual_env = call_kwargs['env']
for key, value in env_vars.items():
            # Verify that a blacklisted PYTHONPATH has been filtered out
if key == 'PYTHONPATH':
self.assertTrue(actual_env[key] != value)
else:
self.assertEqual(actual_env[key], value)
@mock.patch('st2common.util.green.shell.subprocess.Popen')
def test_stdout_interception_and_parsing(self, mock_popen):
values = {'delimiter': ACTION_OUTPUT_RESULT_DELIMITER}
# No output to stdout and no result (implicit None)
mock_stdout = '%(delimiter)sNone%(delimiter)s' % values
mock_stderr = 'foo stderr'
mock_process = mock.Mock()
mock_process.communicate.return_value = (mock_stdout, mock_stderr)
mock_process.returncode = 0
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, output, _) = runner.run({'row_index': 4})
self.assertEqual(output['stdout'], '')
self.assertEqual(output['stderr'], mock_stderr)
self.assertEqual(output['result'], 'None')
self.assertEqual(output['exit_code'], 0)
# Output to stdout and no result (implicit None)
mock_stdout = 'pre result%(delimiter)sNone%(delimiter)spost result' % values
mock_stderr = 'foo stderr'
mock_process = mock.Mock()
mock_process.communicate.return_value = (mock_stdout, mock_stderr)
mock_process.returncode = 0
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, output, _) = runner.run({'row_index': 4})
self.assertEqual(output['stdout'], 'pre resultpost result')
self.assertEqual(output['stderr'], mock_stderr)
self.assertEqual(output['result'], 'None')
self.assertEqual(output['exit_code'], 0)
@mock.patch('st2common.util.green.shell.subprocess.Popen')
def test_common_st2_env_vars_are_available_to_the_action(self, mock_popen):
mock_process = mock.Mock()
mock_process.communicate.return_value = ('', '')
mock_popen.return_value = mock_process
runner = pythonrunner.get_runner()
runner.auth_token = mock.Mock()
runner.auth_token.token = 'ponies'
runner.action = self._get_mock_action_obj()
runner.runner_parameters = {}
runner.entry_point = PACAL_ROW_ACTION_PATH
runner.container_service = service.RunnerContainerService()
runner.pre_run()
(_, _, _) = runner.run({'row_index': 4})
_, call_kwargs = mock_popen.call_args
actual_env = call_kwargs['env']
self.assertCommonSt2EnvVarsAvailableInEnv(env=actual_env)
def test_action_class_instantiation_action_service_argument(self):
class Action1(Action):
            # Constructor not overridden, so no issue here
pass
def run(self):
pass
class Action2(Action):
            # Constructor overridden, but takes the action_service argument
def __init__(self, config, action_service=None):
super(Action2, self).__init__(config=config,
action_service=action_service)
def run(self):
pass
class Action3(Action):
            # Constructor overridden, but doesn't take the action_service argument
def __init__(self, config):
super(Action3, self).__init__(config=config)
def run(self):
pass
config = {'a': 1, 'b': 2}
action_service = 'ActionService!'
action1 = get_action_class_instance(action_cls=Action1, config=config,
action_service=action_service)
self.assertEqual(action1.config, config)
self.assertEqual(action1.action_service, action_service)
action2 = get_action_class_instance(action_cls=Action2, config=config,
action_service=action_service)
self.assertEqual(action2.config, config)
self.assertEqual(action2.action_service, action_service)
action3 = get_action_class_instance(action_cls=Action3, config=config,
action_service=action_service)
self.assertEqual(action3.config, config)
self.assertEqual(action3.action_service, action_service)
def _get_mock_action_obj(self):
"""
Return mock action object.
Pack gets set to the system pack so the action doesn't require a separate virtualenv.
"""
action = mock.Mock()
action.pack = SYSTEM_PACK_NAME
action.entry_point = 'foo.py'
return action
|
|
"""
sphinx.pycode.ast
~~~~~~~~~~~~~~~~~
Helpers for AST (Abstract Syntax Tree).
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from typing import Dict, List, Optional, Type, overload
if sys.version_info > (3, 8):
import ast
else:
try:
# use typed_ast module if installed
from typed_ast import ast3 as ast
except ImportError:
import ast # type: ignore
OPERATORS: Dict[Type[ast.AST], str] = {
ast.Add: "+",
ast.And: "and",
ast.BitAnd: "&",
ast.BitOr: "|",
ast.BitXor: "^",
ast.Div: "/",
ast.FloorDiv: "//",
ast.Invert: "~",
ast.LShift: "<<",
ast.MatMult: "@",
ast.Mult: "*",
ast.Mod: "%",
ast.Not: "not",
ast.Pow: "**",
ast.Or: "or",
ast.RShift: ">>",
ast.Sub: "-",
ast.UAdd: "+",
ast.USub: "-",
}
def parse(code: str, mode: str = 'exec') -> "ast.AST":
"""Parse the *code* using the built-in ast or typed_ast libraries.
This enables "type_comments" feature if possible.
"""
try:
# type_comments parameter is available on py38+
return ast.parse(code, mode=mode, type_comments=True) # type: ignore
except SyntaxError:
# Some syntax error found. To ignore invalid type comments, retry parsing without
# type_comments parameter (refs: https://github.com/sphinx-doc/sphinx/issues/8652).
return ast.parse(code, mode=mode)
except TypeError:
# fallback to ast module.
# typed_ast is used to parse type_comments if installed.
return ast.parse(code, mode=mode)
@overload
def unparse(node: None, code: str = '') -> None:
...
@overload
def unparse(node: ast.AST, code: str = '') -> str:
...
def unparse(node: Optional[ast.AST], code: str = '') -> Optional[str]:
"""Unparse an AST to string."""
if node is None:
return None
elif isinstance(node, str):
return node
return _UnparseVisitor(code).visit(node)
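# Minimal usage sketch (assumed snippet, not from the original module): parse a
# small piece of code and unparse the default values of its arguments;
# _example_unparse_defaults is a hypothetical helper and is never called here.
def _example_unparse_defaults():
    tree = parse("def f(x: int = 1, flag=True): pass")
    func = tree.body[0]
    return [unparse(default) for default in func.args.defaults]  # ['1', 'True']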
# a greatly cut-down version of `ast._Unparser`
class _UnparseVisitor(ast.NodeVisitor):
def __init__(self, code: str = '') -> None:
self.code = code
def _visit_op(self, node: ast.AST) -> str:
return OPERATORS[node.__class__]
for _op in OPERATORS:
locals()['visit_{}'.format(_op.__name__)] = _visit_op
def visit_arg(self, node: ast.arg) -> str:
if node.annotation:
return "%s: %s" % (node.arg, self.visit(node.annotation))
else:
return node.arg
def _visit_arg_with_default(self, arg: ast.arg, default: Optional[ast.AST]) -> str:
"""Unparse a single argument to a string."""
name = self.visit(arg)
if default:
if arg.annotation:
name += " = %s" % self.visit(default)
else:
name += "=%s" % self.visit(default)
return name
def visit_arguments(self, node: ast.arguments) -> str:
defaults: List[Optional[ast.expr]] = list(node.defaults)
positionals = len(node.args)
posonlyargs = 0
if hasattr(node, "posonlyargs"): # for py38+
posonlyargs += len(node.posonlyargs) # type:ignore
positionals += posonlyargs
for _ in range(len(defaults), positionals):
defaults.insert(0, None)
kw_defaults: List[Optional[ast.expr]] = list(node.kw_defaults)
for _ in range(len(kw_defaults), len(node.kwonlyargs)):
kw_defaults.insert(0, None)
args: List[str] = []
if hasattr(node, "posonlyargs"): # for py38+
for i, arg in enumerate(node.posonlyargs): # type: ignore
args.append(self._visit_arg_with_default(arg, defaults[i]))
if node.posonlyargs: # type: ignore
args.append('/')
for i, arg in enumerate(node.args):
args.append(self._visit_arg_with_default(arg, defaults[i + posonlyargs]))
if node.vararg:
args.append("*" + self.visit(node.vararg))
if node.kwonlyargs and not node.vararg:
args.append('*')
for i, arg in enumerate(node.kwonlyargs):
args.append(self._visit_arg_with_default(arg, kw_defaults[i]))
if node.kwarg:
args.append("**" + self.visit(node.kwarg))
return ", ".join(args)
def visit_Attribute(self, node: ast.Attribute) -> str:
return "%s.%s" % (self.visit(node.value), node.attr)
def visit_BinOp(self, node: ast.BinOp) -> str:
return " ".join(self.visit(e) for e in [node.left, node.op, node.right])
def visit_BoolOp(self, node: ast.BoolOp) -> str:
op = " %s " % self.visit(node.op)
return op.join(self.visit(e) for e in node.values)
def visit_Call(self, node: ast.Call) -> str:
args = ([self.visit(e) for e in node.args] +
["%s=%s" % (k.arg, self.visit(k.value)) for k in node.keywords])
return "%s(%s)" % (self.visit(node.func), ", ".join(args))
def visit_Constant(self, node: ast.Constant) -> str: # type: ignore
if node.value is Ellipsis:
return "..."
elif isinstance(node.value, (int, float, complex)):
if self.code and sys.version_info > (3, 8):
return ast.get_source_segment(self.code, node) # type: ignore
else:
return repr(node.value)
else:
return repr(node.value)
def visit_Dict(self, node: ast.Dict) -> str:
keys = (self.visit(k) for k in node.keys)
values = (self.visit(v) for v in node.values)
items = (k + ": " + v for k, v in zip(keys, values))
return "{" + ", ".join(items) + "}"
def visit_Index(self, node: ast.Index) -> str:
return self.visit(node.value)
def visit_Lambda(self, node: ast.Lambda) -> str:
return "lambda %s: ..." % self.visit(node.args)
def visit_List(self, node: ast.List) -> str:
return "[" + ", ".join(self.visit(e) for e in node.elts) + "]"
def visit_Name(self, node: ast.Name) -> str:
return node.id
def visit_Set(self, node: ast.Set) -> str:
return "{" + ", ".join(self.visit(e) for e in node.elts) + "}"
def visit_Subscript(self, node: ast.Subscript) -> str:
def is_simple_tuple(value: ast.AST) -> bool:
return (
isinstance(value, ast.Tuple) and
bool(value.elts) and
not any(isinstance(elt, ast.Starred) for elt in value.elts)
)
if is_simple_tuple(node.slice):
elts = ", ".join(self.visit(e) for e in node.slice.elts) # type: ignore
return "%s[%s]" % (self.visit(node.value), elts)
elif isinstance(node.slice, ast.Index) and is_simple_tuple(node.slice.value):
elts = ", ".join(self.visit(e) for e in node.slice.value.elts) # type: ignore
return "%s[%s]" % (self.visit(node.value), elts)
else:
return "%s[%s]" % (self.visit(node.value), self.visit(node.slice))
def visit_UnaryOp(self, node: ast.UnaryOp) -> str:
return "%s %s" % (self.visit(node.op), self.visit(node.operand))
def visit_Tuple(self, node: ast.Tuple) -> str:
if len(node.elts) == 0:
return "()"
elif len(node.elts) == 1:
return "(%s,)" % self.visit(node.elts[0])
else:
return "(" + ", ".join(self.visit(e) for e in node.elts) + ")"
if sys.version_info < (3, 8):
# these ast nodes were deprecated in python 3.8
def visit_Bytes(self, node: ast.Bytes) -> str:
return repr(node.s)
def visit_Ellipsis(self, node: ast.Ellipsis) -> str:
return "..."
def visit_NameConstant(self, node: ast.NameConstant) -> str:
return repr(node.value)
def visit_Num(self, node: ast.Num) -> str:
return repr(node.n)
def visit_Str(self, node: ast.Str) -> str:
return repr(node.s)
def generic_visit(self, node):
raise NotImplementedError('Unable to parse %s object' % type(node).__name__)
|
|
# -*- coding: utf-8 -*-
import os
import six
import sqlalchemy
from sqlalchemy import *
from migrate.versioning import genmodel, schemadiff
from migrate.changeset import schema
from migrate.tests import fixture
class TestSchemaDiff(fixture.DB):
table_name = 'tmp_schemadiff'
level = fixture.DB.CONNECT
def _setup(self, url):
super(TestSchemaDiff, self)._setup(url)
self.meta = MetaData(self.engine)
self.meta.reflect()
self.meta.drop_all() # in case junk tables are lying around in the test database
self.meta = MetaData(self.engine)
self.meta.reflect() # needed if we just deleted some tables
self.table = Table(self.table_name, self.meta,
Column('id',Integer(), primary_key=True),
Column('name', UnicodeText()),
Column('data', UnicodeText()),
)
def _teardown(self):
if self.table.exists():
self.meta = MetaData(self.engine)
self.meta.reflect()
self.meta.drop_all()
super(TestSchemaDiff, self)._teardown()
def _applyLatestModel(self):
diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
genmodel.ModelGenerator(diff,self.engine).runB2A()
# NOTE(mriedem): DB2 handles UnicodeText as LONG VARGRAPHIC
# so the schema diffs on the columns don't work with this test.
@fixture.usedb(not_supported='ibm_db_sa')
def test_functional(self):
def assertDiff(isDiff, tablesMissingInDatabase, tablesMissingInModel, tablesWithDiff):
diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
self.assertEqual(
(diff.tables_missing_from_B,
diff.tables_missing_from_A,
list(diff.tables_different.keys()),
bool(diff)),
(tablesMissingInDatabase,
tablesMissingInModel,
tablesWithDiff,
isDiff)
)
# Model is defined but database is empty.
assertDiff(True, [self.table_name], [], [])
# Check Python upgrade and downgrade of database from updated model.
diff = schemadiff.getDiffOfModelAgainstDatabase(self.meta, self.engine, excludeTables=['migrate_version'])
decls, upgradeCommands, downgradeCommands = genmodel.ModelGenerator(diff,self.engine).genB2AMigration()
# Feature test for a recent SQLa feature;
# expect different output in that case.
if repr(String()) == 'String()':
self.assertEqualIgnoreWhitespace(decls, '''
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
tmp_schemadiff = Table('tmp_schemadiff', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('name', UnicodeText),
Column('data', UnicodeText),
)
''')
else:
self.assertEqualIgnoreWhitespace(decls, '''
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
tmp_schemadiff = Table('tmp_schemadiff', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('name', UnicodeText(length=None)),
Column('data', UnicodeText(length=None)),
)
''')
# Create table in database, now model should match database.
self._applyLatestModel()
assertDiff(False, [], [], [])
# Check Python code gen from database.
diff = schemadiff.getDiffOfModelAgainstDatabase(MetaData(), self.engine, excludeTables=['migrate_version'])
src = genmodel.ModelGenerator(diff,self.engine).genBDefinition()
namespace = {}
six.exec_(src, namespace)
c1 = Table('tmp_schemadiff', self.meta, autoload=True).c
c2 = namespace['tmp_schemadiff'].c
self.compare_columns_equal(c1, c2, ['type'])
# TODO: get rid of ignoring type
if not self.engine.name == 'oracle':
# Add data, later we'll make sure it's still present.
result = self.engine.execute(self.table.insert(), id=1, name=u'mydata')
dataId = result.inserted_primary_key[0]
# Modify table in model (by removing it and adding it back to model)
# Drop column data, add columns data2 and data3.
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',Integer(),nullable=True),
Column('data3',Integer(),nullable=True),
)
assertDiff(True, [], [], [self.table_name])
# Apply latest model changes and find no more diffs.
self._applyLatestModel()
assertDiff(False, [], [], [])
# Drop column data3, add data4
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',Integer(),nullable=True),
Column('data4',Float(),nullable=True),
)
assertDiff(True, [], [], [self.table_name])
diff = schemadiff.getDiffOfModelAgainstDatabase(
self.meta, self.engine, excludeTables=['migrate_version'])
decls, upgradeCommands, downgradeCommands = genmodel.ModelGenerator(diff,self.engine).genB2AMigration(indent='')
# decls have changed since genBDefinition
six.exec_(decls, namespace)
# migration commands expect a namespace containing migrate_engine
namespace['migrate_engine'] = self.engine
# run the migration up and down
six.exec_(upgradeCommands, namespace)
assertDiff(False, [], [], [])
six.exec_(decls, namespace)
six.exec_(downgradeCommands, namespace)
assertDiff(True, [], [], [self.table_name])
six.exec_(decls, namespace)
six.exec_(upgradeCommands, namespace)
assertDiff(False, [], [], [])
if not self.engine.name == 'oracle':
# Make sure data is still present.
result = self.engine.execute(self.table.select(self.table.c.id==dataId))
rows = result.fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0].name, 'mydata')
# Add data, later we'll make sure it's still present.
result = self.engine.execute(self.table.insert(), id=2, name=u'mydata2', data2=123)
dataId2 = result.inserted_primary_key[0]
# Change column type in model.
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',String(255),nullable=True),
)
# XXX test type diff
return
assertDiff(True, [], [], [self.table_name])
# Apply latest model changes and find no more diffs.
self._applyLatestModel()
assertDiff(False, [], [], [])
if not self.engine.name == 'oracle':
# Make sure data is still present.
result = self.engine.execute(self.table.select(self.table.c.id==dataId2))
rows = result.fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0].name, 'mydata2')
self.assertEqual(rows[0].data2, '123')
# Delete data, since we're about to make a required column.
# Not even using sqlalchemy.PassiveDefault helps because we're doing explicit column select.
self.engine.execute(self.table.delete(), id=dataId)
if not self.engine.name == 'firebird':
# Change column nullable in model.
self.meta.remove(self.table)
self.table = Table(self.table_name,self.meta,
Column('id',Integer(),primary_key=True),
Column('name',UnicodeText(length=None)),
Column('data2',String(255),nullable=False),
)
assertDiff(True, [], [], [self.table_name]) # TODO test nullable diff
# Apply latest model changes and find no more diffs.
self._applyLatestModel()
assertDiff(False, [], [], [])
# Remove table from model.
self.meta.remove(self.table)
assertDiff(True, [], [self.table_name], [])
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 OpenStack Foundation
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1.contrib import list_extensions as nova_list_extensions
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1 import security_groups as nova_security_groups
from novaclient.v1_1 import servers as nova_servers
from horizon import conf
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
LOG = logging.getLogger(__name__)
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
class VNCConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_vnc_console method.
"""
_attrs = ['url', 'type']
class SPICEConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_spice_console method.
"""
_attrs = ['url', 'type']
class RDPConsole(base.APIDictWrapper):
"""Wrapper for the "console" dictionary returned by the
novaclient.servers.get_rdp_console method.
"""
_attrs = ['url', 'type']
class Server(base.APIResourceWrapper):
"""Simple wrapper around novaclient.server.Server
Preserves the request info so image name can later be retrieved
"""
_attrs = ['addresses', 'attrs', 'id', 'image', 'links',
'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
'tenant_id', 'user_id', 'created', 'OS-EXT-STS:power_state',
'OS-EXT-STS:task_state', 'OS-EXT-SRV-ATTR:instance_name',
'OS-EXT-SRV-ATTR:host', 'OS-EXT-AZ:availability_zone',
'OS-DCF:diskConfig']
def __init__(self, apiresource, request):
super(Server, self).__init__(apiresource)
self.request = request
# TODO(gabriel): deprecate making a call to Glance as a fallback.
@property
def image_name(self):
import glanceclient.exc as glance_exceptions
from openstack_dashboard.api import glance
if not self.image:
return "-"
if hasattr(self.image, 'name'):
return self.image.name
if 'name' in self.image:
return self.image['name']
else:
try:
image = glance.image_get(self.request, self.image['id'])
return image.name
except glance_exceptions.ClientException:
return "-"
@property
def internal_name(self):
return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
@property
def availability_zone(self):
return getattr(self, 'OS-EXT-AZ:availability_zone', "")
class NovaUsage(base.APIResourceWrapper):
"""Simple wrapper around contrib/simple_usage.py."""
_attrs = ['start', 'server_usages', 'stop', 'tenant_id',
'total_local_gb_usage', 'total_memory_mb_usage',
'total_vcpus_usage', 'total_hours']
def get_summary(self):
return {'instances': self.total_active_instances,
'memory_mb': self.memory_mb,
'vcpus': getattr(self, "total_vcpus_usage", 0),
'vcpu_hours': self.vcpu_hours,
'local_gb': self.local_gb,
'disk_gb_hours': self.disk_gb_hours}
@property
def total_active_instances(self):
return sum(1 for s in self.server_usages if s['ended_at'] is None)
@property
def vcpus(self):
return sum(s['vcpus'] for s in self.server_usages
if s['ended_at'] is None)
@property
def vcpu_hours(self):
return getattr(self, "total_hours", 0)
@property
def local_gb(self):
return sum(s['local_gb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def memory_mb(self):
return sum(s['memory_mb'] for s in self.server_usages
if s['ended_at'] is None)
@property
def disk_gb_hours(self):
return getattr(self, "total_local_gb_usage", 0)
class SecurityGroup(base.APIResourceWrapper):
"""Wrapper around novaclient.security_groups.SecurityGroup which wraps its
rules in SecurityGroupRule objects and allows access to them.
"""
_attrs = ['id', 'name', 'description', 'tenant_id']
@cached_property
def rules(self):
"""Wraps transmitted rule info in the novaclient rule class."""
manager = nova_rules.SecurityGroupRuleManager(None)
rule_objs = [nova_rules.SecurityGroupRule(manager, rule)
for rule in self._apiresource.rules]
return [SecurityGroupRule(rule) for rule in rule_objs]
class SecurityGroupRule(base.APIResourceWrapper):
"""Wrapper for individual rules in a SecurityGroup."""
_attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']
def __unicode__(self):
if 'name' in self.group:
vals = {'from': self.from_port,
'to': self.to_port,
'group': self.group['name']}
return _('ALLOW %(from)s:%(to)s from %(group)s') % vals
else:
vals = {'from': self.from_port,
'to': self.to_port,
'cidr': self.ip_range['cidr']}
return _('ALLOW %(from)s:%(to)s from %(cidr)s') % vals
# The following attributes are defined to keep compatibility with Neutron
@property
def ethertype(self):
return None
@property
def direction(self):
return 'ingress'
class SecurityGroupManager(network_base.SecurityGroupManager):
backend = 'nova'
def __init__(self, request):
self.request = request
self.client = novaclient(request)
def list(self):
return [SecurityGroup(g) for g
in self.client.security_groups.list()]
def get(self, sg_id):
return SecurityGroup(self.client.security_groups.get(sg_id))
def create(self, name, desc):
return SecurityGroup(self.client.security_groups.create(name, desc))
def update(self, sg_id, name, desc):
return SecurityGroup(self.client.security_groups.update(sg_id,
name, desc))
def delete(self, security_group_id):
self.client.security_groups.delete(security_group_id)
def rule_create(self, parent_group_id,
direction=None, ethertype=None,
ip_protocol=None, from_port=None, to_port=None,
cidr=None, group_id=None):
# Nova Security Group API does not use direction and ethertype fields.
sg = self.client.security_group_rules.create(parent_group_id,
ip_protocol,
from_port,
to_port,
cidr,
group_id)
return SecurityGroupRule(sg)
def rule_delete(self, security_group_rule_id):
self.client.security_group_rules.delete(security_group_rule_id)
def list_by_instance(self, instance_id):
"""Gets security groups of an instance."""
# TODO(gabriel): This needs to be moved up to novaclient, and should
# be removed once novaclient supports this call.
security_groups = []
nclient = self.client
resp, body = nclient.client.get('/servers/%s/os-security-groups'
% instance_id)
if body:
# Wrap data in SG objects as novaclient would.
sg_objs = [
nova_security_groups.SecurityGroup(
nclient.security_groups, sg, loaded=True)
for sg in body.get('security_groups', [])]
# Then wrap novaclient's object with our own. Yes, sadly wrapping
# with two layers of objects is necessary.
security_groups = [SecurityGroup(sg) for sg in sg_objs]
return security_groups
def update_instance_security_group(self, instance_id,
new_security_group_ids):
try:
all_groups = self.list()
except Exception:
raise Exception(_("Couldn't get security group list."))
wanted_groups = set([sg.name for sg in all_groups
if sg.id in new_security_group_ids])
try:
current_groups = self.list_by_instance(instance_id)
except Exception:
raise Exception(_("Couldn't get current security group "
"list for instance %s.")
% instance_id)
current_group_names = set([sg.name for sg in current_groups])
groups_to_add = wanted_groups - current_group_names
groups_to_remove = current_group_names - wanted_groups
num_groups_to_modify = len(groups_to_add | groups_to_remove)
try:
for group in groups_to_add:
self.client.servers.add_security_group(instance_id, group)
num_groups_to_modify -= 1
for group in groups_to_remove:
self.client.servers.remove_security_group(instance_id, group)
num_groups_to_modify -= 1
except Exception:
raise Exception(_('Failed to modify %d instance security groups.')
% num_groups_to_modify)
return True
class FlavorExtraSpec(object):
def __init__(self, flavor_id, key, val):
self.flavor_id = flavor_id
self.id = key
self.key = key
self.value = val
class FloatingIp(base.APIResourceWrapper):
_attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id', 'pool']
def __init__(self, fip):
fip.__setattr__('port_id', fip.instance_id)
super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
def __init__(self, pool):
pool_dict = {'id': pool.name,
'name': pool.name}
super(FloatingIpPool, self).__init__(pool_dict)
class FloatingIpTarget(base.APIDictWrapper):
def __init__(self, server):
server_dict = {'name': '%s (%s)' % (server.name, server.id),
'id': server.id}
super(FloatingIpTarget, self).__init__(server_dict)
class FloatingIpManager(network_base.FloatingIpManager):
def __init__(self, request):
self.request = request
self.client = novaclient(request)
def list_pools(self):
return [FloatingIpPool(pool)
for pool in self.client.floating_ip_pools.list()]
def list(self):
return [FloatingIp(fip)
for fip in self.client.floating_ips.list()]
def get(self, floating_ip_id):
return FloatingIp(self.client.floating_ips.get(floating_ip_id))
def allocate(self, pool):
return FloatingIp(self.client.floating_ips.create(pool=pool))
def release(self, floating_ip_id):
self.client.floating_ips.delete(floating_ip_id)
def associate(self, floating_ip_id, port_id):
        # For Nova, the port_id passed in is actually the instance_id.
server = self.client.servers.get(port_id)
fip = self.client.floating_ips.get(floating_ip_id)
self.client.servers.add_floating_ip(server.id, fip.ip)
def disassociate(self, floating_ip_id, port_id):
fip = self.client.floating_ips.get(floating_ip_id)
server = self.client.servers.get(fip.instance_id)
self.client.servers.remove_floating_ip(server.id, fip.ip)
def list_targets(self):
return [FloatingIpTarget(s) for s in self.client.servers.list()]
def get_target_id_by_instance(self, instance_id):
return instance_id
def list_target_id_by_instance(self, instance_id):
return [instance_id, ]
def is_simple_associate_supported(self):
return conf.HORIZON_CONFIG["simple_ip_management"]
def novaclient(request):
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
LOG.debug('novaclient connection created using token "%s" and url "%s"' %
(request.user.token.id, base.url_for(request, 'compute')))
c = nova_client.Client(request.user.username,
request.user.token.id,
project_id=request.user.tenant_id,
auth_url=base.url_for(request, 'compute'),
insecure=insecure,
cacert=cacert,
http_log_debug=settings.DEBUG)
c.client.auth_token = request.user.token.id
c.client.management_url = base.url_for(request, 'compute')
return c
def server_vnc_console(request, instance_id, console_type='novnc'):
return VNCConsole(novaclient(request).servers.get_vnc_console(instance_id,
console_type)['console'])
def server_spice_console(request, instance_id, console_type='spice-html5'):
return SPICEConsole(novaclient(request).servers.get_spice_console(
instance_id, console_type)['console'])
def server_rdp_console(request, instance_id, console_type='rdp-html5'):
return RDPConsole(novaclient(request).servers.get_rdp_console(
instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
ephemeral=0, swap=0, metadata=None, is_public=True):
flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
flavorid=flavorid,
ephemeral=ephemeral,
swap=swap, is_public=is_public)
    if metadata:
flavor_extra_set(request, flavor.id, metadata)
return flavor
def flavor_delete(request, flavor_id):
novaclient(request).flavors.delete(flavor_id)
def flavor_get(request, flavor_id):
return novaclient(request).flavors.get(flavor_id)
@memoized
def flavor_list(request, is_public=True):
"""Get the list of available instance sizes (flavors)."""
return novaclient(request).flavors.list(is_public=is_public)
@memoized
def flavor_access_list(request, flavor=None):
"""Get the list of access instance sizes (flavors)."""
return novaclient(request).flavor_access.list(flavor=flavor)
def add_tenant_to_flavor(request, flavor, tenant):
"""Add a tenant to the given flavor access list."""
return novaclient(request).flavor_access.add_tenant_access(
flavor=flavor, tenant=tenant)
def remove_tenant_from_flavor(request, flavor, tenant):
"""Remove a tenant from the given flavor access list."""
return novaclient(request).flavor_access.remove_tenant_access(
flavor=flavor, tenant=tenant)
def flavor_get_extras(request, flavor_id, raw=False):
"""Get flavor extra specs."""
flavor = novaclient(request).flavors.get(flavor_id)
extras = flavor.get_keys()
if raw:
return extras
return [FlavorExtraSpec(flavor_id, key, value) for
key, value in extras.items()]
def flavor_extra_delete(request, flavor_id, keys):
"""Unset the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
return flavor.unset_keys(keys)
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = novaclient(request).flavors.get(flavor_id)
    if not metadata:  # an empty dict is not a way to delete keys
return None
return flavor.set_keys(metadata)
def snapshot_create(request, instance_id, name):
return novaclient(request).servers.create_image(instance_id, name)
def keypair_create(request, name):
return novaclient(request).keypairs.create(name)
def keypair_import(request, name, public_key):
return novaclient(request).keypairs.create(name, public_key)
def keypair_delete(request, keypair_id):
novaclient(request).keypairs.delete(keypair_id)
def keypair_list(request):
return novaclient(request).keypairs.list()
def server_create(request, name, image, flavor, key_name, user_data,
security_groups, block_device_mapping=None,
block_device_mapping_v2=None, nics=None,
availability_zone=None, instance_count=1, admin_pass=None,
disk_config=None):
return Server(novaclient(request).servers.create(
name, image, flavor, userdata=user_data,
security_groups=security_groups,
key_name=key_name, block_device_mapping=block_device_mapping,
block_device_mapping_v2=block_device_mapping_v2,
nics=nics, availability_zone=availability_zone,
min_count=instance_count, admin_pass=admin_pass,
disk_config=disk_config), request)
def server_delete(request, instance):
novaclient(request).servers.delete(instance)
def server_get(request, instance_id):
return Server(novaclient(request).servers.get(instance_id), request)
def server_list(request, search_opts=None, all_tenants=False):
page_size = utils.get_page_size(request)
c = novaclient(request)
paginate = False
if search_opts is None:
search_opts = {}
elif 'paginate' in search_opts:
paginate = search_opts.pop('paginate')
if paginate:
search_opts['limit'] = page_size + 1
if all_tenants:
search_opts['all_tenants'] = True
else:
search_opts['project_id'] = request.user.tenant_id
servers = [Server(s, request)
for s in c.servers.list(True, search_opts)]
has_more_data = False
if paginate and len(servers) > page_size:
servers.pop(-1)
has_more_data = True
elif paginate and len(servers) == getattr(settings, 'API_RESULT_LIMIT',
1000):
has_more_data = True
return (servers, has_more_data)
def server_console_output(request, instance_id, tail_length=None):
"""Gets console output of an instance."""
return novaclient(request).servers.get_console_output(instance_id,
length=tail_length)
def server_pause(request, instance_id):
novaclient(request).servers.pause(instance_id)
def server_unpause(request, instance_id):
novaclient(request).servers.unpause(instance_id)
def server_suspend(request, instance_id):
novaclient(request).servers.suspend(instance_id)
def server_resume(request, instance_id):
novaclient(request).servers.resume(instance_id)
def server_reboot(request, instance_id, soft_reboot=False):
hardness = nova_servers.REBOOT_HARD
if soft_reboot:
hardness = nova_servers.REBOOT_SOFT
novaclient(request).servers.reboot(instance_id, hardness)
def server_rebuild(request, instance_id, image_id, password=None,
disk_config=None):
return novaclient(request).servers.rebuild(instance_id, image_id,
password, disk_config)
def server_update(request, instance_id, name):
return novaclient(request).servers.update(instance_id, name=name)
def server_migrate(request, instance_id):
novaclient(request).servers.migrate(instance_id)
def server_live_migrate(request, instance_id, host, block_migration=False,
disk_over_commit=False):
novaclient(request).servers.live_migrate(instance_id, host,
block_migration,
disk_over_commit)
def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
novaclient(request).servers.resize(instance_id, flavor,
disk_config, **kwargs)
def server_confirm_resize(request, instance_id):
novaclient(request).servers.confirm_resize(instance_id)
def server_revert_resize(request, instance_id):
novaclient(request).servers.revert_resize(instance_id)
def server_start(request, instance_id):
novaclient(request).servers.start(instance_id)
def server_stop(request, instance_id):
novaclient(request).servers.stop(instance_id)
def tenant_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.get(tenant_id))
def tenant_quota_update(request, tenant_id, **kwargs):
novaclient(request).quotas.update(tenant_id, **kwargs)
def default_quota_get(request, tenant_id):
return base.QuotaSet(novaclient(request).quotas.defaults(tenant_id))
def usage_get(request, tenant_id, start, end):
return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))
def usage_list(request, start, end):
return [NovaUsage(u) for u in
novaclient(request).usage.list(start, end, True)]
def virtual_interfaces_list(request, instance_id):
return novaclient(request).virtual_interfaces.list(instance_id)
def get_x509_credentials(request):
return novaclient(request).certs.create()
def get_x509_root_certificate(request):
return novaclient(request).certs.get()
def get_password(request, instance_id, private_key=None):
return novaclient(request).servers.get_password(instance_id, private_key)
def instance_volume_attach(request, volume_id, instance_id, device):
return novaclient(request).volumes.create_server_volume(instance_id,
volume_id,
device)
def instance_volume_detach(request, instance_id, att_id):
return novaclient(request).volumes.delete_server_volume(instance_id,
att_id)
def instance_volumes_list(request, instance_id):
from openstack_dashboard.api.cinder import cinderclient # noqa
volumes = novaclient(request).volumes.get_server_volumes(instance_id)
for volume in volumes:
volume_data = cinderclient(request).volumes.get(volume.id)
volume.name = volume_data.display_name
return volumes
def hypervisor_list(request):
return novaclient(request).hypervisors.list()
def hypervisor_stats(request):
return novaclient(request).hypervisors.statistics()
def hypervisor_search(request, query, servers=True):
return novaclient(request).hypervisors.search(query, servers)
def tenant_absolute_limits(request, reserved=False):
limits = novaclient(request).limits.get(reserved=reserved).absolute
limits_dict = {}
for limit in limits:
# -1 is used to represent unlimited quotas
if limit.value == -1:
limits_dict[limit.name] = float("inf")
else:
limits_dict[limit.name] = limit.value
return limits_dict
def availability_zone_list(request, detailed=False):
return novaclient(request).availability_zones.list(detailed=detailed)
def service_list(request):
return novaclient(request).services.list()
def aggregate_details_list(request):
result = []
c = novaclient(request)
for aggregate in c.aggregates.list():
result.append(c.aggregates.get_details(aggregate.id))
return result
def aggregate_create(request, name, availability_zone=None):
return novaclient(request).aggregates.create(name, availability_zone)
def aggregate_delete(request, aggregate_id):
return novaclient(request).aggregates.delete(aggregate_id)
def aggregate_get(request, aggregate_id):
return novaclient(request).aggregates.get(aggregate_id)
def aggregate_update(request, aggregate_id, values):
return novaclient(request).aggregates.update(aggregate_id, values)
def host_list(request):
return novaclient(request).hosts.list()
def add_host_to_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.add_host(aggregate_id, host)
def remove_host_from_aggregate(request, aggregate_id, host):
return novaclient(request).aggregates.remove_host(aggregate_id, host)
@memoized
def list_extensions(request):
return nova_list_extensions.ListExtManager(novaclient(request)).show_all()
@memoized
def extension_supported(extension_name, request):
    """Determine whether nova supports the given extension name.

    Example values for extension_name include AdminActions,
    ConsoleOutput, etc.
    """
extensions = list_extensions(request)
for extension in extensions:
if extension.name == extension_name:
return True
return False
def can_set_server_password():
features = getattr(settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
return features.get('can_set_password', False)
|
|
# Natural Language Toolkit: Paradigm Visualisation
#
# Copyright (C) 2005 University of Melbourne
# Author: Will Hardy
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
# Parses a paradigm query and produces an XML representation of
# that query. This is part of a Python implementation of David
# Penton's paradigm visualisation model.
#This is the query XML version of "table(person, number, content)"
#
#<?xml version="1.0"?>
#<document>
# <parse-tree>
# <operator opcode="table" instruction="1">
# <operand type="domain"
# arg="horizontal">person</operand>
# <operand type="domain"
# arg="vertical">number</operand>
# <operand type="domain"
# arg="cell">content</operand>
# </operator>
# </parse-tree>
#</document>
from en.parser.nltk_lite import tokenize
from en.parser.nltk_lite import parse
from en.parser.nltk_lite.parse import cfg
import re
class ParadigmQuery(object):
"""
Class to read and parse a paradigm visualisation query
"""
def __init__(self, p_string=None):
"""
Construct a query.
Setup various attributes and parse given string
"""
self.nltktree = None
self.string = p_string
self.parseList = None
self.nltkTree = None
self.parseTree = None
self.xml = None
# If p_string was given, parse it
        if p_string is not None:
self.parse(p_string)
def parse(self, p_string):
"""
Parses a string and stores the resulting hierarchy of "domains"
"hierarchies" and "tables"
For the sake of NLP I've parsed the string using the nltk_lite
context free grammar library.
A query is a "sentence" and can either be a domain, hierarchy or a table.
A domain is simply a word.
A hierarchy is expressed as "domain/domain"
        A table is expressed as "table(sentence, sentence, sentence)"
Internally the query is represented as a nltk_lite.parse.tree
Process:
1. string is tokenized
2. develop a context free grammar
3. parse
4. convert to a tree representation
"""
self.nltktree = None
# Store the query string
self.string = p_string
"""
1. Tokenize
------------------------------------------------------------------------
"""
# Tokenize the query string, allowing only strings, parentheses,
# forward slashes and commas.
re_all = r'table[(]|\,|[)]|[/]|\w+'
data_tokens = tokenize.regexp(self.string, re_all)
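        # For example, the query "table(person, number, content)" tokenizes to
        # ['table(', 'person', ',', 'number', ',', 'content', ')'].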
"""
2. Develop a context free grammar
------------------------------------------------------------------------
"""
# Develop a context free grammar
# S = sentence, T = table, H = hierarchy, D = domain
O, T, H, D = cfg.nonterminals('O, T, H, D')
# Specify the grammar
productions = (
# A sentence can be either a table, hierarchy or domain
cfg.Production(O, [D]), cfg.Production(O, [H]), cfg.Production(O, [T]),
# A table must be the following sequence:
# "table(", sentence, comma, sentence, comma, sentence, ")"
cfg.Production(T, ['table(', O, ',', O, ',', O, ')']),
# A hierarchy must be the following sequence:
# domain, forward slash, domain
cfg.Production(H, [D, '/', D]),
# domain, forward slash, another operator
cfg.Production(H, [D, '/', O])
)
# Add domains to the cfg productions
# A domain is a token that is entirely word chars
        re_domain = re.compile(r'^\w+$')
# Try every token and add if it matches the above regular expression
for tok in data_tokens:
if re_domain.match(tok):
                prod = (cfg.Production(D, [tok]),)
productions = productions + prod
# Make a grammar out of our productions
grammar = cfg.Grammar(O, productions)
rd_parser = parse.RecursiveDescent(grammar)
        # Re-tokenize: tokenize.regexp returns a one-shot iterator, which was
        # exhausted while building the productions above.
tokens = tokenize.regexp(self.string, re_all)
toklist = list(tokens)
"""
3. Parse using the context free grammar
------------------------------------------------------------------------
"""
# Store the parsing.
        # Keep only the first parse, as the grammar should be unambiguous.
try:
self.parseList = rd_parser.get_parse_list(toklist)[0]
except IndexError:
print "Could not parse query."
return
"""
4. Refine and convert to a Tree representation
------------------------------------------------------------------------
"""
# Set the nltk_lite.parse.tree tree for this query to the global sentence
string = str(self.parseList)
        string2 = (string.replace(":", "")
                         .replace("')'", "")
                         .replace("table(", "")
                         .replace("','", "")
                         .replace("'", "")
                         .replace("/", ""))
self.nltktree = parse.tree.bracket_parse(string2)
# Store the resulting nltk_lite.parse.tree tree
self.parseTree = QuerySentence(self.nltktree)
self.xml = self.parseTree.toXML()
def getTree(self):
"""
Returns the results from the CFG parsing
"""
if self.string == None:
print "No string has been parsed. Please use parse(string)."
return None
return self.nltktree
def getXML(self):
"""
This XML is written without the use of SAX or DOM, it is a straight
translation of the parsed string. This may be slightly dangerous, but
the document is very simple. If I have time, this may be reimplemented.
"""
if self.string == None:
print "No string has been parsed. Please use parse(string)."
return None
return '<?xml version="1.0"?>\n<document><parse-tree>' + self.xml \
+ "</parse-tree></document>"
# Additional Classes for handling The various types of recursive operations
class QuerySentence(object):
"""
Handles the XML export of sentences
"""
def __init__(self, tree):
self.tree = tree
type = str(tree[0])[1:2]
# Move on, nothing to see here
if type == "O":
self.child = QuerySentence(tree[0])
self.content = self.child.content
# Get the child and replicate the data
elif type == "D":
self.child = QueryDomain(tree[0])
self.content = self.child.content
elif type == "H":
self.child = QueryHierarchy(tree[0])
self.root = self.child.root
self.leaf = self.child.leaf
elif type == "T":
self.child = QueryTable(tree[0])
self.horizontal = self.child.horizontal
self.vertical = self.child.vertical
# Otherwise, must simply be a domain...
else:
self.child = QueryDomain(tree[0])
self.content = self.child.content
self.type = self.child.type
def __str__(self):
return str(self.tree[0])
def toXML(self):
"""
Export this class to an xml string
"""
return self.child.toXML()
class QueryDomain(object):
"""
Handles the XML export of the domain operation
"""
def __init__(self, tree):
self.type = 'domain'
self.content = tree[0]
def __str__(self):
        return self.content
def toXML(self):
"""
Export this class to an xml string
"""
return self.content
class QueryHierarchy(object):
"""
Handles the XML export of the hierarchy operation
"""
def __init__(self, tree):
self.type = 'hierarchy'
# First argument must be a Domain
self.root = QueryDomain(tree[0])
# Second argument can conceivably be anything
self.leaf = QuerySentence(tree[1])
def __str__(self):
        return str(self.root)
def toXML(self):
"""
Export this class to an xml string
"""
return '<operator opcode="hierarchy">' \
+ '<operand type="' + self.root.type + '" arg="root">' \
+ self.root.toXML() + "</operand>" \
+ '<operand type="' + self.leaf.type + '" arg="leaf">' \
+ self.leaf.toXML() + "</operand>" \
+ '</operator>'
class QueryTable(object):
"""
Handles the XML export of the hierarchy operation
"""
def __init__(self, tree):
"""
Simply stores attributes, passing off handling of attributes to the
QuerySentence class
"""
self.type = 'table'
self.horizontal = QuerySentence(tree[0])
self.vertical = QuerySentence(tree[1])
self.content = QuerySentence(tree[2])
def __str__(self):
        return str(self.horizontal)
def toXML(self):
"""
Export this class to an xml string
"""
return '<operator opcode="table">' \
+ '<operand type="' + self.horizontal.type + '" arg="horizontal">' \
+ self.horizontal.toXML() + "</operand>" \
+ '<operand type="' + self.vertical.type + '" arg="vertical">' \
+ self.vertical.toXML() + "</operand>" \
+ '<operand type="' + self.content.type + '" arg="cell">' \
+ self.content.toXML() + "</operand>" \
+ '</operator>'
def demo():
"""
A demonstration of the use of this class
"""
query = r'table(one/two/three, four, five)'
# Print the query
print """
================================================================================
Query: ParadigmQuery(query)
================================================================================
"""
a = ParadigmQuery(query)
print query
# Print the Tree representation
print """
================================================================================
Tree: getTree()
O is an operator
T is a table
H is a hierarchy
D is a domain
================================================================================
"""
print a.getTree()
# Print the XML representation
print """
================================================================================
XML: getXML()
================================================================================
"""
print a.getXML()
# Some space
print
if __name__ == '__main__':
demo()
|
|
'''
Created on Oct 14, 2014
@author: stefan
@author: Laurent Tramoy
'''
from parser import stpcommands
from ciphers.cipher import AbstractCipher
from parser.stpcommands import getStringLeftRotate as rotl
class KetjeCipher(AbstractCipher):
"""
This class provides a model for the differential behaviour of the
Ketje authenticated encryption scheme by Guido Bertoni, Joan Daemen,
Michael Peeters, Gilles Van Assche and Ronny Van Keer.
For more information on Ketje see http://competitions.cr.yp.to/round1/ketjev11.pdf
"""
name = "ketje"
RO = [[0, 36, 3, 41, 18],
[1, 44, 10, 45, 2],
[62, 6, 43, 15, 61],
[28, 55, 25, 21, 56],
[27, 20, 39, 8, 14]]
def getFormatString(self):
"""
Returns the print format.
"""
return ['s00', 's10', 's20', 's30', 's40',
's01', 's11', 's21', 's31', 's41',
's02', 's12', 's22', 's32', 's42',
's03', 's13', 's23', 's33', 's43',
's04', 's14', 's24', 's34', 's44',
'm0', 'm1', "w"]
def createSTP(self, stp_filename, parameters):
"""
Creates an STP file for Ketje.
"""
wordsize = parameters["wordsize"]
rounds = parameters["rounds"]
weight = parameters["sweight"]
with open(stp_filename, 'w') as stp_file:
stp_file.write("% Input File for STP\n% Ketje w={} rounds={}"
"\n\n\n".format(wordsize, rounds))
# Setup variables
# 5x5 lanes of wordsize
s = ["s{}{}{}".format(x, y, i) for i in range(rounds + 1)
for y in range(5) for x in range(5)]
a = ["a{}{}{}".format(x, y, i) for i in range(rounds)
for y in range(5) for x in range(5)]
b = ["b{}{}{}".format(x, y, i) for i in range(rounds)
for y in range(5) for x in range(5)]
c = ["c{}{}".format(x, i) for i in range(rounds + 1) for x in range(5)]
d = ["d{}{}".format(x, i) for i in range(rounds + 1) for x in range(5)]
            m = ["m{}{}".format(x, i) for i in range(rounds + 1) for x in range(2)]
            xin = ["xin{}{}{}".format(y, z, i) for i in range(rounds)
                   for y in range(5) for z in range(wordsize)]
            xout = ["xout{}{}{}".format(y, z, i) for i in range(rounds)
                    for y in range(5) for z in range(wordsize)]
            andOut = ["andOut{}{}{}".format(y, z, i) for i in range(rounds)
                      for y in range(5) for z in range(wordsize)]
            # w = weight
            w = ["w{}".format(i) for i in range(rounds)]
            tmp = ["tmp{}{}{}".format(y, z, i) for i in range(rounds)
                   for y in range(5) for z in range(wordsize)]
stpcommands.setupVariables(stp_file, s, wordsize)
stpcommands.setupVariables(stp_file, a, wordsize)
stpcommands.setupVariables(stp_file, b, wordsize)
stpcommands.setupVariables(stp_file, c, wordsize)
stpcommands.setupVariables(stp_file, d, wordsize)
stpcommands.setupVariables(stp_file, w, 16)
stpcommands.setupVariables(stp_file, tmp, 5)
stpcommands.setupWeightComputationSum(stp_file, weight, w, wordsize)
stpcommands.setupVariables(stp_file, xin, 5)
stpcommands.setupVariables(stp_file, xout, 5)
stpcommands.setupVariables(stp_file, andOut, 5)
stpcommands.setupVariables(stp_file, m, wordsize)
# No all zero characteristic
stpcommands.assertNonZero(stp_file, a, wordsize)
for rnd in range(rounds):
self.setupKeccakRound(stp_file, rnd, s, a, b, c, d, wordsize,
tmp, w, m, xin, xout, andOut)
for key, value in parameters["fixedVariables"].items():
stpcommands.assertVariableValue(stp_file, key, value)
stpcommands.setupQuery(stp_file)
return
def setupKeccakRound(self, stp_file, rnd, s, a, b, c, d, wordsize, tmp,
w, m, xin, xout, andOut):
"""
Model for one round of Keccak.
"""
command = ""
        # XOR the message into the first two lanes of the state (x = 0, 1; y = 0).
for x in range(5):
for y in range(5):
                if (x == 0 and y == 0) or (x == 1 and y == 0):
command += "ASSERT({}=BVXOR({},{}));\n".format(
a[x + 5*y + 25*rnd], s[x + 5*y + 25*rnd], m[x + 2*rnd])
else:
command += "ASSERT({}={});\n".format(
a[x + 5*y + 25*rnd], s[x + 5*y + 25*rnd])
# Linear functions
for i in range(5):
command += "ASSERT({} = BVXOR({}, BVXOR({}, BVXOR({}, BVXOR({}, {})))));\n".format(
c[i + 5*rnd], a[i + 5*0 + 25*rnd], a[i + 5*1 + 25*rnd],
a[i + 5*2 + 25*rnd], a[i + 5*3 + 25*rnd], a[i + 5*4 + 25*rnd])
# Compute intermediate values
for i in range(5):
command += "ASSERT({} = BVXOR({}, {}));\n".format(
d[i + 5*rnd], c[(i - 1) % 5 + 5*rnd],
rotl(c[(i + 1) % 5 + 5*rnd], 1, wordsize))
# Rho and Pi
for x in range(5):
for y in range(5):
new_b_index = y + 5*((2*x + 3*y) % 5) + 25*rnd
tmp_xor = "BVXOR({}, {})".format(a[x + 5*y + 25*rnd], d[x + 5*rnd])
command += "ASSERT({} = {});\n".format(
b[new_b_index], rotl(tmp_xor, self.RO[x][y], wordsize))
# Chi
rot_alpha = 2
rot_beta = 1
weight_sum = ""
for y in range(5):
for z in range(wordsize):
# Construct S-box input
command += "ASSERT({}={});\n".format(
xin[z + wordsize*y + 5*wordsize*rnd],
b[0 + 5*y + 25*rnd] + "[{0}:{0}]".format(z) + "@" +
b[1 + 5*y + 25*rnd] + "[{0}:{0}]".format(z) + "@" +
b[2 + 5*y + 25*rnd] + "[{0}:{0}]".format(z) + "@" +
b[3 + 5*y + 25*rnd] + "[{0}:{0}]".format(z) + "@" +
b[4 + 5*y + 25*rnd] + "[{0}:{0}]".format(z))
# Construct S-box output
command += "ASSERT({}={});\n".format(
xout[z + wordsize*y + 5*wordsize*rnd],
s[0 + 5*y + 25*(rnd+1)] + "[{0}:{0}]".format(z) + "@" +
s[1 + 5*y + 25*(rnd+1)] + "[{0}:{0}]".format(z) + "@" +
s[2 + 5*y + 25*(rnd+1)] + "[{0}:{0}]".format(z) + "@" +
s[3 + 5*y + 25*(rnd+1)] + "[{0}:{0}]".format(z) + "@" +
s[4 + 5*y + 25*(rnd+1)] + "[{0}:{0}]".format(z))
xin_rotalpha = rotl(xin[z + wordsize*y + 5*wordsize*rnd], rot_alpha, 5)
xin_rotbeta = rotl(xin[z + wordsize*y + 5*wordsize*rnd], rot_beta, 5)
#Deal with dependent inputs
varibits = "({0} | {1})".format(xin_rotalpha, xin_rotbeta)
doublebits = self.getDoubleBits(xin[z + wordsize*y + 5*wordsize*rnd], rot_alpha, rot_beta)
#Check for valid difference
firstcheck = "({} & ~{})".format(andOut[z + wordsize*y + 5*wordsize*rnd], varibits)
secondcheck = "(~BVXOR({}, {}) & {})".format(
andOut[z + wordsize*y + 5*wordsize*rnd], rotl(andOut[z + wordsize*y + 5*wordsize*rnd], rot_alpha - rot_beta, 5), doublebits)
                thirdcheck = "(IF {0} = 0b{1} THEN BVMOD(5, {0}, 0b00010) ELSE 0b{2} ENDIF)".format(
                    xin[z + wordsize*y + 5*wordsize*rnd], "11111", "00000")
command += "ASSERT(({} | {} | {}) = 0b{});\n".format(firstcheck,
secondcheck, thirdcheck, "00000")
#Assert XORs
command += "ASSERT({} = BVXOR({},{}));\n".format(
xout[z + wordsize*y + 5*wordsize*rnd],
xin[z + wordsize*y + 5*wordsize*rnd],
andOut[z + wordsize*y + 5*wordsize*rnd])
#Weight computation
command += ("ASSERT({0} = (IF {1} = 0b{4} THEN BVSUB({5},0b{4},0b{6}1)"
"ELSE BVXOR({2}, {3}) ENDIF));\n".format(
tmp[z + wordsize*y + 5*wordsize*rnd],
xin[z + wordsize*y + 5*wordsize*rnd],
varibits, doublebits, "1"*5,
5, "0"*4))
weight_sum += ("0b{0}@(BVPLUS({1}, {2}[0:0], {2}[1:1], "
"{2}[2:2],{2}[3:3], {2}[4:4])),".format(
"0"*11, 5, "0b0000@" +
tmp[z + wordsize*y + 5*wordsize*rnd]))
command += "ASSERT({}=BVPLUS({},{}));\n".format(w[rnd], 16, weight_sum[:-1])
stp_file.write(command)
return
def getDoubleBits(self, xin, rot_alpha, rot_beta):
command = "({0} & ~{1} & {2})".format(
rotl(xin, rot_beta, 5), rotl(xin, rot_alpha, 5),
rotl(xin, 2*rot_alpha - rot_beta, 5))
return command
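

# A minimal usage sketch, assuming this module is run inside the CryptoSMT
# tool so that 'parser.stpcommands' and 'ciphers.cipher' are importable; the
# parameter values below are illustrative only.
if __name__ == '__main__':
    cipher = KetjeCipher()
    params = {
        "wordsize": 16,        # lane size in bits
        "rounds": 1,           # number of Keccak-p rounds to model
        "sweight": 10,         # target differential weight
        "fixedVariables": {},  # optionally fix lanes, e.g. {"s000": "0x0000"}
    }
    # Writes the STP/CVC constraint system for the chosen parameters.
    cipher.createSTP("ketje.stp", params)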
|
|
from datetime import datetime
from collections import OrderedDict
import enum
import time
from sqlalchemy import Column, Integer, String, Float, SmallInteger, BigInteger, ForeignKey, UniqueConstraint, create_engine, cast, func, desc, asc, and_, exists
from sqlalchemy.exc import DBAPIError
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.types import TypeDecorator, Numeric, Text
from sqlalchemy.dialects.mysql import TINYINT, MEDIUMINT, BIGINT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.exc import NoResultFound
from . import utils
try:
from . import config
DB_ENGINE = config.DB_ENGINE
except (ImportError, AttributeError):
DB_ENGINE = 'sqlite:///db.sqlite'
_optional = {
'LAST_MIGRATION': 1481932800,
'SPAWN_ID_INT': True,
'RARE_IDS': [],
'REPORT_SINCE': None,
'BOUNDARIES': None,
'STAY_WITHIN_MAP': True,
'MORE_POINTS': True
}
for setting_name, default in _optional.items():
if not hasattr(config, setting_name):
setattr(config, setting_name, default)
del _optional
if config.BOUNDARIES:
try:
from shapely.geometry import Polygon, Point
if not isinstance(config.BOUNDARIES, Polygon):
raise TypeError('BOUNDARIES must be a shapely Polygon.')
except ImportError as e:
raise ImportError('BOUNDARIES is set but shapely is not available.') from e
try:
if config.LAST_MIGRATION > time.time():
raise ValueError('LAST_MIGRATION must be a timestamp from the past.')
except TypeError as e:
raise TypeError('LAST_MIGRATION must be a numeric timestamp.') from e
class Team(enum.Enum):
none = 0
mystic = 1
valor = 2
instict = 3
if DB_ENGINE.startswith('mysql'):
TINY_TYPE = TINYINT(unsigned=True) # 0 to 255
    MEDIUM_TYPE = MEDIUMINT(unsigned=True) # 0 to 16777215
HUGE_TYPE = BIGINT(unsigned=True) # 0 to 18446744073709551615
elif DB_ENGINE.startswith('postgres'):
class NumInt(TypeDecorator):
'''Modify Numeric type for integers'''
impl = Numeric
def process_bind_param(self, value, dialect):
return int(value)
def process_result_value(self, value, dialect):
return int(value)
@property
def python_type(self):
return int
TINY_TYPE = SmallInteger # -32768 to 32767
MEDIUM_TYPE = Integer # -2147483648 to 2147483647
HUGE_TYPE = NumInt(precision=20, scale=0) # up to 20 digits
else:
class TextInt(TypeDecorator):
'''Modify Text type for integers'''
impl = Text
def process_bind_param(self, value, dialect):
return str(value)
def process_result_value(self, value, dialect):
return int(value)
TINY_TYPE = SmallInteger
MEDIUM_TYPE = Integer
HUGE_TYPE = TextInt
if config.SPAWN_ID_INT:
ID_TYPE = BigInteger
else:
ID_TYPE = String(11)
def get_engine():
return create_engine(DB_ENGINE)
def get_engine_name(session):
return session.connection().engine.name
def combine_key(sighting):
return sighting['encounter_id'], sighting['spawn_id']
Base = declarative_base()
class Bounds:
if config.BOUNDARIES:
boundaries = config.BOUNDARIES
@classmethod
def contain(cls, p):
return cls.boundaries.contains(Point(p))
elif config.STAY_WITHIN_MAP:
north = max(config.MAP_START[0], config.MAP_END[0])
south = min(config.MAP_START[0], config.MAP_END[0])
east = max(config.MAP_START[1], config.MAP_END[1])
west = min(config.MAP_START[1], config.MAP_END[1])
@classmethod
def contain(cls, p):
lat, lon = p
return (cls.south <= lat <= cls.north and
cls.west <= lon <= cls.east)
else:
@staticmethod
def contain(p):
return True
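# Bounds.contain((lat, lon)) is a point-in-polygon test when BOUNDARIES is set,
# a plain bounding-box check when STAY_WITHIN_MAP is set, and always True
# otherwise.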
class SightingCache(object):
"""Simple cache for storing actual sightings
It's used in order not to make as many queries to the database.
It's also capable of purging old entries.
"""
def __init__(self):
self.store = {}
self.spawns = set()
def add(self, sighting):
self.store[combine_key(sighting)] = sighting['expire_timestamp']
self.spawns.add(sighting['spawn_id'])
def __contains__(self, raw_sighting):
expire_timestamp = self.store.get(combine_key(raw_sighting))
if not expire_timestamp:
return False
within_range = (
expire_timestamp > raw_sighting['expire_timestamp'] - 1 and
expire_timestamp < raw_sighting['expire_timestamp'] + 1
)
return within_range
def clean_expired(self):
to_remove = []
for key, timestamp in self.store.items():
if time.time() > timestamp:
to_remove.append(key)
try:
self.spawns.remove(key[1])
except KeyError:
pass
for key in to_remove:
del self.store[key]
class MysteryCache(object):
"""Simple cache for storing Pokemon with unknown expiration times
It's used in order not to make as many queries to the database.
It's also capable of purging old entries.
"""
def __init__(self):
self.store = {}
def add(self, sighting):
self.store[combine_key(sighting)] = [sighting['seen']] * 2
def __contains__(self, raw_sighting):
key = combine_key(raw_sighting)
try:
first, last = self.store[key]
except (KeyError, TypeError):
return False
new_time = raw_sighting['seen']
if new_time > last:
self.store[key][1] = new_time
return True
def clean_expired(self, session):
to_remove = []
for key, times in self.store.items():
first, last = times
if first < time.time() - 3600:
to_remove.append(key)
if last == first:
continue
encounter_id, spawn_id = key
encounter = session.query(Mystery) \
.filter(Mystery.spawn_id == spawn_id) \
.filter(Mystery.encounter_id == encounter_id) \
.first()
if not encounter:
continue
hour = encounter.first_seen - (encounter.first_seen % 3600)
encounter.last_seconds = last - hour
encounter.seen_range = last - first
if to_remove:
try:
session.commit()
except DBAPIError:
session.rollback()
for key in to_remove:
del self.store[key]
class FortCache(object):
"""Simple cache for storing fort sightings"""
def __init__(self):
self.store = {}
def add(self, sighting):
if sighting['type'] == 'pokestop':
self.store[sighting['external_id']] = True
else:
self.store[sighting['external_id']] = (
sighting['team'],
sighting['prestige'],
sighting['guard_pokemon_id'],
)
def __contains__(self, sighting):
params = self.store.get(sighting['external_id'])
if not params:
return False
if sighting['type'] == 'pokestop':
return True
is_the_same = (
params[0] == sighting['team'] and
params[1] == sighting['prestige'] and
params[2] == sighting['guard_pokemon_id']
)
return is_the_same
SIGHTING_CACHE = SightingCache()
MYSTERY_CACHE = MysteryCache()
FORT_CACHE = FortCache()
class Sighting(Base):
__tablename__ = 'sightings'
id = Column(Integer, primary_key=True)
pokemon_id = Column(TINY_TYPE)
spawn_id = Column(ID_TYPE)
expire_timestamp = Column(Integer, index=True)
encounter_id = Column(HUGE_TYPE, index=True)
normalized_timestamp = Column(Integer)
lat = Column(Float)
lon = Column(Float)
atk_iv = Column(TINY_TYPE)
def_iv = Column(TINY_TYPE)
sta_iv = Column(TINY_TYPE)
move_1 = Column(SmallInteger)
move_2 = Column(SmallInteger)
__table_args__ = (
UniqueConstraint(
'encounter_id',
'expire_timestamp',
name='timestamp_encounter_id_unique'
),
)
class Mystery(Base):
__tablename__ = 'mystery_sightings'
id = Column(Integer, primary_key=True)
pokemon_id = Column(TINY_TYPE)
spawn_id = Column(ID_TYPE, index=True)
encounter_id = Column(HUGE_TYPE, index=True)
lat = Column(Float)
lon = Column(Float)
first_seen = Column(Integer, index=True)
first_seconds = Column(SmallInteger)
last_seconds = Column(SmallInteger)
seen_range = Column(SmallInteger)
atk_iv = Column(TINY_TYPE)
def_iv = Column(TINY_TYPE)
sta_iv = Column(TINY_TYPE)
move_1 = Column(SmallInteger)
move_2 = Column(SmallInteger)
__table_args__ = (
UniqueConstraint(
'encounter_id',
'spawn_id',
name='unique_encounter'
),
)
class Spawnpoint(Base):
__tablename__ = 'spawnpoints'
id = Column(Integer, primary_key=True)
spawn_id = Column(ID_TYPE, unique=True, index=True)
despawn_time = Column(SmallInteger, index=True)
lat = Column(Float)
lon = Column(Float)
alt = Column(SmallInteger)
updated = Column(Integer, index=True)
duration = Column(TINY_TYPE)
class Fort(Base):
__tablename__ = 'forts'
id = Column(Integer, primary_key=True)
external_id = Column(String(35), unique=True)
lat = Column(Float, index=True)
lon = Column(Float, index=True)
sightings = relationship(
'FortSighting',
backref='fort',
order_by='FortSighting.last_modified'
)
class FortSighting(Base):
__tablename__ = 'fort_sightings'
id = Column(Integer, primary_key=True)
fort_id = Column(Integer, ForeignKey('forts.id'))
last_modified = Column(Integer)
team = Column(TINY_TYPE)
prestige = Column(MEDIUM_TYPE)
guard_pokemon_id = Column(TINY_TYPE)
__table_args__ = (
UniqueConstraint(
'fort_id',
'last_modified',
name='fort_id_last_modified_unique'
),
)
class Pokestop(Base):
__tablename__ = 'pokestops'
id = Column(Integer, primary_key=True)
external_id = Column(String(35), unique=True)
lat = Column(Float, index=True)
lon = Column(Float, index=True)
Session = sessionmaker(bind=get_engine())
def get_spawns(session):
spawns = session.query(Spawnpoint)
mysteries = set()
spawns_dict = {}
despawn_times = {}
altitudes = {}
known_points = set()
for spawn in spawns:
point = (spawn.lat, spawn.lon)
# skip if point is not within boundaries (if applicable)
if not Bounds.contain(point):
continue
rounded = utils.round_coords(point, 3)
altitudes[rounded] = spawn.alt
if not spawn.updated or spawn.updated <= config.LAST_MIGRATION:
mysteries.add(point)
continue
if spawn.duration == 60:
spawn_time = spawn.despawn_time
else:
spawn_time = (spawn.despawn_time + 1800) % 3600
despawn_times[spawn.spawn_id] = spawn.despawn_time
spawns_dict[spawn.spawn_id] = (point, spawn_time)
if config.MORE_POINTS:
known_points.add(point)
spawns = OrderedDict(sorted(spawns_dict.items(), key=lambda k: k[1][1]))
return spawns, despawn_times, mysteries, altitudes, known_points
def normalize_timestamp(timestamp):
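    # Round down to the nearest 120-second bucket, e.g. 1481933000 -> 1481932920.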
return (timestamp // 120) * 120
def get_since():
"""Returns 'since' timestamp that should be used for filtering"""
return time.mktime(config.REPORT_SINCE.timetuple())
def get_since_query_part(where=True):
    """Return the WHERE/AND clause that excludes records from before REPORT_SINCE."""
if config.REPORT_SINCE:
return '{noun} expire_timestamp > {since}'.format(
noun='WHERE' if where else 'AND',
since=get_since(),
)
return ''
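# get_since_query_part() above yields e.g. "WHERE expire_timestamp > 1481932800.0"
# when REPORT_SINCE is set (with "AND" instead of "WHERE" when where=False),
# and an empty string otherwise. The timestamp shown is illustrative.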
def add_sighting(session, pokemon):
# Check if there isn't the same entry already
if pokemon in SIGHTING_CACHE:
return
existing = session.query(exists().where(and_(
Sighting.expire_timestamp == pokemon['expire_timestamp'],
Sighting.encounter_id == pokemon['encounter_id']))
).scalar()
if existing:
SIGHTING_CACHE.add(pokemon)
return
obj = Sighting(
pokemon_id=pokemon['pokemon_id'],
spawn_id=pokemon['spawn_id'],
encounter_id=pokemon['encounter_id'],
expire_timestamp=pokemon['expire_timestamp'],
normalized_timestamp=normalize_timestamp(pokemon['expire_timestamp']),
lat=pokemon['lat'],
lon=pokemon['lon'],
atk_iv=pokemon.get('individual_attack'),
def_iv=pokemon.get('individual_defense'),
sta_iv=pokemon.get('individual_stamina'),
move_1=pokemon.get('move_1'),
move_2=pokemon.get('move_2')
)
session.add(obj)
SIGHTING_CACHE.add(pokemon)
def add_spawnpoint(session, pokemon, spawns):
# Check if the same entry already exists
spawn_id = pokemon['spawn_id']
new_time = pokemon['expire_timestamp'] % 3600
existing_time = spawns.get_despawn_seconds(spawn_id)
point = (pokemon['lat'], pokemon['lon'])
if new_time == existing_time:
return
existing = session.query(Spawnpoint) \
.filter(Spawnpoint.spawn_id == spawn_id) \
.first()
now = round(time.time())
if existing:
existing.updated = now
if (existing.despawn_time is None or
existing.updated < config.LAST_MIGRATION):
widest = get_widest_range(session, spawn_id)
if widest and widest > 1710:
existing.duration = 60
elif new_time == existing.despawn_time:
return
existing.despawn_time = new_time
spawns.add_despawn(spawn_id, new_time)
else:
altitude = spawns.get_altitude(point)
spawns.add_despawn(spawn_id, new_time)
widest = get_widest_range(session, spawn_id)
if widest and widest > 1710:
duration = 60
else:
duration = None
obj = Spawnpoint(
spawn_id=spawn_id,
despawn_time=new_time,
lat=pokemon['lat'],
lon=pokemon['lon'],
alt=altitude,
updated=now,
duration=duration
)
session.add(obj)
spawns.add_known(point)
def add_mystery_spawnpoint(session, pokemon, spawns):
# Check if the same entry already exists
spawn_id = pokemon['spawn_id']
point = (pokemon['lat'], pokemon['lon'])
if spawns.db_has(point):
return
existing = session.query(exists().where(
Spawnpoint.spawn_id == spawn_id)).scalar()
if existing:
return
altitude = spawns.get_altitude(point)
obj = Spawnpoint(
spawn_id=spawn_id,
despawn_time=None,
lat=pokemon['lat'],
lon=pokemon['lon'],
alt=altitude,
updated=0,
duration=None
)
session.add(obj)
if Bounds.contain(point):
spawns.add_mystery(point)
def add_mystery(session, pokemon, spawns):
if pokemon in MYSTERY_CACHE:
return
add_mystery_spawnpoint(session, pokemon, spawns)
existing = session.query(Mystery) \
.filter(Mystery.encounter_id == pokemon['encounter_id']) \
.filter(Mystery.spawn_id == pokemon['spawn_id']) \
.first()
if existing:
key = combine_key(pokemon)
MYSTERY_CACHE.store[key] = [existing.first_seen, pokemon['seen']]
return
seconds = pokemon['seen'] % 3600
obj = Mystery(
pokemon_id=pokemon['pokemon_id'],
spawn_id=pokemon['spawn_id'],
encounter_id=pokemon['encounter_id'],
lat=pokemon['lat'],
lon=pokemon['lon'],
first_seen=pokemon['seen'],
first_seconds=seconds,
last_seconds=seconds,
seen_range=0,
atk_iv=pokemon.get('individual_attack'),
def_iv=pokemon.get('individual_defense'),
sta_iv=pokemon.get('individual_stamina'),
move_1=pokemon.get('move_1'),
move_2=pokemon.get('move_2')
)
session.add(obj)
MYSTERY_CACHE.add(pokemon)
def add_fort_sighting(session, raw_fort):
if raw_fort in FORT_CACHE:
return
# Check if fort exists
fort = session.query(Fort) \
.filter(Fort.external_id == raw_fort['external_id']) \
.first()
if not fort:
fort = Fort(
external_id=raw_fort['external_id'],
lat=raw_fort['lat'],
lon=raw_fort['lon'],
)
session.add(fort)
if fort.id:
existing = session.query(exists().where(and_(
FortSighting.fort_id == fort.id,
FortSighting.team == raw_fort['team'],
FortSighting.prestige == raw_fort['prestige'],
FortSighting.guard_pokemon_id == raw_fort['guard_pokemon_id']
))).scalar()
if existing:
# Why is it not in the cache? It should be there!
FORT_CACHE.add(raw_fort)
return
obj = FortSighting(
fort=fort,
team=raw_fort['team'],
prestige=raw_fort['prestige'],
guard_pokemon_id=raw_fort['guard_pokemon_id'],
last_modified=raw_fort['last_modified'],
)
session.add(obj)
FORT_CACHE.add(raw_fort)
def add_pokestop(session, raw_pokestop):
if raw_pokestop in FORT_CACHE:
return
pokestop = session.query(exists().where(
Pokestop.external_id == raw_pokestop['external_id'])).scalar()
if pokestop:
FORT_CACHE.add(raw_pokestop)
return
pokestop = Pokestop(
external_id=raw_pokestop['external_id'],
lat=raw_pokestop['lat'],
lon=raw_pokestop['lon'],
)
session.add(pokestop)
FORT_CACHE.add(raw_pokestop)
def get_sightings(session):
return session.query(Sighting) \
.filter(Sighting.expire_timestamp > time.time()) \
.all()
def get_spawn_points(session):
return session.query(Spawnpoint).all()
def get_pokestops(session):
return session.query(Pokestop).all()
def get_forts(session):
if get_engine_name(session) == 'sqlite':
# SQLite version is slooooooooooooow when compared to MySQL
where = '''
WHERE fs.fort_id || '-' || fs.last_modified IN (
SELECT fort_id || '-' || MAX(last_modified)
FROM fort_sightings
GROUP BY fort_id
)
'''
else:
where = '''
WHERE (fs.fort_id, fs.last_modified) IN (
SELECT fort_id, MAX(last_modified)
FROM fort_sightings
GROUP BY fort_id
)
'''
query = session.execute('''
SELECT
fs.fort_id,
fs.id,
fs.team,
fs.prestige,
fs.guard_pokemon_id,
fs.last_modified,
f.lat,
f.lon
FROM fort_sightings fs
JOIN forts f ON f.id=fs.fort_id
{where}
'''.format(where=where))
return query.fetchall()
def get_session_stats(session):
query = session.query(func.min(Sighting.expire_timestamp),
func.max(Sighting.expire_timestamp))
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
min_max_result = query.one()
length_hours = (min_max_result[1] - min_max_result[0]) // 3600
if length_hours == 0:
length_hours = 1
# Convert to datetime
return {
'start': datetime.fromtimestamp(min_max_result[0]),
'end': datetime.fromtimestamp(min_max_result[1]),
'length_hours': length_hours
}
def get_despawn_time(session, spawn_id):
spawn_time = session.query(Spawnpoint.despawn_time) \
.filter(Spawnpoint.spawn_id == spawn_id) \
.filter(Spawnpoint.updated > config.LAST_MIGRATION) \
.scalar()
return spawn_time
def get_first_last(session, spawn_id):
result = session.query(func.min(Mystery.first_seconds), func.max(Mystery.last_seconds)) \
.filter(Mystery.spawn_id == spawn_id) \
.filter(Mystery.first_seen > config.LAST_MIGRATION) \
.first()
return result
def get_widest_range(session, spawn_id):
largest = session.query(func.max(Mystery.seen_range)) \
.filter(Mystery.spawn_id == spawn_id) \
.filter(Mystery.first_seen > config.LAST_MIGRATION) \
.scalar()
return largest
def estimate_remaining_time(session, spawn_id, seen=None):
first, last = get_first_last(session, spawn_id)
if not first:
return 90, 1800
if seen:
if seen > last:
last = seen
elif seen < first:
first = seen
if last - first > 1710:
possible = (first + 90, last + 90, first + 1800, last + 1800)
estimates = []
for possibility in possible:
estimates.append(utils.time_until_time(possibility, seen))
soonest = min(estimates)
latest = max(estimates)
return soonest, latest
soonest = last + 90
latest = first + 1800
soonest = utils.time_until_time(soonest, seen)
latest = utils.time_until_time(latest, seen)
return soonest, latest
def get_punch_card(session):
query = session.query(cast(Sighting.expire_timestamp / 300, Integer).label('ts_date'), func.count('ts_date')) \
.group_by('ts_date') \
.order_by('ts_date')
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
results = tuple(query)
results_dict = {r[0]: r[1] for r in results}
filled = []
for row_no, i in enumerate(range(int(results[0][0]), int(results[-1][0]))):
filled.append((row_no, results_dict.get(i, 0)))
return filled
def get_top_pokemon(session, count=30, order='DESC'):
query = session.query(Sighting.pokemon_id, func.count(Sighting.pokemon_id).label('how_many')) \
.group_by(Sighting.pokemon_id)
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
if order == 'DESC':
query = query.order_by(desc('how_many')).limit(count)
else:
query = query.order_by(asc('how_many')).limit(count)
return query.all()
def get_pokemon_ranking(session):
ranking = []
query = session.query(Sighting.pokemon_id, func.count(Sighting.pokemon_id).label('how_many')) \
.group_by(Sighting.pokemon_id)
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
query = query.order_by(asc('how_many'))
db_ids = [r[0] for r in query]
for pokemon_id in range(1, 152):
if pokemon_id not in db_ids:
ranking.append(pokemon_id)
ranking.extend(db_ids)
return ranking
def get_sightings_per_pokemon(session):
query = session.query(Sighting.pokemon_id, func.count(Sighting.pokemon_id).label('how_many')) \
.group_by(Sighting.pokemon_id) \
.order_by('how_many')
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
return OrderedDict(query.all())
def get_rare_pokemon(session):
result = []
for pokemon_id in config.RARE_IDS:
query = session.query(Sighting) \
.filter(Sighting.pokemon_id == pokemon_id)
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
count = query.count()
if count > 0:
result.append((pokemon_id, count))
return result
def get_nonexistent_pokemon(session):
result = []
query = session.execute('''
SELECT DISTINCT pokemon_id FROM sightings
{report_since}
'''.format(report_since=get_since_query_part()))
db_ids = [r[0] for r in query.fetchall()]
for pokemon_id in range(1, 152):
if pokemon_id not in db_ids:
result.append(pokemon_id)
return result
def get_all_sightings(session, pokemon_ids):
# TODO: rename this and get_sightings
query = session.query(Sighting) \
.filter(Sighting.pokemon_id.in_(pokemon_ids))
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
return query.all()
def get_spawns_per_hour(session, pokemon_id):
if get_engine_name(session) == 'sqlite':
ts_hour = 'STRFTIME("%H", expire_timestamp)'
elif get_engine_name(session) == 'postgresql':
ts_hour = "TO_CHAR(TO_TIMESTAMP(expire_timestamp), 'HH24')"
else:
ts_hour = 'HOUR(FROM_UNIXTIME(expire_timestamp))'
query = session.execute('''
SELECT
{ts_hour} AS ts_hour,
COUNT(*) AS how_many
FROM sightings
WHERE pokemon_id = {pokemon_id}
{report_since}
GROUP BY ts_hour
ORDER BY ts_hour
'''.format(
pokemon_id=pokemon_id,
ts_hour=ts_hour,
report_since=get_since_query_part(where=False)
))
results = []
for result in query.fetchall():
results.append((
{
'v': [int(result[0]), 30, 0],
'f': '{}:00 - {}:00'.format(
int(result[0]), int(result[0]) + 1
),
},
result[1]
))
return results
def get_total_spawns_count(session, pokemon_id):
query = session.query(Sighting) \
.filter(Sighting.pokemon_id == pokemon_id)
if config.REPORT_SINCE:
query = query.filter(Sighting.expire_timestamp > get_since())
return query.count()
def get_all_spawn_coords(session, pokemon_id=None):
points = session.query(Sighting.lat, Sighting.lon)
if pokemon_id:
points = points.filter(Sighting.pokemon_id == int(pokemon_id))
if config.REPORT_SINCE:
points = points.filter(Sighting.expire_timestamp > get_since())
return points.all()
|
|
#!/bin/env python
import argparse
import collections
import pprint
import pickle
import os
import re
import cbor
import logging
logr = logging.getLogger( __name__ )
# Exception type is part of the error signature
err_type_re_signature = {
"<type 'exceptions.OSError'>": re.compile( '([^:]+):?' ),
"<type 'exceptions.IOError'>": re.compile( '([^:]+):?' ),
"<class 'runcmd.Run_Cmd_Error'>":
re.compile( '<Run_Cmd_Error \((code=.+msg=[^:/]+).*:(.+)\n' ),
"<class 'billiard.exceptions.SoftTimeLimitExceeded'>": re.compile( '(.*)' ),
}
# Traceback lines to skip in error signature
re_traceback_ignore = re.compile(
'/(subprocess|os|genericpath|posixpath).py", '
'|' 'logging/__init__.py", '
'|' 'billiard/pool.py", '
'|' 'celery/app/(task|trace).py", ' )
def process_cmdline():
parser = argparse.ArgumentParser()
parser.add_argument( 'infile' )
parser.add_argument( '-g', '--grep',
help='Like fgrep on raw text of each error' )
picklegroup = parser.add_argument_group( title='Pickling options',
description="""Specifying -n and -p at the same time will cause the source
file to be re-parsed and a new pickle file created.""")
picklegroup.add_argument( '--nopicklein', '-n', action='store_false', dest='picklein',
help="Don't read from pickled data file" )
picklegroup.add_argument( '--pickle', '-p', action='store_true', dest='pickleout',
help='Save pickled data in INFILE.pickle, clobbering an existing file' )
outputgroup = parser.add_argument_group( title='Output Details' )
outputgroup.add_argument( '--message', '-m', action='store_true',
help="Show one-line message for each instance of error type" )
outputgroup.add_argument( '--details', '-d', action='store_true',
help="Show details for each instance of error type" )
outputgroup.add_argument( '--raw', '-r', action='store_true',
help="Show raw exception for each error type." )
outputgroup.add_argument( '-a', '--attr', action='append', dest='attrlist',
help="Show specified attrs from each instance. Can be present multiple times." )
parser.add_argument( '--anydetails', action='store_true',
help=argparse.SUPPRESS )
limitgroup = parser.add_mutually_exclusive_group()
limitgroup.add_argument( '--show', '-s', type=int, metavar='N',
help="Show details for error number N (in list of errors) and exit" )
limitgroup.add_argument( '--include', '-i', action='append', metavar='INC',
help="""Show only errors with type matching INC.
Can be specified multiple times.""" )
limitgroup.add_argument( '--exclude', '-e', action='append', metavar='EXC',
help="""Show errors where type does NOT match EXC.
Can be specified multiple times.""" )
default_options = {
"picklein": True,
"message": False,
"details": False,
"raw": False,
"anydetails": False,
}
parser.set_defaults( **default_options )
args = parser.parse_args()
if args.message or args.details or args.raw or args.attrlist:
args.anydetails = True
return args
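# Illustrative invocations (the script and input file names are assumptions):
#   parse_errors.py errors.cbor              # summarize each error signature
#   parse_errors.py errors.cbor -m           # also show one-line messages
#   parse_errors.py errors.cbor -s 3 -r      # dump raw records for error #3
#   parse_errors.py errors.cbor -n -p        # re-parse the source and rewrite the pickle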
def get_error_signature( rec ):
etype = rec[ 'exception_type' ]
exception = rec[ 'exception' ]
try:
re_pattern = err_type_re_signature[ etype ]
except ( KeyError ) as e:
logr.error( 'ERROR while parsing record:\n{0}\n'.format( pprint.pformat( rec ) ) )
raise e
msg = ('____ Looking for signature match in exception:\n'
'{e}\n'
'____ for exception type:\n'
'{etype}').format( e = exception, etype = etype )
logr.debug( msg )
match = re_pattern.match( exception )
if not match:
raise UserWarning( 'No match found...\n{msg}'.format( msg = msg ) )
relevant_parts = [ etype, ' ' ]
logr.debug( 'Matches: {m}'.format( m = pprint.pformat( match.groups() ) ) )
relevant_parts.append( ''.join( match.groups() ) + '\n' )
for L in rec[ 'traceback' ].splitlines():
if L.startswith( ' File ' ) \
and not re_traceback_ignore.search( L ):
relevant_parts.append( L + '\n' )
return ''.join( relevant_parts )
def process_error_record( errdict, rec ):
e_sig = get_error_signature( rec )
e_msg = rec[ 'exception' ]
# e_details = rec
if e_sig not in errdict:
errdict[ e_sig ] = { 'instances': [] }
errdict[ e_sig ][ 'instances' ].append( rec )
def process_file( infile ):
errors = collections.OrderedDict()
with open( infile, 'rb' ) as f:
try:
while True:
rec = cbor.load( f )
process_error_record( errors, rec )
except ( EOFError ):
pass
return errors
def print_single_error( num, sig, data, args ):
qty = len( data[ 'instances' ] )
print( '' )
print( 'Error # {0:02d} Qty:{1}'.format( num, qty ) )
print( '='*22 )
print( sig )
if args.anydetails:
for i in data[ 'instances' ]:
print( '-'*50 )
if args.attrlist:
outfmt = '{1}'
if len( args.attrlist ) > 1:
outfmt = '{0} {1}'
for a in args.attrlist:
val = 'None'
if a in i:
val = i[a]
print( outfmt.format( a, val ) )
if args.message:
print( i[ 'exception' ] )
if args.details:
for k in [ 'args' ]:
print( '{k}: {v}'.format( k=k, v=i[ k ] ) )
if args.raw:
pprint.pprint( i )
def print_errors( errdict, args ):
err_indices = { i: e for i, e in enumerate( errdict, start=1) }
if args.show:
# Show only the requested error
e = err_indices[ args.show ]
print_single_error( args.show, e, errdict[e], args )
else:
total_error_count = 0
for i, e_sig in err_indices.iteritems():
# Print errors by default
print_ok = True
if args.include:
# limit errors by inclusion
print_ok = False
if any( x in e_sig for x in args.include ):
print_ok = True
if args.exclude:
# limit errors by exclusion
print_ok = True
if any( x in e_sig for x in args.exclude ):
print_ok = False
if print_ok:
qty = len( errdict[ e_sig ][ 'instances' ] )
total_error_count += qty
print_single_error( i, e_sig, errdict[ e_sig ], args )
print( "" )
fmt = "Total Error Count: {0}"
sz = len( fmt ) - 3 + len( str( total_error_count ) )
print( '='*sz )
print( fmt.format( total_error_count ) )
print( '='*sz )
def grep_errors( errors, args ):
for k,v in errors.iteritems():
for rec in v[ 'instances' ]:
record_as_string = pprint.pformat( rec )
if args.grep in record_as_string:
                print( record_as_string )
if __name__ == "__main__":
loglvl = logging.WARNING
logging.basicConfig( level=loglvl )
args = process_cmdline()
head, tail = os.path.split( args.infile )
pickle_fn = os.path.join( head, '{0}.pickle'.format( tail ) )
if args.picklein and os.path.exists( pickle_fn ):
with open( pickle_fn, 'rb' ) as f:
errors = pickle.load( f )
else:
errors = process_file( args.infile )
if args.grep:
grep_errors( errors, args )
else:
print_errors( errors, args )
if args.pickleout:
with open( pickle_fn, 'wb' ) as f:
pickle.dump( errors, f )
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_by_subscription_request(
subscription_id: str,
*,
filter: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/labPlans')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = _SERIALIZER.query("filter", filter, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
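# Illustrative sketch (not part of the generated code): with placeholder values,
# build_list_by_subscription_request composes roughly the following request.
#
#   request = build_list_by_subscription_request(
#       subscription_id="00000000-0000-0000-0000-000000000000",   # placeholder
#       filter="properties/provisioningState eq 'Succeeded'",     # hypothetical OData filter
#   )
#   # request.method == "GET"
#   # request.url -> /subscriptions/.../providers/Microsoft.LabServices/labPlans
#   #                ?api-version=2021-11-15-preview&$filter=...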
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
lab_plan_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labPlanName": _SERIALIZER.url("lab_plan_name", lab_plan_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
lab_plan_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labPlanName": _SERIALIZER.url("lab_plan_name", lab_plan_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
lab_plan_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labPlanName": _SERIALIZER.url("lab_plan_name", lab_plan_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
lab_plan_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labPlanName": _SERIALIZER.url("lab_plan_name", lab_plan_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_save_image_request_initial(
subscription_id: str,
resource_group_name: str,
lab_plan_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-11-15-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/saveImage')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
"labPlanName": _SERIALIZER.url("lab_plan_name", lab_plan_name, 'str', max_length=100, min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class LabPlansOperations(object):
"""LabPlansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.labservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_subscription(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> Iterable["_models.PagedLabPlans"]:
"""Get all lab plans for a subscription.
Returns a list of all lab plans within a subscription.
:param filter: The filter to apply to the operation.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PagedLabPlans or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.labservices.models.PagedLabPlans]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedLabPlans"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PagedLabPlans", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.LabServices/labPlans'} # type: ignore
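    # Hedged usage sketch (assumes the service client exposes this operation
    # group as `lab_plans`, the usual AutoRest convention, which is not shown
    # in this file):
    #
    #   for plan in client.lab_plans.list_by_subscription():
    #       print(plan.name)
    #
    # ItemPaged drives the next_link paging transparently through the
    # get_next/extract_data callbacks defined above.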
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.PagedLabPlans"]:
"""Get all lab plans for a subscription and resource group.
Returns a list of all lab plans for a subscription and resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PagedLabPlans or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.labservices.models.PagedLabPlans]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PagedLabPlans"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("PagedLabPlans", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
lab_plan_name: str,
**kwargs: Any
) -> "_models.LabPlan":
"""Retrieves a Lab Plan resource.
Retrieves the properties of a Lab Plan.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI.
:type lab_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LabPlan, or the result of cls(response)
:rtype: ~azure.mgmt.labservices.models.LabPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LabPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('LabPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
lab_plan_name: str,
body: "_models.LabPlan",
**kwargs: Any
) -> "_models.LabPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.LabPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'LabPlan')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LabPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('LabPlan', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('LabPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
lab_plan_name: str,
body: "_models.LabPlan",
**kwargs: Any
) -> LROPoller["_models.LabPlan"]:
"""Updates or creates a Lab Plan resource.
Operation to create or update a Lab Plan resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI.
:type lab_plan_name: str
:param body: The request body.
:type body: ~azure.mgmt.labservices.models.LabPlan
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either LabPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.labservices.models.LabPlan]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LabPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('LabPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'original-uri'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
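    # Hedged LRO usage sketch (client/attribute names and argument values are
    # placeholders, as above):
    #
    #   poller = client.lab_plans.begin_create_or_update(
    #       resource_group_name="rg", lab_plan_name="plan", body=lab_plan,
    #   )
    #   lab_plan = poller.result()   # blocks until the PUT's long-running operation completes
    #
    # Passing polling=False selects NoPolling, so result() returns after the
    # initial response instead of waiting for the operation to finish.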
def _update_initial(
self,
resource_group_name: str,
lab_plan_name: str,
body: "_models.LabPlanUpdate",
**kwargs: Any
) -> "_models.LabPlan":
cls = kwargs.pop('cls', None) # type: ClsType["_models.LabPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'LabPlanUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('LabPlan', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('LabPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
lab_plan_name: str,
body: "_models.LabPlanUpdate",
**kwargs: Any
) -> LROPoller["_models.LabPlan"]:
"""Updates a Lab Plan resource.
Operation to update a Lab Plan resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI.
:type lab_plan_name: str
:param body: The request body.
:type body: ~azure.mgmt.labservices.models.LabPlanUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either LabPlan or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.labservices.models.LabPlan]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LabPlan"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('LabPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
lab_plan_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
lab_plan_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a Lab Plan resource.
Operation to delete a Lab Plan resource. Deleting a lab plan does not delete labs associated
with a lab plan, nor does it delete shared images added to a gallery via the lab plan
permission container.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI.
:type lab_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}'} # type: ignore
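    # Hedged sketch: begin_delete returns an LROPoller[None]; result() simply
    # blocks until the deletion completes (placeholder names):
    #
    #   client.lab_plans.begin_delete(resource_group_name="rg", lab_plan_name="plan").result()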
def _save_image_initial(
self,
resource_group_name: str,
lab_plan_name: str,
body: "_models.SaveImageBody",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(body, 'SaveImageBody')
request = build_save_image_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
content_type=content_type,
json=_json,
template_url=self._save_image_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_save_image_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/saveImage'} # type: ignore
@distributed_trace
def begin_save_image(
self,
resource_group_name: str,
lab_plan_name: str,
body: "_models.SaveImageBody",
**kwargs: Any
) -> LROPoller[None]:
"""Save an image from a lab VM to the attached shared image gallery.
Saves an image from a lab VM to the attached shared image gallery.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param lab_plan_name: The name of the lab plan that uniquely identifies it within containing
resource group. Used in resource URIs and in UI.
:type lab_plan_name: str
:param body: The request body.
:type body: ~azure.mgmt.labservices.models.SaveImageBody
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._save_image_initial(
resource_group_name=resource_group_name,
lab_plan_name=lab_plan_name,
body=body,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_save_image.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.LabServices/labPlans/{labPlanName}/saveImage'} # type: ignore
|
|
"""The Fronius integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
import logging
from typing import Final, TypeVar
from pyfronius import Fronius, FroniusError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_MODEL, ATTR_SW_VERSION, CONF_HOST, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import DeviceInfo
from .const import DOMAIN, SOLAR_NET_ID_SYSTEM, FroniusDeviceInfo
from .coordinator import (
FroniusCoordinatorBase,
FroniusInverterUpdateCoordinator,
FroniusLoggerUpdateCoordinator,
FroniusMeterUpdateCoordinator,
FroniusOhmpilotUpdateCoordinator,
FroniusPowerFlowUpdateCoordinator,
FroniusStorageUpdateCoordinator,
)
_LOGGER: Final = logging.getLogger(__name__)
PLATFORMS: Final = [Platform.SENSOR]
FroniusCoordinatorType = TypeVar("FroniusCoordinatorType", bound=FroniusCoordinatorBase)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up fronius from a config entry."""
host = entry.data[CONF_HOST]
fronius = Fronius(async_get_clientsession(hass), host)
solar_net = FroniusSolarNet(hass, entry, fronius)
await solar_net.init_devices()
hass.data.setdefault(DOMAIN, {})[entry.entry_id] = solar_net
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
solar_net = hass.data[DOMAIN].pop(entry.entry_id)
while solar_net.cleanup_callbacks:
solar_net.cleanup_callbacks.pop()()
return unload_ok
class FroniusSolarNet:
"""The FroniusSolarNet class routes received values to sensor entities."""
def __init__(
self, hass: HomeAssistant, entry: ConfigEntry, fronius: Fronius
) -> None:
"""Initialize FroniusSolarNet class."""
self.hass = hass
self.cleanup_callbacks: list[Callable[[], None]] = []
self.config_entry = entry
self.coordinator_lock = asyncio.Lock()
self.fronius = fronius
self.host: str = entry.data[CONF_HOST]
# entry.unique_id is either logger uid or first inverter uid if no logger available
# prepended by "solar_net_" to have individual device for whole system (power_flow)
self.solar_net_device_id = f"solar_net_{entry.unique_id}"
self.system_device_info: DeviceInfo | None = None
self.inverter_coordinators: list[FroniusInverterUpdateCoordinator] = []
self.logger_coordinator: FroniusLoggerUpdateCoordinator | None = None
self.meter_coordinator: FroniusMeterUpdateCoordinator | None = None
self.ohmpilot_coordinator: FroniusOhmpilotUpdateCoordinator | None = None
self.power_flow_coordinator: FroniusPowerFlowUpdateCoordinator | None = None
self.storage_coordinator: FroniusStorageUpdateCoordinator | None = None
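        # Minimal sketch of the config entry data this class relies on (keys
        # taken from the code above; the literal values are placeholders):
        #
        #   entry.data == {CONF_HOST: "192.0.2.1", "is_logger": True}
        #   entry.unique_id == "<logger uid, or first inverter uid>"
        #
        # init_devices() below then decides which coordinators to create.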
async def init_devices(self) -> None:
"""Initialize DataUpdateCoordinators for SolarNet devices."""
if self.config_entry.data["is_logger"]:
self.logger_coordinator = FroniusLoggerUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_logger_{self.host}",
)
await self.logger_coordinator.async_config_entry_first_refresh()
# _create_solar_net_device uses data from self.logger_coordinator when available
self.system_device_info = await self._create_solar_net_device()
_inverter_infos = await self._get_inverter_infos()
for inverter_info in _inverter_infos:
coordinator = FroniusInverterUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_inverter_{inverter_info.solar_net_id}_{self.host}",
inverter_info=inverter_info,
)
await coordinator.async_config_entry_first_refresh()
self.inverter_coordinators.append(coordinator)
self.meter_coordinator = await self._init_optional_coordinator(
FroniusMeterUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_meters_{self.host}",
)
)
self.ohmpilot_coordinator = await self._init_optional_coordinator(
FroniusOhmpilotUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_ohmpilot_{self.host}",
)
)
self.power_flow_coordinator = await self._init_optional_coordinator(
FroniusPowerFlowUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_power_flow_{self.host}",
)
)
self.storage_coordinator = await self._init_optional_coordinator(
FroniusStorageUpdateCoordinator(
hass=self.hass,
solar_net=self,
logger=_LOGGER,
name=f"{DOMAIN}_storages_{self.host}",
)
)
async def _create_solar_net_device(self) -> DeviceInfo:
"""Create a device for the Fronius SolarNet system."""
solar_net_device: DeviceInfo = DeviceInfo(
configuration_url=self.fronius.url,
identifiers={(DOMAIN, self.solar_net_device_id)},
manufacturer="Fronius",
name="SolarNet",
)
if self.logger_coordinator:
_logger_info = self.logger_coordinator.data[SOLAR_NET_ID_SYSTEM]
solar_net_device[ATTR_MODEL] = _logger_info["product_type"]["value"]
solar_net_device[ATTR_SW_VERSION] = _logger_info["software_version"][
"value"
]
device_registry = await dr.async_get_registry(self.hass)
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
**solar_net_device,
)
return solar_net_device
async def _get_inverter_infos(self) -> list[FroniusDeviceInfo]:
"""Get information about the inverters in the SolarNet system."""
try:
_inverter_info = await self.fronius.inverter_info()
except FroniusError as err:
raise ConfigEntryNotReady from err
inverter_infos: list[FroniusDeviceInfo] = []
for inverter in _inverter_info["inverters"]:
solar_net_id = inverter["device_id"]["value"]
unique_id = inverter["unique_id"]["value"]
device_info = DeviceInfo(
identifiers={(DOMAIN, unique_id)},
manufacturer=inverter["device_type"].get("manufacturer", "Fronius"),
model=inverter["device_type"].get(
"model", inverter["device_type"]["value"]
),
name=inverter.get("custom_name", {}).get("value"),
via_device=(DOMAIN, self.solar_net_device_id),
)
inverter_infos.append(
FroniusDeviceInfo(
device_info=device_info,
solar_net_id=solar_net_id,
unique_id=unique_id,
)
)
return inverter_infos
@staticmethod
async def _init_optional_coordinator(
coordinator: FroniusCoordinatorType,
) -> FroniusCoordinatorType | None:
"""Initialize an update coordinator and return it if devices are found."""
try:
await coordinator.async_config_entry_first_refresh()
except ConfigEntryNotReady:
            # ConfigEntryNotReady raised from FroniusError / KeyError in
# DataUpdateCoordinator if request not supported by the Fronius device
return None
# if no device for the request is installed an empty dict is returned
if not coordinator.data:
return None
return coordinator
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 02 11:26:02 2015
@author: ISTI_EW
test fmm
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from six import string_types
import skfmm
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import matplotlib.gridspec as gridspec
import chabo.velocitygrid as vg
import chabo.inputs as inp
import sys
import tables
import os
import scipy
import scipy.ndimage
from collections import namedtuple
from scipy.ndimage import zoom
from scipy.linalg import norm
import pudb
import warnings
# warnings.filterwarnings('error')
class Chabo(object):
"""
Main container class for Chabo. Defines geometry, sets velocities,
and embeds stations/events\n
Parameters
----------
xmin : list-like (list, tuple, np.ndarray)
The minimum geometric coordinates for the grid space. Either local
coordinates (in meters) or lat lons can be used.
xmin can either be length 2 or 3 (for 2d or 3d case) and must be
the same length as xmax, dx, xnum
xmax : list-like (list, tuple, np.ndarray)
Same requirements as xmin but represents upper extents of geometry
dx : list-like (list, tuple, np.ndarray)
The grid spacing in meters for either 2 or 3 dimensions.
For example, [20,15] would imply 20m in the x direction and 15m in
the y direction.
xnum : list-like (list, tuple, np.ndarray)
The number of blocks to use in each direction
latlon : bool
If true values in lims are lon, lat, elevation so they will be
converted to local coordinates first.
    velMod : input for chabo.velocityGrid to make velocity model
        If a velocity model is not passed one will be created
vtype : str or None
The type of velocity model. Options are: '1d', '1dcsv', '2dgrid',
'2dcsv', '3dgrid', '3dcsv', '3dnll'. If None chabo will try to
determine the type automagically.
stations : pandas dataframe
Data frame with the following fields: 'ID','X','Y','Z', each row
corresponding to a different station
events : pandas dataframe
Same as stations but for events
phases : str, pandas dataframe, or none
Path to the phase file or pandas dataframe containing phase info
phase_type : str
the phase type of the velocity model (P or S)
squash : bool
If True convert all station and event coordinates to 2d if the chabo
instance is 2d.
hdf5_path : str
The path to an hdf5 store of travel time grids
Notes
------
Of the 4 geometric input parameters [xmin, xmax, dx, xnum] exactly
3 must be list like objects and of the same length, either 2 for 2
dimensions or 3 for 3 dimensions.
Acceptable combinations are:
xmin,xmax,xnum
xmin, xmax, dx
xmin,dx,xnum
"""
ray_columns = ['station', 'event', 'turningPoints', 'startTime']
spakey = ['X', 'Y', 'Z']
apkey = ['Xap', 'Yap', 'Zap']
inkey = ['Xin', 'Yin', 'Zin']
def __init__(self, xmin=None, xmax=None, dx=None, xnum=None, latlon=False,
velMod=None, vtype=None, stations=None, events=None,
phases=None, phase_type='P', squash=True, hdf5_path='.chabo.h5',
**kwargs):
# convert to local coords (cartesian) if required
self.latlon = latlon
if self.latlon:
xmin, xmax = self._convert2local(xmin, xmax)
# set geometric properties
self.xmin = xmin
mod_params = self._mesh_grid(xmin, xmax, dx, xnum)
self.X, self.xls, self.xmax, self.dx, self.xnum = mod_params
        # instantiate some variables, truncate key lists to actual dimensions
self.phase_type = phase_type
self.spakey = self.spakey[:len(self.X)] # columns for spatial dims
self.apkey = self.apkey[:len(self.X)] # columns for approx. locations
        self.inkey = self.inkey[:len(self.X)] # columns for indices
self._rms = None
# init container for travel time grids (keys are event IDs)
self.tts = {}
self.nested_tts = {} # used for velocity iterations
self.station_tts = {}
self.hdf5_path = hdf5_path
# init dataframe to contain ray paths for each event-station pair
self.rays = pd.DataFrame(columns=self.ray_columns)
        ## Attach required files if they were given, else set as None
self.stations = self.get_stations(stations, **kwargs)
self.events = self.get_events(events, **kwargs)
self.phases = self.get_phases(phases, **kwargs)
self.velMod = self.get_velocity_model(velMod, vtype=vtype, **kwargs)
@property
def rms(self):
self.calc_residuals()
return self._rms
### Geometry creation and input checks
def _check_inputs(self, xmin, xmax, dx, xnum):
# make sure xmin is not greater than xmax if defined
if all([isinstance(x, (list, tuple, np.ndarray)) for x in [xmin, xmax]]):
if any([x[0] > x[1] for x in zip(xmin, xmax)]):
raise ValueError('xmin cannot be greater than xmax')
checkList = np.array([isinstance(x, (list, tuple, np.ndarray))
for x in [xmin, xmax, dx, xnum]])
if checkList.sum() != 3: # make sure exactly 3 inputs are defined
msg = ('Exactly 3 of the following variables must be defined, the '
'fourth left as none: xmin, xmax, dx, xnum')
raise ValueError(msg)
if len(set([len(x) for x in [xmin, xmax, dx, xnum] if isinstance(x,
(list, tuple,
np.ndarray))])) > 1: # all inputs equal-lengthed
msg = 'All 3 input parameters used must be the same length'
raise ValueError(msg)
# acceptable binary combos of input parameters
acceptableCombos = np.array([[1, 1, 1, 0], [1, 1, 0, 1], [1, 0, 1, 1]])
        if 3 not in np.dot(acceptableCombos, checkList):
            msg = ('Unsupported combination of input parameters selected, see '
                   'notes on Chabo class for accepted combos')
raise Exception(msg)
return checkList
def _mesh_grid(self, xmin, xmax, dx, xnum): # Mesh grids
checkList = self._check_inputs(xmin, xmax, dx, xnum)
# if xmin, xmax and xnum defined
if all(checkList == [1, 1, 0, 1]):
            lims = list(zip(xmin, xmax, xnum)) # zip parameters together; list() so it can be iterated twice below
xls = [np.linspace(x[0], x[1], num=x[2]) for x in lims]
dx = [(float(x[1]) - x[0]) / x[2] for x in lims]
# if xmin, xmax, dx defined
if all(checkList == [1, 1, 1, 0]):
lims = zip(xmin, xmax, dx)
xls = [np.arange(x[0], x[1] + x[2], x[2]) for x in lims]
# Make sure no points beyond end of grid are included
xls = [x[:-1] if x[-1] > xmax[num] else x for num, x in enumerate(xls)]
xnum = [len(x) for x in xls]
# if xmin, dx and xnum defined
if all(checkList == [1, 0, 1, 1]):
lims = zip(xmin, dx, xnum)
xmax = [x[0] + x[1] * (x[2] - 1) for x in lims]
lims = zip(xmin, xmax, xnum)
xls = [np.linspace(x[0], x[1], num=x[2]) for x in lims]
X = np.meshgrid(*xls, indexing='ij')
return X, xls, xmax, dx, xnum
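    # Illustrative sketch of the three accepted geometry combinations handled
    # above (2-D case; the values are placeholders):
    #
    #   Chabo(xmin=[0, 0], xmax=[1000, 500], xnum=[101, 51])   # spacing derived
    #   Chabo(xmin=[0, 0], xmax=[1000, 500], dx=[10, 10])      # counts derived
    #   Chabo(xmin=[0, 0], dx=[10, 10], xnum=[101, 51])        # extents derived
    #
    # Exactly three of xmin/xmax/dx/xnum must be list-like (see _check_inputs).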
    def _convert2local(self, xmin, xmax):
        # convert lat lons to local coordinates
        raise NotImplementedError('Not yet implemented') # TODO implement
###### Attach functions (Vmodel, phases, stations, events, etc.)
def get_velocity_model(self, velmod, vtype=None, **kwargs):
if velmod is None:
return None
return vg.VelocityGrid(velmod, self, vtype=vtype, **kwargs)
def attach_velocity_model(self, velmod, vtype=None, **kwargs):
self.velMod = self.get_velocity_model(velmod, vtype=vtype, **kwargs)
def get_stations(self, sfile, stype='df', **kwargs):
if sfile is None: # if None set as None
return None
if isinstance(sfile, string_types): # if str read csv
sfile = pd.read_csv(sfile)
stadf = inp.Stations(sfile, self, stype=stype, **kwargs)
if self.latlon:
stadf = self._converToLocal(stadf)
stadf.index = stadf.ID
# make sure locations are within grid
for num, row in stadf.iterrows():
self._check_bounds(row)
return self._get_approx_locations(stadf)
def attach_stations(self, sfile, stype='df', **kwargs):
self.stations = self.get_stations(sfile, stype=stype, **kwargs)
def get_events(self, efile, etype='df', **kwargs):
if efile is None:
return None
if isinstance(efile, string_types):
efile = pd.read_csv(efile)
evdf = inp.Events(efile, self, etype=etype, **kwargs)
if self.latlon:
evdf = self._converToLocal(evdf)
evdf.index = evdf.ID
# make sure locations are within grid
for num, row in evdf.iterrows():
self._check_bounds(row)
return self._get_approx_locations(evdf)
def attach_events(self, efile, etype='df', **kwargs):
self.events = self.get_events(efile, etype=etype, **kwargs)
def get_phases(self, phases, ptype='df', **kwargs):
if phases is None:
return None
if isinstance(phases, string_types):
phases = pd.read_csv(phases)
        # attach phases and normalize TT column by subtracting origin time
phases = inp.Phases(phases, self, ptype=ptype, **kwargs)
# temp event df
        if (abs(phases.TT)).median() > 6 * 60: # determine if origins are timestamps
et = self.events[['O']]
et['EID'] = self.events.ID
pt = phases.merge(et, on='EID')
pt['TT'] = pt['TT'] - pt['O']
phases = pt.drop('O', axis=1)
return phases
def attach_phases(self, phases, ptype='df', **kwargs):
self.phases = self.get_phases(phases, ptype=ptype, **kwargs)
# Check if any station or event locations are outside of grid
def _check_bounds(self, row, dimnames=['X', 'Y', 'Z']):
appdims = np.array([row[dimnames[dinum]] for dinum in
range(len(self.xmin))])
if any((appdims > self.xmax) | (appdims < self.xmin)):
msg = (('The following row of the events or stations dataframe '
'does not fit in the dimension limits of %s to %s: '
'\n%s \n ') % (self.xmin, self.xmax, row))
raise ValueError(msg)
### Embed stations/events in grid
def _get_approx_locations(self, df, grid=None):
"""
loop through the dataframe (either station or events) and find
nearest location in grid, append approximate locations and
        indices. Can pass a custom grid
"""
if grid is None:
grid = self
for num, row in df.iterrows():
coords = [row[x] for x in self.spakey]
xind = [abs(coords[num2] - grid.xls[num2]).argmin() for num2 in
range(len(grid.xls))] # index of best fitting x point in L1
for num2 in range(len(grid.xls)):
df.loc[num, self.apkey[num2]] = grid.xls[num2][xind[num2]]
df.loc[num, self.inkey[num2]] = int(xind[num2])
df.loc[num, 'embed_error'] = self._get_embed_errors(df.loc[num])
# cast dtypes
for col in df.columns:
try:
df[col] = df[col].astype(float)
except Exception:
pass
for in_col in self.inkey:
            df[in_col] = df[in_col].astype(int)
return df.sort_index()
def _get_embed_errors(self, row):
"""
Get the error associated with embedding the station/event in a grid
(in grid distance units)
"""
return norm([row[x] - row[y] for x, y in zip(self.spakey, self.apkey)])
def id2event(self, eid):
"""
Given an event id (eid) return the row of that event
"""
try:
return self.events.loc[eid]
        except KeyError:
            msg = 'No events with ID %s found' % eid
            raise KeyError(msg)
def ind2event(self, ind):
return self.events.iloc[ind]
def id2station(self, sid):
"""
Given an station id (sid) return the row of that station
"""
try:
return self.stations.loc[sid]
        except KeyError:
            msg = 'No stations with ID %s found' % sid
            raise KeyError(msg)
def ind2station(self, ind):
return self.stations.iloc[ind]
### Fast marching and ray tracing functions
def fast_march(self, get_rays=True, num_blocks=10):
"""
Call the fast marching method calculate travel times in each grid point
for a given event location
Parameters
----------
get_rays : bool
If true attempt to back calculate the ray paths based on
travel time grid
num_blocks : int
The number of neighbors to include for determining path
tracing back to origin
"""
# make sure that stations and events are defined
con1 = (self.events is None or not len(self.events))
con2 = (self.stations is None or not len(self.stations))
if con1 or con2:
            msg = ('No events or no stations passed, check events and stations '
                   'dataframes')
raise ValueError(msg)
# make sure a valid velocity model is found
if self.velMod is None:
msg = 'Undefined velocity model, define it with attach_velocity_model'
raise ValueError(msg)
# If phases are attached
if self.phases is not None:
            # get events for which phase picks exist
eventdf = self.events[self.events.ID.isin(set(self.phases.EID))]
else: # Else use all events
eventdf = self.events
for evnum, evrow in eventdf.iterrows(): # Loop through each event
phi = self._get_phi(evrow)
tt = skfmm.travel_time(phi, self.velMod, dx=self.dx)
self.tts[evrow.ID] = tt
if get_rays: # If ray paths are to be back calculated
# if phases are attached
if self.phases is not None:
                    # get phases on current event
evephases = self.phases[self.phases.EID == evrow.ID]
# Stations which have phases for current event
stadf = self.stations[self.stations.ID.isin(evephases.SID)]
else: # If no phases use all station-event pairs
stadf = self.stations
for stanum, starow in stadf.iterrows(): # loop through each station
sr = self._get_ray_paths(starow, tt, evrow, num_blocks)
self.rays.loc[len(self.rays)] = sr
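    # Hedged workflow sketch: a typical forward run using the attach_* helpers
    # defined above (file names and grid values are placeholders):
    #
    #   cb = Chabo(xmin=[0, 0], xmax=[1000, 500], dx=[10, 10])
    #   cb.attach_velocity_model('vel_1d.csv')
    #   cb.attach_stations('stations.csv')
    #   cb.attach_events('events.csv')
    #   cb.fast_march(get_rays=True)   # fills cb.tts and cb.rays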
    def iter_models(self, vmods, parallel=True, cores=-1, output='residuals'):
        """
        Iterate through a series of vmods, return travel times at stations
        or residuals. Note: a new velocity grid is defined for each station
        in order to minimize the number of points that have to be calculated;
        the number of grid points, however, remains constant.
        Parameters
        ----------
        vmods : iterator
            an iterator of (velocity model grid, model number) tuples
        parallel : bool
            If True run all models in parallel using joblib's multiprocessing
        cores : int
            The number of cores to use in the calculation
        output : str
            The output type, either "residuals" or "travel_times"
        Returns
        --------
        Results are returned as a flat dataframe with the following columns:
        MID, SID, EID, travel_time, residual
"""
# put import here so that joblib is not required
from joblib import Parallel, delayed
# Init key vars
dx = self.dx
phi = self._get_phi(self.stations.iloc[0])
ein = [tuple(self.events[x]) for x in self.inkey]
cols = self.events.ID.astype(int).values.tolist() + ['modnum']
# get travel times at stations for each vmod
if parallel: # if use multiprocessing
out = Parallel(n_jobs=cores)(delayed(go)(vmod, phi, dx, ein, vnum)
for vmod, vnum in vmods)
else: # else old fashion way
out = []
for vmod, vnum in vmods:
out.append(go(vmod, phi, dx, ein, vnum))
        # wrangle into travel time df with mod num as index and eid as column
df = pd.DataFrame(out, columns=cols)
df.set_index('modnum', drop=True, inplace=True)
if output == 'travel_times': # return only travel times
return df
if output == 'residuals':
# subtract phase times from travel times for residuals
pp = self._get_tt_vector(df)
df = -df + pp
return df
else:
msg = 'output of %s not supported' % output
raise ValueError(msg)
def _get_tt_vector(self, df):
# function to get a vector of observed phases for subtracting from TT
dft = pd.DataFrame(columns=df.columns)
dft2 = self.phases[['EID', 'TT']].T
dft2.columns = dft2.loc['EID']
dft.loc['TT'] = dft2.loc['TT']
return dft.values[0]
def _get_phi(self, evrow, grid=None):
"""
Return the phi array for the skfmm
"""
if grid is None:
grid = self
phi = np.ones_like(grid.X[0])
# index of event in grid
ind = [int(evrow[x]) for x in self.inkey]
phi[tuple(ind)] = 0 # set event location to 0
return phi
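    # Note on _get_phi above: skfmm.travel_time treats the zero level set of
    # phi as the source, so an array of ones with a single 0 at the event's
    # grid index makes travel times propagate outward from that event cell.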
def _get_ray_paths(self, starow, tt, everow, num_blocks):
# function to back calculate ray paths from station to source
trail = [] # list to append each coordinate along path
turPoints = []
di = {}
        # index of station in grid
staind = [int(starow[x]) for x in ['Xin', 'Yin', 'Zin'] if x in starow]
# make array of infinity to pad travel time boundaries (bounded tt)
ttb = np.ones(np.add(np.shape(tt), num_blocks)) * np.inf
# fill in travel times in bottom corner
ttb[[slice(x, y) for x, y in zip([0] * len(tt.shape), tt.shape)]] = tt
startTime = tt[tuple(staind)] # start Time
timec = startTime # current time
cind = tuple(staind) # current index
relInd1 = [None]
turPoints.append(self.ind2coords(cind))
while timec > 0:
trail.append(cind)
cind, relInd2 = self._find_next_block(cind, ttb, num_blocks)
if any(relInd1) and any(relInd2 / float(max(abs(relInd2))) !=
relInd1 / float(max(abs(relInd1)))):
turPoints.append(self.ind2coords(cind))
relInd1 = relInd2
timec = ttb[cind]
turPoints.append(self.ind2coords(cind))
trail.append(cind)
di = {'station': starow.ID, 'event': everow.ID, 'startTime': startTime,
'turningPoints': turPoints}
return pd.Series(di)
def _find_next_block(self, cind, ttb, num_blocks):
neindex = [[int(x + y) for y in range(-num_blocks, num_blocks + 1)]
for x in cind] # index of all neighbors to slice
        # slice of tt with all neighbors within num_blocks in any dim to ind
neiblock = ttb[np.ix_(*neindex)] - ttb[tuple(cind)]
for nin, value in np.ndenumerate(neiblock): # for index (nin) and value of slice
# index minus number of neighbors, centers 0 on active block
ni1 = np.array(nin) - num_blocks
norma = np.multiply(ni1, self.dx) # normalize for varying lengths
# RMS of distance from active block
normfact = np.sqrt(np.sum(np.square(norma)))
if value != 0.:
neiblock[nin] = value / normfact
relativeInd = np.add(np.unravel_index(neiblock.argmin(), neiblock.shape),
-num_blocks) # the relative index of block selected in slice
argmin = cind + relativeInd # the index of the block with the least
return tuple(argmin), relativeInd
### Kernel creation and associated functions
# Note : differently spaced velocity grid from forward problem allowed
def make_kernel(self, velocityDownSample=5):
"""
Function to create the output sparse matrix of travel times and
locations for inverse problem
Parameters
----------
velocityDownSample : int
A multiplier to the dx to get an output grid for inversion
"""
# Check velocityDownSample inputs
if not isinstance(velocityDownSample, int):
msg = 'velocityDownSample must be an int'
raise TypeError(msg)
# make sure velocityDownSample divides evenly
if any([len(x) % velocityDownSample != 0 for x in self.xls]):
msg = (('velocityDownSample %d is not a factor of all dimensions '
'%s of xls') % (velocityDownSample, str([len(x) for x in
self.xls])))
raise ValueError(msg)
if len(self.rays) < 1:
msg = ('fast marching has not yet been called or ray paths not '
'calculated. Calculating forward problem.')
print(msg)
self.fast_march()
# get coords of new grid system (for outputs)
params = self._mesh_grid(self.xmin, self.xmax, list(np.array(self.dx) *
float(velocityDownSample)), None)
self.vX, self.vxls, self.vxmax, self.vdx, self.vxnum = params
if velocityDownSample == 1: # if no down sampling
self.kernVel = self.velMod
else:
self.kernVel = zoom(self.velMod, 1. / np.array(velocityDownSample),
order=0) # kernel velocity model (downsampled)
# Get edges
edges = [list(np.array(self.vxls[num]) - self.vdx[num] / 2.)
for num in range(len(self.vdx))]
for num, a in enumerate(edges):
a.append(a[-1] + self.vdx[num])
kernel = []
        for num, row in self.rays.iterrows():  # iterate over each station-event pair
kernel.append(self._get_paths(self.vxls, row, self.vdx, edges))
return np.array(kernel)
def _get_paths(self, vxls, row, vdx, edges):
kernrow = scipy.zeros([len(x) for x in vxls]) # a row of the kernel
turningPoints = row.turningPoints
for tnum in range(len(turningPoints) - 1):
coords = [] # coordinates of each step
mods = [] # index modifiers of each step
st = turningPoints[tnum] # current start point
stind = self._find_closest_index(st, vxls) # closest index to start
ed = turningPoints[tnum + 1] # current end point
edind = self._find_closest_index(ed, vxls) # closest index to end
dif = np.array(ed) - np.array(st) # vector of line segement
inddif = np.array(edind) - np.array(stind) # vector of index movement
# if line segment in same block, add len and cont to next turning point
if stind == edind:
kernrow[tuple(stind)] += norm(np.array(ed) - np.array(st))
continue
coords.append(st)
mods.append(stind)
# jacobian of line segment
J = [[float(x) / y if y != 0 else 0 for x in dif] for y in dif]
J = list(self._get_unique_rows(J))
for jnum, j in enumerate(J):
                # if zero jacobian row (i.e. the line doesn't move in this dimension)
if all(np.array(j) == 0):
continue
sign = np.sign(inddif[jnum]) # sign of step
firstStep = abs(edges[jnum][int(stind[jnum] + .5 + .5 * sign)]
- st[jnum]) * sign
# which index ticks up for this jacobian
indtick = np.array([int(x == 1) for x in j]) * sign
if firstStep != 0:
coords.append(np.array(st) + (np.array(j) * firstStep))
mods.append(indtick)
for move in range(1, abs(inddif[jnum])):
coords.append(coords[-1] + np.array(j) * np.array(vdx[jnum]) * sign)
mods.append(indtick)
ar = np.hstack([coords, mods])
# add end coord to end of array
ar = np.vstack([ar, np.concatenate((ed, np.zeros(len(st))))])
sortindex = np.lexsort(ar[:, :len(vdx)].transpose())
# sorted array with [coords,indexmods], only unique rows
arsrt = self._get_unique_rows(ar[sortindex])
endar = np.array([all(x == ed) for x in arsrt[:, :len(ed)]]).argmax()
            if endar == 0:  # if the end actually occurs at the beginning, switch order
arsrt = arsrt[::-1]
endar = np.array([all(x == ed) for x in arsrt[:, :len(ed)]]).argmax()
arsrt = arsrt[:endar + 1] # trim to end point
norms = [scipy.linalg.norm(x) for x in (arsrt[1:, :len(st)] -
arsrt[:-1, :len(st)])] # length of each line segment
gridindex = np.cumsum(arsrt[:, len(st):], axis=0)[:-1]
for indnum, gridind in enumerate(gridindex):
grin = tuple([int(x) for x in gridind]) # convert to tuple and ints
kernrow[grin] += norms[int(indnum)]
        return np.ravel(kernrow)  # return a flattened version of the kernel row
def _find_closest_index(self, point, xls):
return [abs(point[num] - xls[num]).argmin()
for num in range(len(xls))] # index of best fit x point in L1 sense
def _get_unique_rows(self, a):
# Method for getting unique rows of a np array and preserving order
df = pd.DataFrame(np.array(a)).drop_duplicates()
df = df.convert_objects(convert_numeric=True) # make sure dtypes are right
return np.array(df)
### Get residuals
def calc_residuals(self):
"""
Get the residuals from the phases at each station
"""
        if not len(self.phases):  # no phases, can't calc rms
return None
if not self.tts:
self.fast_march(get_rays=False)
# get index for each station
sinds = [tuple(self.stations[x]) for x in self.inkey]
# get travel time to each station for each event, stuff into df add SID
stimes = [x[sinds] for key, x in self.tts.items()]
sti_df = pd.DataFrame(stimes, columns=self.stations.ID, index=['pTT'])
sti_df = sti_df[self.phases.SID].T
sti_df['SID'] = sti_df.index
# merge into current phases
self.phases = self.phases.merge(sti_df)
# calc residuals and set _rms attr
self.phases['resid'] = self.phases.TT - self.phases.pTT
self._rms = norm(self.phases.resid) / np.sqrt(len(self.phases))
### Visualization methods
def plot_contours_2D(self, rayPlotStr='k'):
"""
        Make a 2D plot of travel time contours and calculated ray paths
Parameters
----------
rayPlotStr : str
A plot string recognizable by matplot lib for plotting ray paths
"""
if len(self.tts) < 1:
raise Exception('No travel times calculated, run fast_march')
for ttnum, tt in self.tts.items():
gs = gridspec.GridSpec(2, 2, width_ratios=[10, .5],
height_ratios=[10, .5], wspace=.05, hspace=.05)
ax = plt.subplot(gs[0])
cmesh = ax.pcolormesh(self.xls[0], self.xls[1], self.velMod.transpose(),
cmap='rainbow', alpha=.2)
cs = ax.contour(self.X[0], self.X[1], tt)
ax.plot(self.stations.Xap, self.stations.Yap, 'v', lw=500)
ax.plot(self.events.Xap, self.events.Yap, 'o', lw=500)
ax.xaxis.tick_top()
plt.gca().invert_yaxis()
ax1 = plt.subplot(gs[1])
cb = plt.colorbar(cs, cax=ax1, label='Travel Time (s)')
ax2 = plt.subplot(gs[2])
plt.colorbar(cmesh, cax=ax2, orientation='horizontal',
label='Velocity (m/s)') # ,cax=caxvel)
for num, row in self.rays.iterrows():
ax.plot(np.array(row.turningPoints)[:, 0],
np.array(row.turningPoints)[:, 1], rayPlotStr, lw=4)
for line in cb.lines: # change width of each line
line.set_linewidth(20)
ax.set_aspect('equal')
plt.show()
### Misc. functions
def ind2coords(self, inds):
"""
Convert index (of forward problem) to spatial coordinates
"""
return [self.xls[num][x] for num, x in enumerate(inds)]
def convert_1D(self, vels, deps):
"""
        Convert a 1D velocity model (velocities and depths) to a grid of the
        appropriate size. Velocities and depths are given in km.
"""
# deps = [self.dmin] + list(deps) + [self.dmax]
mod = np.ones_like(self.X[0]) * vels[0]
for vnum, vel in enumerate(vels):
con1 = self.X[-1] > deps[vnum] * 1000.
con2 = self.X[-1] <= deps[vnum + 1] * 1000.
mod[con1 & con2] = vel * 1000.
return mod
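        # Hedged usage sketch (made-up numbers): convert_1D([2.5, 4.0, 6.0],
        # [0.0, 1.0, 5.0, 30.0]) fills grid nodes with 0 < depth <= 1 km at
        # 2500 m/s, 1-5 km at 4000 m/s and 5-30 km at 6000 m/s; note that deps
        # needs one more entry than vels to bound the last layer.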
def station_fast_march(self, recalculate=False):
"""
Generate station travel time grids. These essentially treat the
station as the source and calculate travel time grids to each
possible event location
Parameters
----------
recalculate : bool
            If True, recalculate travel time grids even if already calculated
        Returns
        -------
        None; travel time grids are cached in self.station_tts
        """
stas = set(self.stations.ID)
# try loading the disk store
self.load_hdf5_store()
# if station grids already there skip
if stas.issubset(self.station_tts) and not recalculate:
return
# iterate through the stations and calc travel time grid
for ind, row in self.stations.iterrows():
phi = self._get_phi(row)
tts = skfmm.travel_time(phi, self.velMod, dx=self.dx)
self.station_tts[row.ID] = tts
# if old store is present delete it
if os.path.exists(self.hdf5_path):
os.remove(self.hdf5_path)
# create a store
self.create_hdf5_store(store_type='station')
def load_hdf5_store(self, store_type='station'):
"""Function to load the hdf5 cache (if it exists) into memory"""
        if self.hdf5_path is not None and os.path.exists(self.hdf5_path):
with tables.open_file(self.hdf5_path, 'r') as f:
for ind, sta in self.stations.iterrows():
name = self._get_hdf5_name(sta.ID, store_type)
tts = self._load_hdf5_array(name, f)
self.station_tts[sta.ID] = tts
def create_hdf5_store(self, store_type='station'):
"""Saves travel time grides to disk"""
if not os.path.exists(self.hdf5_path) and self.hdf5_path is not None:
with tables.openFile(self.hdf5_path, 'w') as f:
for ind, sta in self.stations.iterrows():
name = self._get_hdf5_name(sta.ID, store_type)
tts = self.station_tts[sta.ID]
self._write_hdf5_array(name, tts, f)
def _write_hdf5_array(self, name, tts, f):
atom = tables.Atom.from_dtype(tts.dtype)
# save with compression
#filters = tables.Filters(complib='blosc', complevel=5)
# ds = f.createCArray(f.root, 'all_data', atom, all_data.shape,
# filters=filters)
# save w/o compression
        ds = f.create_carray(f.root, name, atom, tts.shape)
ds[:] = tts
def _load_hdf5_array(self, name, f):
tts = getattr(f.root, name)[:]
return tts
def _get_hdf5_name(self, id, otype):
"""
Get the name of the expected chunk in the pytables data store
Parameters
----------
id : int, float, or str
The ID of the event or station
otype : str (station or event)
The type of object to generate a name for
Returns
-------
The str of the expected name
"""
if otype == 'station':
pre = 'sta_'
elif otype == 'event':
pre = 'eve_'
else:
msg = '%s is not a supported otype' % otype
raise ValueError(msg)
return pre + '%s' % id
def locate(self):
"""
Simple location algorithm to locate the events found in the phases
file
Returns
-------
A dataframe with estimated locations
"""
        # if station tt grids aren't calculated, do it
if not self.station_tts:
self.station_fast_march()
        # concatenate into a 3- or 4-d array
ar = np.concatenate([x[..., None] for x in self.station_tts.values()],
axis=len(self.X))
# demean
ar = ar - ar.mean(axis=-1, keepdims=True)
# arrange arrays
def _flatten_keys(cha):
"""Truncate the keys of the class values, shadow with new values
on instance to only be 2D"""
for key in ['spakey', 'apkey', 'inkey']:
trunc = getattr(cha, key)[:2]
setattr(cha, key, trunc)
return cha
class ChaboDriver:
"""
A meta-chabo, primarily for doing velocity model work. Can be used
to squish a 3d chabo into 2D when doing 1D model work
"""
def __init__(self, cha, parallel=True, cores=-1, squish=True):
"""
Init a ChaboDriver, requires a base chabo instance
Parameters
--------
cha : instance of Chabo
The base case chabo instance
parallel : bool
If True use joblib for multiprocessing
        cores : int or None
            The number of cores to use for multiprocessing; -1
            means use all available
        squish : bool
            If True, squish a 3D problem into 2D
"""
self.parallel = parallel
self.cores = cores
        if squish:  # flatten the keys if they are to be squished
            skey = self._flatten_stakey(cha.stations)
            ekey = self._flatten_evekey(cha.events, cha.stations)
            phkey = cha.phases
            # set the spatial keys to 2d version
            cha = _flatten_keys(cha)
else: # not tested
skey = cha.stations
ekey = cha.events
phkey = cha.phases
# init chabos for each station
self.chas = {}
for sid in skey.ID.unique():
# get event key and station key for this station only
stas = skey[sid == skey.ID]
eves = ekey[sid == ekey.SID]
if phkey is not None:
phs = phkey[phkey.SID == sid]
else:
phs = None
grid = self._make_new_grid(stas, eves, cha)
cha_new = Chabo(xmin=grid.xmin, xmax=grid.xmax, dx=grid.dx,
stations=stas, phases=phs, events=eves)
self.chas[sid] = cha_new
def iter_models(self, modgen, output='residuals'):
"""
Make a panel of residual times (or travel times)
Parameters
----------
modgen : callable
A callable that returns an iterable of models,
model should be something that can attach with the
chabo.attach_velocity_model function
output : str
            The type of output to return in the panel (residuals or
travel_times)
"""
dfd = {} # a data frame dict for storing results
for sid, cha in self.chas.items():
models = yield_models(modgen, cha)
dfd[sid] = cha.iter_models(models, output=output, cores=self.cores,
parallel=self.parallel)
return pd.Panel(dfd)
def evaluate_models(self, pan, func):
"""
Evaluate the models using func contained in the pandas panel produced
by running self.iter_models, returns a series with model numbers
as keys and the result of func as values
Parameters
----------
pan : pd.Panel
The panel produced by the iter_models method
func : callable
A callable that operates on a dataframe of events (index) and
stations columns. Must handle NaNs as they do appear in the DFs
"""
from joblib import Parallel, delayed
# make a generator that slices the panel along the model num axis
models = pan.major_axis
df_gen = (pan.loc[:, x, :] for x in models)
cores = self.cores
if self.parallel:
out = Parallel(n_jobs=cores)(delayed(func)(df) for df in df_gen)
else:
out = []
for df in df_gen:
out.append(func(df))
ser = pd.Series(out, models)
return ser
def _flatten_stakey(self, skey):
"""
        Function to reduce a station key from 3D to 2D. It does this by
        setting X to 0 and Y to the depth. The events will then each be
        squished relative to the stations
"""
# make backups
skey['X_old'], skey['Y_old'], skey['Z_old'] = skey.X, skey.Y, skey.Z
skey['X'] = 0.0
skey['Y'] = skey.Z
return skey
def _flatten_evekey(self, evkey, stakey):
"""
        Function to reduce the events from 3D to 2D. In order to do this each event
        must have a different location for each station. The horizontal distance
        between each station and the event is calculated and set as the X coord,
        and the depth is set as the Y coord. An extra column is added for the SID
        so that the event used for each station can be identified later
"""
evkey['X_old'], evkey['Y_old'], evkey['Z_old'] = evkey.X, evkey.Y, evkey.Z
out_list = []
# iterate over each station and create events for it
for snum, sta in stakey.iterrows():
# copy df for each station, adjust coords
df = evkey.copy()
df['SID'] = sta.ID
hdist = np.linalg.norm([sta.X_old - df.X, sta.Y_old - df.Y], axis=0)
df['X'] = hdist
df['Y'] = df.Z
out_list.append(df)
return pd.concat(out_list, ignore_index=True)
def _make_new_grid(self, sta, eves, cha):
"""
        Function to make a new grid with only the necessary limits
        """
        gr = namedtuple('Grid', 'X, xls, xmax, dx, xnum, xmin')
xmin = [np.min(np.append(eves[col], sta[col]))
for col in cha.spakey]
xmax = [np.max(np.append(eves[col], sta[col]))
for col in cha.spakey]
xnum = cha.xnum
dx = cha.dx[:len(cha.spakey)]
X, xls, xmax, dx, xnum = cha._mesh_grid(xmin, xmax, dx, None)
return gr(X, xls, xmax, dx, xnum, xmin)
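# A minimal usage sketch (not part of the original module): drive a set of
# candidate velocity models through a ChaboDriver and pick the best one by
# RMS residual. `cha` is assumed to be a fully built Chabo instance and
# `modgen` a callable returning an iterable of ((vels, deps), modnum) pairs.
def _example_drive_models(cha, modgen):
    driver = ChaboDriver(cha, parallel=False, squish=True)
    pan = driver.iter_models(modgen, output='residuals')
    scores = driver.evaluate_models(pan, eval_rms)
    return scores.idxmin()  # model number with the lowest RMS residual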
########## Functions for model evaluation
def eval_rms(df):
"""
Function to evaluate rms of dataframe
"""
d = df ** 2 # square
out = d.mean(skipna=True).mean(skipna=True) # double mean
# out = d.median(skipna=True).median(skipna=True) # double median
if np.isnan(out): # if NaN is produced return inf so model isn't selected
return np.inf
else:
return np.sqrt(out)
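# Illustrative sketch (assumed data, not from the original code): eval_rms on a
# small residual dataframe containing a NaN, as produced by iter_models.
def _example_eval_rms():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'STA1': [0.1, -0.2, np.nan], 'STA2': [0.0, 0.3, -0.1]})
    return eval_rms(df)  # sqrt of the double mean of squared residuals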
def eval_l1_norm(df):
"""
Function to evaluate L1 norm of dataframe
"""
d = abs(df) # absolute value
out = d.mean(skipna=True).mean(skipna=True) # double mean
# out = d.median(skipna=True).median(skipna=True) # double median
if np.isnan(out): # if NaN is produced return inf so model isn't selected
return np.inf
else:
return out
def yield_models(mod_gen, cha):
""" Yield models from a modgen, should be vels and deps or
same shape as current velMod"""
models = mod_gen()
for mod in models:
con1 = hasattr(mod[0], 'shape') # duck type
con2 = np.shape(mod[0]) == np.shape(cha.X[0])
if con1 and con2:
yield mod[0], mod[1]
else:
(vels, deps), mod_num = mod
yield cha.convert_1D(vels, deps), mod_num
def _get_vels_deps(x):
"""
    Split a flat 1D model vector [vels..., deps...] into separate velocity and depth arrays
"""
dim = len(x) // 2 + 1
vels = x[:dim]
if dim == 1:
deps = ()
else:
deps = x[dim:]
return vels, deps
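# Illustrative sketch (made-up values): how a flat model vector packed as
# [v1, v2, v3, d1, d2] is split by _get_vels_deps.
def _example_get_vels_deps():
    x = [2.5, 4.0, 6.0, 1.0, 5.0]  # three velocities (km/s), two depths (km)
    vels, deps = _get_vels_deps(x)
    assert list(vels) == [2.5, 4.0, 6.0]
    assert list(deps) == [1.0, 5.0]
    return vels, deps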
def go(vmod, phi, dx, sind, modnum):
"""
    Function called by iter_models, optionally run in parallel via joblib
"""
tt = skfmm.travel_time(phi, vmod, dx)
return np.append(tt[sind], modnum)
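# Hedged sketch of how `go` is used (grid sizes and velocities invented):
# build a phi grid with the source node set to zero, mirroring _get_phi above,
# then compute travel times for one candidate velocity model.
def _example_go():
    import numpy as np
    phi = np.ones((20, 20))
    phi[10, 10] = 0.                    # event location, as in _get_phi
    vmod = np.full((20, 20), 3000.0)    # homogeneous 3 km/s model
    sind = ([2, 15], [5, 5])            # grid indices of two receivers
    return go(vmod, phi, [10.0, 10.0], sind, 0)  # last argument is the model number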
def init_chabo_on_directory(directory, dx=None, xnum=None):
"""
Init a chabo instance by reading the files in a directory, finding
those that are chabo files, setting x, y, and z limits based on those
files.
Parameters
----------
directory : str
A path to the directory
dx : None, int, float, or list of int, float
The grid spacing, in meters. If a single number apply to all
dimensions, else list must be the same length as the dimensions
xnum : None, int, float, or list of int, float
The number of grid cells to have in each direction
Returns
-------
"""
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
XMPP-specific SASL profile.
"""
import re
from twisted.internet import defer
from twisted.words.protocols.jabber import sasl_mechanisms, xmlstream
from twisted.words.xish import domish
# The b64decode and b64encode functions from the base64 module are new in
# Python 2.4. For Python 2.3 compatibility, the legacy interface is used while
# working around MIMEisms.
try:
from base64 import b64decode, b64encode
except ImportError:
import base64
def b64encode(s):
return "".join(base64.encodestring(s).split("\n"))
b64decode = base64.decodestring
NS_XMPP_SASL = 'urn:ietf:params:xml:ns:xmpp-sasl'
def get_mechanisms(xs):
"""
Parse the SASL feature to extract the available mechanism names.
"""
mechanisms = []
for element in xs.features[(NS_XMPP_SASL, 'mechanisms')].elements():
if element.name == 'mechanism':
mechanisms.append(str(element))
return mechanisms
class SASLError(Exception):
"""
SASL base exception.
"""
class SASLNoAcceptableMechanism(SASLError):
"""
The server did not present an acceptable SASL mechanism.
"""
class SASLAuthError(SASLError):
"""
SASL Authentication failed.
"""
def __init__(self, condition=None):
self.condition = condition
def __str__(self):
return "SASLAuthError with condition %r" % self.condition
class SASLIncorrectEncodingError(SASLError):
"""
SASL base64 encoding was incorrect.
RFC 3920 specifies that any characters not in the base64 alphabet
and padding characters present elsewhere than at the end of the string
MUST be rejected. See also L{fromBase64}.
This exception is raised whenever the encoded string does not adhere
to these additional restrictions or when the decoding itself fails.
The recommended behaviour for so-called receiving entities (like servers in
client-to-server connections, see RFC 3920 for terminology) is to fail the
SASL negotiation with a C{'incorrect-encoding'} condition. For initiating
entities, one should assume the receiving entity to be either buggy or
malevolent. The stream should be terminated and reconnecting is not
advised.
"""
base64Pattern = re.compile("^[0-9A-Za-z+/]*[0-9A-Za-z+/=]{,2}$")
def fromBase64(s):
"""
Decode base64 encoded string.
This helper performs regular decoding of a base64 encoded string, but also
rejects any characters that are not in the base64 alphabet and padding
occurring elsewhere from the last or last two characters, as specified in
section 14.9 of RFC 3920. This safeguards against various attack vectors
among which the creation of a covert channel that "leaks" information.
"""
if base64Pattern.match(s) is None:
raise SASLIncorrectEncodingError()
try:
return b64decode(s)
except Exception, e:
raise SASLIncorrectEncodingError(str(e))
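# Illustrative sketch (not part of the original module): fromBase64 accepts
# well-formed base64 but rejects padding anywhere other than the end, per
# section 14.9 of RFC 3920.
def _example_fromBase64():
    assert fromBase64("Zm9v") == "foo"
    try:
        fromBase64("Zm=9v")  # '=' in the middle must be rejected
    except SASLIncorrectEncodingError:
        return True
    return False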
class SASLInitiatingInitializer(xmlstream.BaseFeatureInitiatingInitializer):
"""
Stream initializer that performs SASL authentication.
The supported mechanisms by this initializer are C{DIGEST-MD5}, C{PLAIN}
and C{ANONYMOUS}. The C{ANONYMOUS} SASL mechanism is used when the JID, set
on the authenticator, does not have a localpart (username), requesting an
anonymous session where the username is generated by the server.
Otherwise, C{DIGEST-MD5} and C{PLAIN} are attempted, in that order.
"""
feature = (NS_XMPP_SASL, 'mechanisms')
_deferred = None
def setMechanism(self):
"""
Select and setup authentication mechanism.
Uses the authenticator's C{jid} and C{password} attribute for the
authentication credentials. If no supported SASL mechanisms are
        advertised by the receiving party, a failing deferred is returned with
a L{SASLNoAcceptableMechanism} exception.
"""
jid = self.xmlstream.authenticator.jid
password = self.xmlstream.authenticator.password
mechanisms = get_mechanisms(self.xmlstream)
if jid.user is not None:
if 'DIGEST-MD5' in mechanisms:
self.mechanism = sasl_mechanisms.DigestMD5('xmpp', jid.host, None,
jid.user, password)
elif 'PLAIN' in mechanisms:
self.mechanism = sasl_mechanisms.Plain(None, jid.user, password)
else:
raise SASLNoAcceptableMechanism()
else:
if 'ANONYMOUS' in mechanisms:
self.mechanism = sasl_mechanisms.Anonymous()
else:
raise SASLNoAcceptableMechanism()
def start(self):
"""
Start SASL authentication exchange.
"""
self.setMechanism()
self._deferred = defer.Deferred()
self.xmlstream.addObserver('/challenge', self.onChallenge)
self.xmlstream.addOnetimeObserver('/success', self.onSuccess)
self.xmlstream.addOnetimeObserver('/failure', self.onFailure)
self.sendAuth(self.mechanism.getInitialResponse())
return self._deferred
def sendAuth(self, data=None):
"""
Initiate authentication protocol exchange.
If an initial client response is given in C{data}, it will be
sent along.
@param data: initial client response.
@type data: C{str} or C{None}.
"""
auth = domish.Element((NS_XMPP_SASL, 'auth'))
auth['mechanism'] = self.mechanism.name
if data is not None:
auth.addContent(b64encode(data) or '=')
self.xmlstream.send(auth)
def sendResponse(self, data=''):
"""
Send response to a challenge.
@param data: client response.
@type data: C{str}.
"""
response = domish.Element((NS_XMPP_SASL, 'response'))
if data:
response.addContent(b64encode(data))
self.xmlstream.send(response)
def onChallenge(self, element):
"""
Parse challenge and send response from the mechanism.
@param element: the challenge protocol element.
@type element: L{domish.Element}.
"""
try:
challenge = fromBase64(str(element))
except SASLIncorrectEncodingError:
self._deferred.errback()
else:
self.sendResponse(self.mechanism.getResponse(challenge))
def onSuccess(self, success):
"""
Clean up observers, reset the XML stream and send a new header.
@param success: the success protocol element. For now unused, but
could hold additional data.
@type success: L{domish.Element}
"""
self.xmlstream.removeObserver('/challenge', self.onChallenge)
self.xmlstream.removeObserver('/failure', self.onFailure)
self.xmlstream.reset()
self.xmlstream.sendHeader()
self._deferred.callback(xmlstream.Reset)
def onFailure(self, failure):
"""
Clean up observers, parse the failure and errback the deferred.
@param failure: the failure protocol element. Holds details on
the error condition.
@type failure: L{domish.Element}
"""
self.xmlstream.removeObserver('/challenge', self.onChallenge)
self.xmlstream.removeObserver('/success', self.onSuccess)
try:
condition = failure.firstChildElement().name
except AttributeError:
condition = None
self._deferred.errback(SASLAuthError(condition))
|
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import ctypes
import numpy
import numpy as np
from pyscf import gto
from pyscf import lib
from pyscf.pbc import gto as pgto
L = 1.5
n = 41
cl = pgto.Cell()
cl.build(
a = [[L,0,0], [0,L,0], [0,0,L]],
mesh = [n,n,n],
atom = 'He %f %f %f' % ((L/2.,)*3),
basis = 'ccpvdz')
numpy.random.seed(1)
cl1 = pgto.Cell()
cl1.build(a = numpy.random.random((3,3)).T,
precision = 1e-9,
mesh = [n,n,n],
atom ='''He .1 .0 .0
He .5 .1 .0
He .0 .5 .0
He .1 .3 .2''',
basis = 'ccpvdz')
class KnownValues(unittest.TestCase):
def test_nimgs(self):
self.assertTrue(list(cl.get_nimgs(9e-1)), [1,1,1])
self.assertTrue(list(cl.get_nimgs(1e-2)), [2,2,2])
self.assertTrue(list(cl.get_nimgs(1e-4)), [3,3,3])
self.assertTrue(list(cl.get_nimgs(1e-6)), [4,4,4])
self.assertTrue(list(cl.get_nimgs(1e-9)), [5,5,5])
def test_Gv(self):
a = cl1.get_Gv()
self.assertAlmostEqual(lib.fp(a), -99.791927068519939, 10)
def test_SI(self):
a = cl1.get_SI()
self.assertAlmostEqual(lib.fp(a), (16.506917823339265+1.6393578329869585j), 10)
np.random.seed(2)
Gv = np.random.random((5,3))
a = cl1.get_SI(Gv)
self.assertAlmostEqual(lib.fp(a), (0.65237631847195221-1.5736011413431059j), 10)
def test_mixed_basis(self):
cl = pgto.Cell()
cl.build(
a = [[L,0,0], [0,L,0], [0,0,L]],
mesh = [n,n,n],
atom = 'C1 %f %f %f; C2 %f %f %f' % ((L/2.,)*6),
basis = {'C1':'ccpvdz', 'C2':'gthdzv'})
def test_dumps_loads(self):
cl1.loads(cl1.dumps())
def test_get_lattice_Ls(self):
#self.assertEqual(cl1.get_lattice_Ls([0,0,0]).shape, (1 , 3))
#self.assertEqual(cl1.get_lattice_Ls([1,1,1]).shape, (13 , 3))
#self.assertEqual(cl1.get_lattice_Ls([2,2,2]).shape, (57 , 3))
#self.assertEqual(cl1.get_lattice_Ls([3,3,3]).shape, (137, 3))
#self.assertEqual(cl1.get_lattice_Ls([4,4,4]).shape, (281, 3))
#self.assertEqual(cl1.get_lattice_Ls([5,5,5]).shape, (493, 3))
cell = pgto.M(atom = '''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391''',
unit='B',
basis = 'gth-dzvp',
pseudo = 'gth-pade',
a = '''
0.000000000 3.370137329 3.370137329
3.370137329 0.000000000 3.370137329
3.370137329 3.370137329 0.000000000''',
mesh = [15]*3)
rcut = max([cell.bas_rcut(ib, 1e-8) for ib in range(cell.nbas)])
self.assertEqual(cell.get_lattice_Ls(rcut=rcut).shape, (1361, 3))
rcut = max([cell.bas_rcut(ib, 1e-9) for ib in range(cell.nbas)])
self.assertEqual(cell.get_lattice_Ls(rcut=rcut).shape, (1465, 3))
def test_ewald(self):
cell = pgto.Cell()
cell.unit = 'B'
Lx = Ly = Lz = 5.
cell.a = numpy.diag([Lx,Ly,Lz])
cell.mesh = numpy.array([41]*3)
cell.atom = [['He', (2, 0.5*Ly, 0.5*Lz)],
['He', (3, 0.5*Ly, 0.5*Lz)]]
cell.basis = {'He': [[0, (1.0, 1.0)]]}
cell.verbose = 5
cell.output = '/dev/null'
cell.build()
ew_cut = (20,20,20)
self.assertAlmostEqual(cell.ewald(.05, 100), -0.468640671931, 9)
self.assertAlmostEqual(cell.ewald(0.1, 100), -0.468640671931, 9)
self.assertAlmostEqual(cell.ewald(0.2, 100), -0.468640671931, 9)
self.assertAlmostEqual(cell.ewald(1 , 100), -0.468640671931, 9)
def check(precision, eta_ref, ewald_ref):
ew_eta0, ew_cut0 = cell.get_ewald_params(precision, mesh=[41]*3)
self.assertAlmostEqual(ew_eta0, eta_ref)
self.assertAlmostEqual(cell.ewald(ew_eta0, ew_cut0), ewald_ref, 9)
check(0.001, 3.15273336976, -0.468640679947)
check(1e-05, 2.77596886114, -0.468640671968)
check(1e-07, 2.50838938833, -0.468640671931)
check(1e-09, 2.30575091612, -0.468640671931)
cell = pgto.Cell()
numpy.random.seed(10)
cell.a = numpy.random.random((3,3))*2 + numpy.eye(3) * 2
cell.mesh = [41]*3
cell.atom = [['He', (1, 1, 2)],
['He', (3, 2, 1)]]
cell.basis = {'He': [[0, (1.0, 1.0)]]}
cell.verbose = 5
cell.output = '/dev/null'
cell.build()
self.assertAlmostEqual(cell.ewald(1, 20), -2.3711356723457615, 9)
self.assertAlmostEqual(cell.ewald(2, 10), -2.3711356723457615, 9)
self.assertAlmostEqual(cell.ewald(2, 5), -2.3711356723457615, 9)
def test_ewald_2d_inf_vacuum(self):
cell = pgto.Cell()
cell.a = numpy.eye(3) * 4
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [9,9,60]
cell.verbose = 0
cell.dimension = 2
cell.low_dim_ft_type = 'inf_vacuum'
cell.rcut = 3.6
cell.build()
# FIXME: why python 3.8 generates different value at 4th decimal place
self.assertAlmostEqual(cell.ewald(), 3898143.7149599474, 2)
def test_ewald_1d_inf_vacuum(self):
cell = pgto.Cell()
cell.a = numpy.eye(3) * 4
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [9,60,60]
cell.verbose = 0
cell.dimension = 1
cell.low_dim_ft_type = 'inf_vacuum'
cell.rcut = 3.6
cell.build()
self.assertAlmostEqual(cell.ewald(), 70.875156940393225, 7)
def test_ewald_0d_inf_vacuum(self):
cell = pgto.Cell()
cell.a = numpy.eye(3)
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [60] * 3
cell.verbose = 0
cell.dimension = 0
cell.low_dim_ft_type = 'inf_vacuum'
cell.build()
eref = cell.to_mol().energy_nuc()
self.assertAlmostEqual(cell.ewald(), eref, 2)
def test_ewald_2d(self):
cell = pgto.Cell()
cell.a = numpy.eye(3) * 4
cell.atom = 'He 0 0 0; He 0 1 1'
cell.unit = 'B'
cell.mesh = [9,9,60]
cell.verbose = 0
cell.dimension = 2
cell.rcut = 3.6
cell.build()
self.assertAlmostEqual(cell.ewald(), -5.1194779101355596, 9)
a = numpy.eye(3) * 3
a[0,1] = .2
c = pgto.M(atom='H 0 0.1 0; H 1.1 2.0 0; He 1.2 .3 0.2',
a=a, dimension=2, verbose=0)
self.assertAlmostEqual(c.ewald(), -3.0902098018260418, 9)
# def test_ewald_1d(self):
# cell = pgto.Cell()
# cell.a = numpy.eye(3) * 4
# cell.atom = 'He 0 0 0; He 0 1 1'
# cell.unit = 'B'
# cell.mesh = [9,60,60]
# cell.verbose = 0
# cell.dimension = 1
# cell.rcut = 3.6
# cell.build()
# self.assertAlmostEqual(cell.ewald(), 70.875156940393225, 8)
#
# def test_ewald_0d(self):
# cell = pgto.Cell()
# cell.a = numpy.eye(3)
# cell.atom = 'He 0 0 0; He 0 1 1'
# cell.unit = 'B'
# cell.mesh = [60] * 3
# cell.verbose = 0
# cell.dimension = 0
# cell.build()
# eref = cell.to_mol().energy_nuc()
# self.assertAlmostEqual(cell.ewald(), eref, 2)
def test_pbc_intor(self):
numpy.random.seed(12)
kpts = numpy.random.random((4,3))
kpts[0] = 0
self.assertEqual(list(cl1.nimgs), [34,23,20])
s0 = cl1.pbc_intor('int1e_ovlp_sph', hermi=0, kpts=kpts)
self.assertAlmostEqual(lib.fp(s0[0]), 492.30658304804126, 4)
self.assertAlmostEqual(lib.fp(s0[1]), 37.812956255000756-28.972806230140314j, 4)
self.assertAlmostEqual(lib.fp(s0[2]),-26.113285893260819-34.448501789693566j, 4)
self.assertAlmostEqual(lib.fp(s0[3]), 186.58921213429491+123.90133823378201j, 4)
s1 = cl1.pbc_intor('int1e_ovlp_sph', hermi=1, kpts=kpts[0])
self.assertAlmostEqual(lib.fp(s1), 492.30658304804126, 4)
def test_ecp_pseudo(self):
from pyscf.pbc.gto import ecp
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'Cu 0 0 1; Na 0 1 0',
ecp = {'Na':'lanl2dz'},
pseudo = {'Cu': 'gthbp'})
self.assertTrue(all(cell._ecpbas[:,0] == 1))
cell = pgto.Cell()
cell.a = numpy.eye(3) * 8
cell.mesh = [11] * 3
cell.atom='''Na 0. 0. 0.
H 0. 0. 1.'''
cell.basis={'Na':'lanl2dz', 'H':'sto3g'}
cell.ecp = {'Na':'lanl2dz'}
cell.build()
# FIXME: ECP integrals segfault
v1 = ecp.ecp_int(cell)
mol = cell.to_mol()
v0 = mol.intor('ECPscalar_sph')
self.assertAlmostEqual(abs(v0 - v1).sum(), 0.029005926114411891, 8)
def test_ecp_keyword_in_pseudo(self):
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
ecp = 'lanl2dz',
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, 'lanl2dz')
self.assertEqual(cell.pseudo, {'O': 'gthbp'})
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
ecp = {'na': 'lanl2dz'},
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, {'na': 'lanl2dz', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.pseudo, {'O': 'gthbp'})
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, {'Cu': 'stuttgartrsc'})
self.assertEqual(cell.pseudo, {'O': 'gthbp'})
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'S 0 0 1',
ecp = {'S': 'gthbp', 'na': 'lanl2dz'},
pseudo = {'O': 'gthbp', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.ecp, {'na': 'lanl2dz', 'Cu': 'stuttgartrsc'})
self.assertEqual(cell.pseudo, {'S': 'gthbp', 'O': 'gthbp'})
def test_pseudo_suffix(self):
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'Mg 0 0 1',
pseudo = {'Mg': 'gth-lda'})
self.assertEqual(cell.atom_nelec_core(0), 2)
cell = pgto.M(
a = np.eye(3)*5,
mesh = [9]*3,
atom = 'Mg 0 0 1',
pseudo = {'Mg': 'gth-lda q2'})
self.assertEqual(cell.atom_nelec_core(0), 10)
def pbc_intor_symmetry(self):
a = cl1.lattice_vectors()
b = numpy.linalg.inv(a).T * (numpy.pi*2)
kpts = numpy.random.random((4,3))
kpts[1] = b[0]+b[1]+b[2]-kpts[0]
kpts[2] = b[0]-b[1]-b[2]-kpts[0]
kpts[3] = b[0]-b[1]+b[2]+kpts[0]
s = cl1.pbc_intor('int1e_ovlp', kpts=kpts)
self.assertAlmostEqual(abs(s[0]-s[1].conj()).max(), 0, 12)
self.assertAlmostEqual(abs(s[0]-s[2].conj()).max(), 0, 12)
self.assertAlmostEqual(abs(s[0]-s[3] ).max(), 0, 12)
def test_basis_truncation(self):
b = pgto.basis.load('gthtzvp@3s1p', 'C')
self.assertEqual(len(b), 2)
self.assertEqual(len(b[0][1]), 4)
self.assertEqual(len(b[1][1]), 2)
def test_getattr(self):
from pyscf.pbc import scf, dft, cc, tdscf
cell = pgto.M(atom='He', a=np.eye(3)*4, basis={'He': [[0, (1, 1)]]})
self.assertEqual(cell.HF().__class__, scf.HF(cell).__class__)
self.assertEqual(cell.KS().__class__, dft.KS(cell).__class__)
self.assertEqual(cell.UKS().__class__, dft.UKS(cell).__class__)
self.assertEqual(cell.KROHF().__class__, scf.KROHF(cell).__class__)
self.assertEqual(cell.KKS().__class__, dft.KKS(cell).__class__)
self.assertEqual(cell.CCSD().__class__, cc.ccsd.RCCSD)
self.assertEqual(cell.TDA().__class__, tdscf.rhf.TDA)
self.assertEqual(cell.TDBP86().__class__, tdscf.rks.TDDFTNoHybrid)
self.assertEqual(cell.TDB3LYP().__class__, tdscf.rks.TDDFT)
self.assertEqual(cell.KCCSD().__class__, cc.kccsd_rhf.KRCCSD)
self.assertEqual(cell.KTDA().__class__, tdscf.krhf.TDA)
self.assertEqual(cell.KTDBP86().__class__, tdscf.krks.TDDFTNoHybrid)
self.assertRaises(AttributeError, lambda: cell.xyz)
self.assertRaises(AttributeError, lambda: cell.TDxyz)
def test_ghost(self):
cell = pgto.Cell(
atom = 'C 0 0 0; ghost 0 0 2',
basis = {'C': 'sto3g', 'ghost': gto.basis.load('sto3g', 'H')},
a = np.eye(3) * 3,
pseudo = 'gth-pade',
).run()
self.assertEqual(cell.nao_nr(), 6)
cell = pgto.M(atom='''
ghost-O 0.000000000 0.000000000 2.500000000
X_H -0.663641000 -0.383071000 3.095377000
ghost.H 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
H -1.663641000 -0.383071000 3.095377000
H 1.663588000 0.383072000 3.095377000
''',
a=np.eye(3) * 3,
pseudo={'default': 'gth-pade', 'ghost-O': 'gth-pade'},
basis='gth-dzv')
self.assertEqual(cell.nao_nr(), 24) # 8 + 2 + 2 + 8 + 2 + 2
self.assertTrue(len(cell._pseudo) == 3) # O, H, ghost-O in ecp
cell = pgto.M(atom='''
ghost-O 0.000000000 0.000000000 2.500000000
X_H -0.663641000 -0.383071000 3.095377000
ghost.H 0.663588000 0.383072000 3.095377000
O 1.000000000 0.000000000 2.500000000
''',
a=np.eye(3) * 3,
pseudo='gth-pade',
basis={'H': 'gth-dzv', 'o': 'gth-dzvp', 'ghost-O': 'gth-szv'})
self.assertEqual(cell.nao_nr(), 21) # 4 + 2 + 2 + 13
self.assertTrue(len(cell._pseudo) == 1) # only O in ecp
def test_exp_to_discard(self):
cell = pgto.Cell(
atom = 'Li 0 0 0; Li 1.5 1.5 1.5',
a = np.eye(3) * 3,
basis = "gth-dzvp",
exp_to_discard = .1
)
cell.build()
cell1 = pgto.Cell(
atom = 'Li@1 0 0 0; Li@2 1.5 1.5 1.5',
a = np.eye(3) * 3,
basis = "gth-dzvp",
exp_to_discard = .1
)
cell1.build()
for ib in range(len(cell._bas)):
nprim = cell.bas_nprim(ib)
nc = cell.bas_nctr(ib)
es = cell.bas_exp(ib)
es1 = cell1.bas_exp(ib)
ptr = cell._bas[ib, gto.mole.PTR_COEFF]
ptr1 = cell1._bas[ib, gto.mole.PTR_COEFF]
cs = cell._env[ptr:ptr+nprim*nc]
cs1 = cell1._env[ptr1:ptr1+nprim*nc]
self.assertAlmostEqual(abs(es - es1).max(), 0, 15)
self.assertAlmostEqual(abs(cs - cs1).max(), 0, 15)
if __name__ == '__main__':
print("Full Tests for pbc.gto.cell")
unittest.main()
|
|
# Easy paver commands for less command typing and more coding.
# Visit http://paver.github.com/paver/ to get started. - @brandondean Sept. 30
import subprocess
import sys
import json
import time
import optparse
import os
import re
from paver.easy import *
from openapi_spec_validator import validate_spec
path = path("./")
@task
@cmdopts([
optparse.make_option("-p", "--project", help="App Engine project to deploy", default="tbatv-prod-hrd"),
optparse.make_option("--yolo", action="store_true", help="Do not wait for the travis build to succeed #yolo", default=False),
optparse.make_option("--config", help="gcloud SDK configuration profile to use", default=""),
optparse.make_option("--version", help="App engine version to deploy", default=""),
optparse.make_option("--modules", help="Comma separated names of module yaml files to deploy", default=""),
optparse.make_option("--skip-cron", action="store_true", help="Do not deploy cron.yaml", default=False),
optparse.make_option("--app-cfg-dir", help="Place to find appcfg.py [deprecated]", default=""),
])
def deploy(options):
args = ["python", "deploy.py", "--project", options.deploy.project]
if options.deploy.yolo:
args.append("--yolo")
if options.deploy.config:
args.extend(["--config", options.deploy.config])
if options.deploy.version:
args.extend(["--version", options.deploy.version])
if options.deploy.modules:
args.extend(["--modules", options.deploy.modules])
if options.skip_cron:
args.append("--skip-cron")
if options.app_cfg_dir:
args.extend(["--app-cfg-dir", options.app_cfg_dir])
print "Running {}".format(subprocess.list2cmdline(args))
subprocess.call(args)
@task
def javascript():
"""Combine Compress Javascript"""
print("Combining and Compressing Javascript")
sh("python do_compress.py js")
@task
def gulp():
"""Update all npm dependencies and run 'gulp build' task"""
print("Running 'gulp build'")
sh("npm update && gulp build --production")
@task
def install_libs():
sh("pip install -r deploy_requirements.txt -t lib")
@task
def jinja2():
sh("python compile_jinja2_templates.py")
@task
def less():
"""Build and Combine CSS"""
print("Building and Combining CSS")
sh("lessc static/css/less_css/tba_style.main.less static/css/less_css/tba_style.main.css")
sh("lessc static/css/less_css/tba_style.gameday.less static/css/less_css/tba_style.gameday.css")
sh("python do_compress.py css")
@task
@cmdopts([
('commit=', 'c', 'Commit hash to lint'),
('base=', 'b', 'Lint all changes between the current HEAD and this base branch'),
])
def lint(options):
args = ""
if 'base' in options.lint:
args = "--base {}".format(options.lint.base)
elif 'commit' in options.lint:
args = "--commit {}".format(options.lint.commit)
sh("python ops/linter.py {}".format(args))
@task
def validate_swagger():
dir = "./static/swagger"
for fname in os.listdir(dir):
print("Checking {}...".format(fname))
if fname.endswith(".json"):
with open('{}/{}'.format(dir, fname), 'rb') as file:
try:
spec_dict = json.load(file)
except ValueError, e:
print("Invalid JSON")
print(e)
sys.exit(1)
try:
validate_spec(spec_dict)
except Exception, e:
print("Invalid OpenAPI Spec")
print(e)
sys.exit(1)
print("{} validated!".format(fname))
sys.exit(0)
@task
def make():
javascript()
gulp()
less()
jinja2()
build_time = time.ctime()
travis_job = os.environ.get('TRAVIS_BUILD_ID', '')
try:
git_branch_name = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
git_last_commit = subprocess.check_output(["git", "log", "-1"])
except Exception:
print "No git history found, falling back to defaults..."
git_branch_name = 'dev'
git_last_commit = 'dev'
data = {"git_branch_name": git_branch_name,
"git_last_commit": git_last_commit,
"build_time": build_time,
"build_number": travis_job,
}
with open("version_info.json", "w") as f:
f.write(json.dumps(data))
@task
def make_endpoints_config():
sh("python lib/endpoints/endpointscfg.py get_openapi_spec mobile_main.MobileAPI --hostname tbatv-prod-hrd.appspot.com")
sh("python lib/endpoints/endpointscfg.py get_openapi_spec clientapi.clientapi_service.ClientAPI --hostname tbatv-prod-hrd.appspot.com")
@task
def preflight():
"""Prep a prod push"""
install_libs()
test_function([])
make()
@task
def run():
"""Run local dev server"""
sh("dev_appserver.py dispatch.yaml app.yaml app-backend-tasks.yaml app-backend-tasks-b2.yaml api.yaml clientapi.yaml tasks.yaml")
@task
@consume_args
def test(args):
"""Run tests. Accepts an argument to match subnames of tests"""
test_function(args)
@task
def setup():
sh("pip install -r requirements.txt")
install_libs()
make()
@task
def test_fast():
"""Run tests that don't require HTTP"""
print("Running Fast Tests")
sh("python run_tests.py --test_pattern=test_math_*.py")
sh("python run_tests.py --test_pattern=test_*helper*.py")
sh("python run_tests.py --test_pattern=test_*parser*.py")
sh("python run_tests.py --test_pattern=test_*manipulator.py")
sh("python run_tests.py --test_pattern=test_*api.py")
sh("python run_tests.py --test_pattern=test_event.py")
sh("python run_tests.py --test_pattern=test_match_cleanup.py")
sh("python run_tests.py --test_pattern=test_event_group_by_week.py")
sh("python run_tests.py --test_pattern=test_event_team_repairer.py")
sh("python run_tests.py --test_pattern=test_event_team_updater.py")
sh("python run_tests.py --test_pattern=test_event_get_short_name.py")
@task
@cmdopts([
optparse.make_option("--key", help="Event, Team, or Match key to import", default="2016necmp"),
optparse.make_option("--project", help="App Engine Project", default=""),
optparse.make_option("--port", type=int, help="Local port running the API server", default=None),
])
def bootstrap(options):
"""Download and import an event or team from apiv3"""
log = '/var/log/tba.log'
key = options.bootstrap.key
url = None
if options.bootstrap.project:
url = "https://{}.appspot.com".format(options.bootstrap.project)
elif os.path.exists(log) and os.path.isfile(log):
prog = re.compile('Starting API server at: http://localhost:(\d{5})$')
with open(log, 'r') as f:
for line in f:
result = prog.search(line.strip())
if result:
url = "localhost:{}".format(result.group(1))
break
if not url:
if "port" not in options.bootstrap or not options.bootstrap.port:
print "Unable to find GAE remote API port, either tee the log to {} or provide --port".format(log)
return
url = "localhost:{}".format(options.bootstrap.port)
args = ["python", "bootstrap.py", "--url", url, key]
print "Running {}".format(subprocess.list2cmdline(args))
subprocess.call(args)
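# Hedged sketch of the log scraping bootstrap() performs: given a dev_appserver
# log line (the sample line here is invented), pull out the local API port.
def _example_find_api_port():
    sample = "INFO Starting API server at: http://localhost:45123"
    prog = re.compile('Starting API server at: http://localhost:(\d{5})$')
    result = prog.search(sample)
    return "localhost:{}".format(result.group(1)) if result else None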
@task
def devserver():
sh("dev_appserver.py --skip_sdk_update_check=true --admin_host=0.0.0.0 --host=0.0.0.0 --datastore_path=/datastore/tba.db dispatch.yaml app.yaml app-backend-tasks.yaml app-backend-tasks-b2.yaml api.yaml clientapi.yaml tasks.yaml")
def test_function(args):
print("Running Tests")
test_pattern = ""
if len(args) > 0:
test_pattern = " --test_pattern=*%s*" % args[0]
sh("python run_tests.py%s" % test_pattern)
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
P2PConnection: A low-level connection object to a node's P2P interface
P2PInterface: A high-level interface object for communicating to a node over P2P
P2PDataStore: A p2p interface class that keeps a store of transactions and blocks
and can respond correctly to getdata and getheaders messages"""
import asyncio
from collections import defaultdict
from io import BytesIO
import logging
import struct
import sys
import threading
from test_framework.messages import (
CBlockHeader,
MIN_VERSION_SUPPORTED,
msg_addr,
msg_block,
MSG_BLOCK,
msg_blocktxn,
msg_cmpctblock,
msg_feefilter,
msg_getaddr,
msg_getblocks,
msg_getblocktxn,
msg_getdata,
msg_getheaders,
msg_headers,
msg_inv,
msg_mempool,
msg_notfound,
msg_ping,
msg_pong,
msg_reject,
msg_sendcmpct,
msg_sendheaders,
msg_tx,
MSG_TX,
MSG_TYPE_MASK,
msg_verack,
msg_version,
NODE_NETWORK,
NODE_WITNESS,
sha256,
)
from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
MESSAGEMAP = {
b"addr": msg_addr,
b"block": msg_block,
b"blocktxn": msg_blocktxn,
b"cmpctblock": msg_cmpctblock,
b"feefilter": msg_feefilter,
b"getaddr": msg_getaddr,
b"getblocks": msg_getblocks,
b"getblocktxn": msg_getblocktxn,
b"getdata": msg_getdata,
b"getheaders": msg_getheaders,
b"headers": msg_headers,
b"inv": msg_inv,
b"mempool": msg_mempool,
b"notfound": msg_notfound,
b"ping": msg_ping,
b"pong": msg_pong,
b"reject": msg_reject,
b"sendcmpct": msg_sendcmpct,
b"sendheaders": msg_sendheaders,
b"tx": msg_tx,
b"verack": msg_verack,
b"version": msg_version,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
class P2PConnection(asyncio.Protocol):
"""A low-level connection object to a node's P2P interface.
This class is responsible for:
- opening and closing the TCP connection to the node
- reading bytes from and writing bytes to the socket
- deserializing and serializing the P2P message header
- logging messages as they are sent and received
This class contains no logic for handing the P2P message payloads. It must be
sub-classed and the on_message() callback overridden."""
def __init__(self):
# The underlying transport of the connection.
# Should only call methods on this from the NetworkThread, c.f. call_soon_threadsafe
self._transport = None
@property
def is_connected(self):
return self._transport is not None
def peer_connect(self, dstaddr, dstport, net="regtest"):
assert not self.is_connected
self.dstaddr = dstaddr
self.dstport = dstport
# The initial message to send after the connection was made:
self.on_connection_send_msg = None
self.recvbuf = b""
self.magic_bytes = MAGIC_BYTES[net]
logger.debug('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
loop = NetworkThread.network_event_loop
conn_gen_unsafe = loop.create_connection(lambda: self, host=self.dstaddr, port=self.dstport)
conn_gen = lambda: loop.call_soon_threadsafe(loop.create_task, conn_gen_unsafe)
return conn_gen
def peer_disconnect(self):
# Connection could have already been closed by other end.
NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.abort())
# Connection and disconnection methods
def connection_made(self, transport):
"""asyncio callback when a connection is opened."""
assert not self._transport
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self._transport = transport
if self.on_connection_send_msg:
self.send_message(self.on_connection_send_msg)
self.on_connection_send_msg = None # Never used again
self.on_open()
def connection_lost(self, exc):
"""asyncio callback when a connection is closed."""
if exc:
logger.warning("Connection lost to {}:{} due to {}".format(self.dstaddr, self.dstport, exc))
else:
logger.debug("Closed connection to: %s:%d" % (self.dstaddr, self.dstport))
self._transport = None
self.recvbuf = b""
self.on_close()
# Socket read methods
def data_received(self, t):
"""asyncio callback when data is read from the socket."""
if len(t) > 0:
self.recvbuf += t
self._on_data()
def _on_data(self):
"""Try to read P2P messages from the recv buffer.
This method reads data from the buffer in a loop. It deserializes,
parses and verifies the P2P header, then passes the P2P payload to
the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.magic_bytes:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command not in MESSAGEMAP:
raise ValueError("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
self._log_message("receive", t)
self.on_message(t)
except Exception as e:
            logger.exception('Error reading message: %s', repr(e))
raise
def on_message(self, message):
"""Callback for processing a P2P payload. Must be overridden by derived class."""
raise NotImplementedError
# Socket write methods
def send_message(self, message):
"""Send a P2P message over the socket.
This method takes a P2P payload, builds the P2P header and adds
the message to the send buffer to be sent over the socket."""
tmsg = self.build_message(message)
self._log_message("send", message)
return self.send_raw_message(tmsg)
def send_raw_message(self, raw_message_bytes):
if not self.is_connected:
raise IOError('Not connected')
def maybe_write():
if not self._transport:
return
# Python <3.4.4 does not have is_closing, so we have to check for
# its existence explicitly as long as Bitcoin Core supports all
# Python 3.4 versions.
if hasattr(self._transport, 'is_closing') and self._transport.is_closing():
return
self._transport.write(raw_message_bytes)
NetworkThread.network_event_loop.call_soon_threadsafe(maybe_write)
# Class utility methods
def build_message(self, message):
"""Build a serialized P2P message"""
command = message.command
data = message.serialize()
tmsg = self.magic_bytes
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
return tmsg
def _log_message(self, direction, msg):
"""Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
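# Hedged sketch (not part of the test framework): the wire framing implemented
# by build_message()/_on_data() above, reproduced with hashlib only. `command`
# and `payload` below are placeholder values.
def _example_frame(command=b"ping", payload=b"\x00" * 8, magic=MAGIC_BYTES["regtest"]):
    import hashlib
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    header = magic + command.ljust(12, b"\x00") + struct.pack("<I", len(payload)) + checksum
    return header + payload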
class P2PInterface(P2PConnection):
"""A high-level P2P interface class for communicating with a Bitcoin node.
This class provides high-level callbacks for processing P2P message
payloads, as well as convenience methods for interacting with the
node over P2P.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour."""
def __init__(self):
super().__init__()
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# The network services received from the peer
self.nServices = 0
def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
create_conn = super().peer_connect(*args, **kwargs)
if send_version:
# Send a version msg
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.on_connection_send_msg = vt # Will be sent soon after connection_made
return create_conn
# Message receiving methods
def on_message(self, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type."""
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(message)
except:
print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
raise
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self):
pass
def on_close(self):
pass
def on_addr(self, message): pass
def on_block(self, message): pass
def on_blocktxn(self, message): pass
def on_cmpctblock(self, message): pass
def on_feefilter(self, message): pass
def on_getaddr(self, message): pass
def on_getblocks(self, message): pass
def on_getblocktxn(self, message): pass
def on_getdata(self, message): pass
def on_getheaders(self, message): pass
def on_headers(self, message): pass
def on_mempool(self, message): pass
def on_notfound(self, message): pass
def on_pong(self, message): pass
def on_reject(self, message): pass
def on_sendcmpct(self, message): pass
def on_sendheaders(self, message): pass
def on_tx(self, message): pass
def on_inv(self, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
self.send_message(want)
def on_ping(self, message):
self.send_message(msg_pong(message.nonce))
def on_verack(self, message):
pass
def on_version(self, message):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
last_headers = self.last_message.get('headers')
if not last_headers:
return False
return last_headers.headers[0].rehash() == blockhash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getdata(self, timeout=60):
"""Waits for a getdata message.
        Receiving any getdata message will satisfy the predicate. The last_message["getdata"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block/tx has been requested."""
test_function = lambda: self.last_message.get("getdata")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_getheaders(self, timeout=60):
"""Waits for a getheaders message.
        Receiving any getheaders message will satisfy the predicate. The last_message["getheaders"]
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
test_function = lambda: self.last_message.get("getheaders")
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
wait_until(test_function, timeout=timeout, lock=mininode_lock)
# Message sending helper functions
def send_and_ping(self, message, timeout=60):
self.send_message(message)
self.sync_with_ping(timeout=timeout)
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
wait_until(test_function, timeout=timeout, lock=mininode_lock)
self.ping_counter += 1
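# Hedged usage sketch: how a test typically attaches a P2PInterface to a node.
# `node` is assumed to be a TestNode from the test framework, whose
# add_p2p_connection() starts the connection and waits for the verack.
def _example_connect(node):
    p2p = node.add_p2p_connection(P2PInterface())
    p2p.sync_with_ping()
    return p2p.nServices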
# One lock for synchronizing all data access between the network event loop (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# P2PConnection acquires this lock whenever delivering a message to a P2PInterface.
# This lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = threading.RLock()
class NetworkThread(threading.Thread):
network_event_loop = None
def __init__(self):
super().__init__(name="NetworkThread")
# There is only one event loop and no more than one thread must be created
assert not self.network_event_loop
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
"""Start the network thread."""
self.network_event_loop.run_forever()
def close(self, timeout=10):
"""Close the connections and network event loop."""
self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop)
wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout)
self.network_event_loop.close()
self.join(timeout)
class P2PDataStore(P2PInterface):
"""A P2P data store class.
Keeps a block and transaction store and responds correctly to getdata and getheaders requests."""
def __init__(self):
super().__init__()
# store of blocks. key is block hash, value is a CBlock object
self.block_store = {}
self.last_block_hash = ''
# store of txs. key is txid, value is a CTransaction object
self.tx_store = {}
self.getdata_requests = []
def on_getdata(self, message):
"""Check for the tx/block in our stores and if found, reply with an inv message."""
for inv in message.inv:
self.getdata_requests.append(inv.hash)
if (inv.type & MSG_TYPE_MASK) == MSG_TX and inv.hash in self.tx_store.keys():
self.send_message(msg_tx(self.tx_store[inv.hash]))
elif (inv.type & MSG_TYPE_MASK) == MSG_BLOCK and inv.hash in self.block_store.keys():
self.send_message(msg_block(self.block_store[inv.hash]))
else:
logger.debug('getdata message type {} received.'.format(hex(inv.type)))
def on_getheaders(self, message):
"""Search back through our block store for the locator, and reply with a headers message if found."""
locator, hash_stop = message.locator, message.hashstop
# Assume that the most recent block added is the tip
if not self.block_store:
return
headers_list = [self.block_store[self.last_block_hash]]
maxheaders = 2000
while headers_list[-1].sha256 not in locator.vHave:
# Walk back through the block store, adding headers to headers_list
# as we go.
prev_block_hash = headers_list[-1].hashPrevBlock
if prev_block_hash in self.block_store:
prev_block_header = CBlockHeader(self.block_store[prev_block_hash])
headers_list.append(prev_block_header)
if prev_block_header.sha256 == hash_stop:
# if this is the hashstop header, stop here
break
else:
logger.debug('block hash {} not found in block store'.format(hex(prev_block_hash)))
break
# Truncate the list if there are too many headers
headers_list = headers_list[:-maxheaders - 1:-1]
response = msg_headers(headers_list)
if response is not None:
self.send_message(response)
def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, reject_reason=None, expect_disconnect=False, timeout=60):
"""Send blocks to test node and test whether the tip advances.
- add all blocks to our block_store
- send a headers message for the final block
- the on_getheaders handler will ensure that any getheaders are responded to
- if force_send is False: wait for getdata for each of the blocks. The on_getdata handler will
ensure that any getdata messages are responded to. Otherwise send the full block unsolicited.
- if success is True: assert that the node's tip advances to the most recent block
- if success is False: assert that the node's tip doesn't advance
- if reject_reason is set: assert that the correct reject message is logged"""
with mininode_lock:
for block in blocks:
self.block_store[block.sha256] = block
self.last_block_hash = block.sha256
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
if force_send:
for b in blocks:
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(blocks[-1])]))
wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout, lock=mininode_lock)
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
else:
self.sync_with_ping(timeout=timeout)
if success:
wait_until(lambda: node.getbestblockhash() == blocks[-1].hash, timeout=timeout)
else:
assert node.getbestblockhash() != blocks[-1].hash
def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.
- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""
with mininode_lock:
for tx in txs:
self.tx_store[tx.sha256] = tx
reject_reason = [reject_reason] if reject_reason else []
with node.assert_debug_log(expected_msgs=reject_reason):
for tx in txs:
self.send_message(msg_tx(tx))
if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
raw_mempool = node.getrawmempool()
if success:
# Check that all txs are now in the mempool
for tx in txs:
assert tx.hash in raw_mempool, "{} not found in mempool".format(tx.hash)
else:
# Check that none of the txs are now in the mempool
for tx in txs:
assert tx.hash not in raw_mempool, "{} tx found in mempool".format(tx.hash)
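# Illustrative usage sketch (not part of the original file): inside a functional test, a
# P2PDataStore peer would typically be driven roughly like this. The "node", "blocks",
# "txs" and "add_p2p_connection" names are assumptions for illustration only.
#
#     peer = node.add_p2p_connection(P2PDataStore())
#     peer.send_blocks_and_test(blocks, node, success=True)
#     peer.send_txs_and_test(txs, node, success=False, reject_reason="some-reject-reason")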
|
|
import struct
from scapy.packet import *
from scapy.fields import *
from scapy.layers.l2 import *
from scapy.layers.inet import *
##Layers
#Hello Message Format
#
# The PIM Hello message, as defined by PIM-SM [4], has the following
# format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |PIM Ver| Type | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Option Type | Option Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Option Value |
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# | . |
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Option Type | Option Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Option Value |
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# PIM Ver, Type, Reserved, Checksum
# Described above.
#
# Option Type
# The type of option given in the Option Value field. Available
# types are as follows:
#
# 0 Reserved
# 1 Hello Hold Time
# 2 LAN Prune Delay
# 3 - 16 Reserved
# 17 To be assigned by IANA
# 18 Deprecated and SHOULD NOT be used
# 19 DR Priority (PIM-SM Only)
# 20 Generation ID
# 21 State Refresh Capable
# 22 Bidir Capable
# 23 - 65000 To be assigned by IANA
# 65001 - 65535 Reserved for Private Use [9]
#
# Unknown options SHOULD be ignored.
class Pimv2Header(Packet):
name = "Pimv2 Header"
fields_desc = [ BitField("ver",2,4),
BitField("type",5,4),
ByteField("reserved",0),
XShortField("chksum",None) ]
def post_build(self, p, pay):
p += pay
if self.chksum is None:
ck = checksum(p)
            p = p[:2] + struct.pack("!H", ck) + p[4:]  # insert the 16-bit checksum
return p
class PimDmHelloOpt1(Packet):
name = "PimDm Hello Optiontype 1: Hello Hold Time"
fields_desc = [ ShortField("type", 1),
ShortField("length", 2),
ShortField("holdtime", 105) ]
class PimDmHelloOpt19(Packet):
name = "PimDm Hello Optiontype 19: Hello Hold Time"
fields_desc = [ ShortField("type", 19),
ShortField("length", 4),
IntField("dr_priority",1)]
class PimDmHelloOpt2(Packet):
name = "PimDm Hello Optiontype 2: T,LAN Prune Delay ,Override Interval "
fields_desc = [ ShortField("type", 2),
ShortField("length", 4),
BitField("T",0,1),
BitField("lan_prune_delay",1000,15),
ShortField("override_interval", 3000) ]
class PimDmHelloOpt20(Packet):
name = "PimDm Hello Optiontype 20: Hello Hold Time"
fields_desc = [ ShortField("type", 20),
ShortField("length", 4),
IntField("generation_id", 18671) ]
class PimDmHelloOpt21(Packet):
name = "PimDm Hello Optiontype 21: Hello Hold Time"
fields_desc = [ ShortField("type", 21),
ShortField("length", 4),
BitField("version",1,8),
BitField("lan_prune_delay",60,8),
ShortField("reserved", 0) ]
##Layers
#
# State Refresh Message Format
#
# PIM State Refresh Messages have the following format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |PIM Ver| Type | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Group Address (Encoded Group Format) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address (Encoded Unicast Format) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Originator Address (Encoded Unicast Format) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |R| Metric Preference |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Masklen | TTL |P|N|O|Reserved | Interval |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# PIM Ver, Type, Reserved, Checksum
# Described above.
class PimRefreshMessage(Packet):
name = "Pim-Dm Refresh Message"
fields_desc = [ IntField("Additem1", 0x01000020),
IPField("group", "228.0.0.1"),
ShortField("Additem2", 0x0100),
IPField("source", "39.1.1.9"),
ShortField("Additem3", 0x0100),
IPField("orig_address","39.1.1.3"),
BitField("R",0,1),
BitField("metric_preference",120,31),
IntField("metric",2),
ByteField("masklen",24),
ByteField("TTL",63),
BitField("P",0,1),
BitField("N",1,1),
BitField("O",0,1),
BitField("reserved",0,5),
ByteField("interval",60)]
##Layers
#
## Assert Message Format
#
# PIM Assert Messages, as defined in PIM-SM [4], have the following
# format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |PIM Ver| Type | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Group Address (Encoded Group Format) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address (Encoded Unicast Format) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |R| Metric Preference |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Metric |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class PimAssertMessage(Packet):
name = "Pim-Dm Assert Message"
fields_desc = [ IntField("Additem1", 0x01000020),
IPField("group", "228.0.0.1"),
ShortField("Additem2", 0x0100),
IPField("source", "39.1.1.9"),
BitField("R",0,1),
BitField("metric_preference",120,31),
IntField("metric",2)]
bind_layers( IP, Pimv2Header, proto=103)
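# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of composing a PIM-DM Hello from the layers defined above. It assumes
# PIM message type 0 is Hello and that PIM routers listen on 224.0.0.13; the source address
# and interface name are placeholders. Sending raw packets requires elevated privileges.
def build_pim_hello(src="192.0.2.1"):
    return (IP(src=src, dst="224.0.0.13", ttl=1) /
            Pimv2Header(type=0) /
            PimDmHelloOpt1(holdtime=105) /
            PimDmHelloOpt2() /
            PimDmHelloOpt20(generation_id=12345))
if __name__ == "__main__":
    from scapy.sendrecv import send
    send(build_pim_hello(), iface="eth0")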
|
|
# Author: Christine Staiger
# Date: March 1st 2017
# Version: 0.1
"""
Python functions to test the connectivity and performance of the iRODS icommands iput and iget.
"""
import os
import json
import subprocess
import time
from timeit import default_timer as timer
import hashlib
from tqdm import tqdm
import shutil
RED = "\033[31m"
GREEN = "\033[92m"
BLUE = "\033[34m"
DEFAULT = "\033[0m"
def createTestData():
"""
Creates test data.
    Folder: $HOME/testdata or $TMPDIR/testdata; TMPDIR is a shell environment variable
Files: 100MB, 1GB, 2GB, 5GB
Folders: 100 x 10MB
"""
if "TMPDIR" not in os.environ:
testdata = os.environ["HOME"]+"/testdata"
else:
testdata = os.environ["TMPDIR"]+"/testdata"
# Check whether test folder already exists. If not create one
if os.path.isdir(testdata):
print testdata, "exists"
else:
print "Create", testdata
os.makedirs(testdata)
# Create data
#100MB
print "Write sample100M.txt"
with open(testdata+"/sample100M.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 100))
#1GB
print "Write sample1G.txt"
with open(testdata+"/sample1G.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 1024))
#2GB
print "Write sample2G.txt"
with open(testdata+"/sample2G.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 1024 * 2))
#5GB
print "Write sample5G.txt"
with open(testdata+"/sample5G.txt_0", "wb") as f:
f.write(os.urandom(1024 * 1024 * 1024 * 5))
#Folder of 100*10MB files
print "Create 10MB*100"
os.makedirs(testdata+"/Coll10MB_0")
for i in range(100):
with open(testdata+"/Coll10MB_0/sample10MB_"+str(i)+".txt", "wb") as f:
f.write(os.urandom(1024 * 1024 * 10))
print "%sSUCCESS Test data created.%s" %(GREEN, DEFAULT)
def createEnvJSON(uname, host, zone, auth="PAM", ssl="none"):
"""
Creates the irods_environment.json
"""
# Check whether /home/<user>/.irods exists. If not create.
irodsdir = os.environ["HOME"]+"/.irods"
# Check whether test folder already exists. If not create one
if os.path.isdir(irodsdir):
print irodsdir, "exists"
else:
print "Create", irodsdir
os.makedirs(irodsdir)
# Create json file
irodsDict = {}
irodsDict["irods_user_name"] = uname
irodsDict["irods_host"] = host
irodsDict["irods_port"] = 1247
irodsDict["irods_zone_name"] = zone
irodsDict["irods_authentication_scheme"] = auth
irodsDict["irods_ssl_verify_server"] = ssl
print irodsDict
# Write to disc
print "Write", irodsdir+"/irods_environment.json"
with open(irodsdir+"/irods_environment.json", "w") as f:
json.dump(irodsDict, f)
# Do an iinit to cache the password
print "%sCaching password.%s" %(GREEN, DEFAULT)
#subprocess.call(["iinit"], shell=True)
subprocess.call(["ienv"], shell=True)
print "%sSUCCESS iRODS environment setup.%s" %(GREEN, DEFAULT)
def iRODScreateColl(collname):
"""
Creates an iRODS collection. If collection exists it starts
enumerating until new collection is created.
collname: Collection to create in iRODS, accepts absolute and relative collection paths
"""
count = 0
while(True):
p = subprocess.Popen(["imkdir "+collname+str(count)], shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if err.startswith("ERROR"):
print RED, err, DEFAULT
count = count + 1
else:
break
print GREEN, "SUCCESS iRODS collection created:", DEFAULT, collname+str(count)
return collname+str(count)
def iRODSput(iresource, source, idestination):
"""
Wrapper for iRODS iput.
iresource: iRODS resource name
    source: path to the local file or folder to upload, accepts absolute and relative paths
    idestination: iRODS destination, accepts absolute and relative collection paths
"""
p = subprocess.Popen(["time iput -r -b -K -f -R "+iresource+" "+source+" "+idestination],
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
    # Parse the real/user/sys timings that `time` writes to stderr (one per line, tab-separated)
    elapsed = [i.split("\t")[1] for i in err.strip("\n").split("\n")]
return (out, err, elapsed[0], elapsed[1], elapsed[2])
def iRODSget(iresource, isource, destination):
"""
Wrapper for iRODS iget.
iresource: iRODS resource name
    isource: iRODS source, accepts absolute and relative collection paths
    destination: path to the local destination, accepts absolute and relative paths
"""
p = subprocess.Popen(["time iget -r -b -K -f -R "+iresource+" "+isource+" "+destination],
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
elapsed = [i.split("\t")[1] for i in err.strip("\n").split("\n")]
return (out, err, elapsed[0], elapsed[1], elapsed[2])
def checkIntegrity(iRODSfile, localFile):
"""
Compares checksums of local file and iRODS file. Uses md5.
    localFile: absolute path to local file
    iRODSfile: iRODS absolute or relative path
"""
p = subprocess.Popen(["ils -L "+iRODSfile],
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
irodschksum = [item for item in out.split(" ") if item !=""][7]
checksum = hashlib.md5(open(localFile, "rb").read()).hexdigest()
return irodschksum == checksum
def cleanUp(collections = ["CONNECTIVITY0", "PERFORMANCE0", "PERFORMANCEC0"],
folders = [os.environ["HOME"]+"/testdata"]):
"""
Removes iRODS collections and replicated testdata.
    collections: List of absolute or relative collection names. Default ["CONNECTIVITY0", "PERFORMANCE0", "PERFORMANCEC0"].
folders: List of local folders. Default [os.environ["HOME"]+"/testdata"]
"""
if "TMPDIR" not in os.environ:
folders.append(os.environ["TMPDIR"]+"/testdata")
print "Remove iRODS collections"
for coll in collections:
p = subprocess.Popen(["irm -r "+coll],
shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = subprocess.Popen(["irmtrash"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print "Remove duplicate data"
data = []
for folder in folders:
data.extend([folder+"/" + f
for f in os.listdir(folder) if not f.endswith("_0")])
for d in data:
if os.path.isfile(d):
os.remove(d)
else:
shutil.rmtree(d, ignore_errors=True)
print "%sClean up finished. %s" %(GREEN, DEFAULT)
def connectivity(iresource, data=os.environ["HOME"]+"/testdata/sample100M.txt_0"):
"""
    Tests the connectivity to iresource with a 100MB file, checking port 1247 and the data ports.
    iresource: iRODS resource
    data: path to the 100MB test file (default: $HOME/testdata/sample100M.txt_0)
Returns a tuple: (date, resource, client, iput/iget, size, time)
"""
# Make sure you are in /home/<user>
os.chdir(os.environ["HOME"])
# Verify that /home/<usr>/testdata/sample100M.txt is there.
if not os.path.isfile(data):
print "%sERROR test data does not exist: %s"+data %(RED, DEFAULT)
raise Exception("File not found.")
print "Create iRODS Collection CONNECTIVITY*"
collection = iRODScreateColl("CONNECTIVITY")
print "iput -f -K -R iresource", data, collection+"/sample100M.txt"
date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
    out, err, elapsed, _, _ = iRODSput(iresource, data, collection+"/sample100M.txt")
if err.startswith("ERROR"):
print "%s" %(RED), err, "%s" %(DEFAULT)
raise Exception("iRODS ERROR")
# Test data integrity
if not checkIntegrity(collection+"/sample100M.txt", data):
print "%sERROR Checksums do not match.%s" %(RED, DEFAULT)
raise Exception("iRODS Data integrity")
result = (date, iresource, os.uname()[1], "iput", "100M", elapsed)
print GREEN, "SUCCESS", result, DEFAULT
return (date, iresource, os.uname()[1], "iput", "100M", elapsed)
def performanceSingleFiles(iresource, maxTimes = 10):
"""
Tests the performance of iget and iput for single files.
    Test data needs to be stored under $HOME/testdata (or $TMPDIR/testdata if TMPDIR is set). The function omits subfolders.
It ping-pongs the data between the unix file system and iRODS collection:
iput folder/data_0 --> coll/data_1
iget coll/data_1 --> folder/data_1
iput folder/data_1 --> coll/data_2
iget coll/data_2 --> folder/data_2
...
iresource: iRODS resource
    maxTimes: how many times each file is transferred with iput and iget.
Returns a list of tuples: [(date, resource, client, iput/iget, size, real time, user time, system time)]
"""
# If there is a tmp dir, use that for transferring the data
if "TMPDIR" not in os.environ:
testdata = os.environ["HOME"]+"/testdata"
else:
testdata = os.environ["TMPDIR"]+"/testdata"
dataset = [testdata+"/" + f
for f in os.listdir(testdata) if os.path.isfile(testdata+"/" + f)]
for data in dataset:
# Verify that data is there.
if not os.path.isfile(data):
print RED, "ERROR test data does not exist:", data, DEFAULT
raise Exception("File not found.")
print "Create iRODS Collection PERFORMANCE"
collection = iRODScreateColl("PERFORMANCE")
# Put and get data from iRODS using 1GB, 2GB and 5GB, store data with new file name "+_str(i)"
result = []
for data in dataset:
        data = data.split("_")[0]  # get the base name of the file --> strip the "_<i>" suffix
print "Put and get: ", data
for i in tqdm(range(1, maxTimes)):
date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print "iput", data+"_"+str(i-1), collection+"/"+os.path.basename(data)+"_"+str(i)
out, err, real, user, sys = iRODSput(iresource, data+"_"+str(i-1),
collection+"/"+os.path.basename(data)+"_"+str(i))
print "integrity", collection+"/"+os.path.basename(data+"_"+str(i)), data+"_"+str(i-1)
if not checkIntegrity(collection+"/"+os.path.basename(data+"_"+str(i)), data+"_"+str(i-1)):
print "%sERROR Checksums do not match.%s" %(RED, DEFAULT)
raise Exception("iRODS Data integrity")
else:
print "Integrity done"
result.append((date, iresource, os.uname()[1], "iput", os.path.basename(data).split('.')[0][6:], real, user, sys))
date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print "iget", collection+"/"+os.path.basename(data)+"_"+str(i), data+"_"+str(i)
out, err, real, user, sys = iRODSget(iresource, collection+"/"+os.path.basename(data+"_"+str(i)),
data+"_"+str(i))
print "integrity", collection+"/"+os.path.basename(data+"_"+str(i)), data+"_"+str(i)
if not checkIntegrity(collection+"/"+os.path.basename(data)+"_"+str(i), data+"_"+str(i)):
print "%sERROR Checksums do not match.%s" %(RED, DEFAULT)
raise Exception("iRODS Data integrity")
else:
print "Integrity done"
result.append((date, iresource, os.uname()[1], "iget", os.path.basename(data).split('.')[0][6:], real, user, sys))
return result
def performanceCollections(iresource, maxTimes = 10):
"""
    Tests the performance of iget and iput for collections (folders of files).
    Test data needs to be stored under $HOME/testdata (or $TMPDIR/testdata if TMPDIR is set). The function only uses subfolders.
It ping-pongs the data collections between the unix file system and iRODS collection:
iput folder/data_0/ --> coll/data_1/
iget coll/data_1/ --> folder/data_1/
iput folder/data_1/ --> coll/data_2/
iget coll/data_2/ --> folder/data_2/
...
iresource: iRODS resource
    maxTimes: how many times each collection is transferred with iput and iget.
Returns a list of tuples: [(date, resource, client, iput/iget, size, real time, user time, system time)]
"""
# If there is a tmp dir, use that for transferring the data
if "TMPDIR" not in os.environ:
testdata = os.environ["HOME"]+"/testdata"
else:
testdata = os.environ["TMPDIR"]+"/testdata"
dataset = [testdata+"/" + f
for f in os.listdir(testdata) if os.path.isdir(testdata+"/" + f)]
for data in dataset:
# Verify that data is not empty and data is there.
files = [f for f in os.listdir(data) if os.path.isfile(data+"/" + f)]
if len(files) == 0:
print RED, "ERROR collection empty:", data, DEFAULT
raise Exception("No files in data collection.")
print "Create iRODS Collection PERFORMANCEC"
collection = iRODScreateColl("PERFORMANCEC")
result = []
for data in dataset:
        data = data.split("_")[0]  # get the base name of the folder --> strip the "_<i>" suffix
print "Put and get: ", data
for i in tqdm(range(1, maxTimes)):
date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print "iput -r", data+"_"+str(i-1), collection+"/"+os.path.basename(data)+"_"+str(i)
out, err, real, user, sys = iRODSput(iresource, data+"_"+str(i-1),
collection+"/"+os.path.basename(data)+"_"+str(i))
result.append((date, iresource, os.uname()[1], "iput", os.path.basename(data).split('.')[0][4:], real, user, sys))
date = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
print "iget -r", collection+"/"+os.path.basename(data)+"_"+str(i), data+"_"+str(i)
out, err, real, user, sys = iRODSget(iresource, collection+"/"+os.path.basename(data+"_"+str(i)),
data+"_"+str(i))
result.append((date, iresource, os.uname()[1], "iget", os.path.basename(data).split('.')[0][4:], real, user, sys))
#TODO:Integrity checks
return result
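# --- Illustrative usage sketch (not part of the original module) ---
# A minimal end-to-end run. The user name, host, zone and resource below are placeholders,
# and iinit is assumed to have cached the iRODS password already. Note that createTestData()
# writes several GB of random data.
if __name__ == "__main__":
    createEnvJSON("alice", "irods.example.org", "exampleZone")
    createTestData()
    results = [connectivity("demoResc")]
    results.extend(performanceSingleFiles("demoResc", maxTimes=3))
    results.extend(performanceCollections("demoResc", maxTimes=3))
    for row in results:
        print row
    cleanUp()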
|
|
# -*- coding: UTF-8 -*-
#
"""
history
Press related content, names of papers
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3.0.0 - split all the content into babycontents
evb - note: only one dictionary named 'content' allowed per module
this limitation is to speed up loading
"""
__version__ = '4.0'
# ------------------------------------------------------
# journalism
#
content = {
'newspapers' : [
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_US#>',
'<#paper_British#>',
'<#paper_British#>',
'<#paper_German#>',
'<#paper_Spanish#>',
'<#paper_Italian#>',
'<#paper_Dutch#>',
'<#paper_French#>',
'<#paper_Other#>'
],
'magazines': [
'<#newspapers#>',
'<#paper_financial#>',
'<#living_section#> <#mag_sx#>',
'<#sports_section#> <#mag_sx#>',
'<#state_name#> <#mag_sx#>',
'<#portal_anyshortname#> <#mag_sx#>',
],
'magazinesections':[
'<#newssections#>',
'<#sports_section#>',
'<#portal_anyname#>',
'<#event_construct#>',
],
'newssections':[
'International', 'People', 'Travel', 'Politics', 'Background',
        'Revealed', 'Internet', 'Commuting', 'Legal', 'Law', 'News',
        'Hidden Knowledge', 'Filter', 'Choices', 'Choice',
],
'interview':[
'interview', 'interrogation', 'q&a', 'personal', 'who is it?',
'interview', 'the person', 'getting personal', 'query',
'behind the person', 'nice meeting you',
],
'paper_financial' : [
'The Financial <#paper_generic_English#> (<#city#>)',
'Wall Street <#mag_sx#>',
'Market<#mag_sx#> (<#city#>)',
'The <#city#> Investor',
],
'paper_US' : [
'The <#cities_USmajor#> <#paper_generic_English#>',
'The <#cities_USmajor#> <#paper_generic_English#>',
'The <#cities_USmajor#> <#paper_generic_English#>',
'The <#town_us#> <#paper_generic_English#>',
'The <#town_us#> <#paper_generic_English#>',
# 'The <#paper_generic_English#> (<#cities_USmajor#>)',
# 'The <#paper_generic_English#> (<#cities_USmajor#>)',
# 'The <#paper_generic_English#> (<#cities_USmajor#>)',
# 'The <#paper_generic_English#> (<#cities_USmajor#>)',
# 'The <#paper_generic_English#> (<#cities_USmajor#>)',
'The <#town_us#> <#paper_generic_English#>-<#paper_generic_English#>'],
'paper_British' : [
'The <#cities_UK#> <#paper_generic_English#>',
'The <#paper_generic_English#>'],
'paper_German' : [
'<#paper_generic_German#>'],
'paper_Spanish' : [
'<#paper_generic_Spanish#>'],
'paper_Italian' : [
'<#paper_generic_Italian#>'],
'paper_Dutch' : [
'<#paper_generic_Dutch#>'],
'paper_French' : [
'<#paper_generic_French#>'],
'paper_Other' : [
'The Capetown <#paper_generic_English#>',
'The <#paper_generic_English#> (Hong Kong)',
'The Bombay <#paper_generic_English#>',
'<#paper_generic_Spanish#>',
'The Toronto <#paper_generic_English#>',
'<#paper_generic_French#>'
],
'mag_sx' : [
'Week',
'World',
'Watch',
'Watcher',
'Update',
'Journal',
'Speculator',
'Daily',],
'paper_generic_English' : [
'Adviser',
'Advertiser',
'Advocate',
'Bugle',
'Chronicle',
'Constitution',
'Courier',
'Companion',
'Dispatch',
'Daily',
'Express',
'Eagle',
'Enquirer',
'Fact',
'Focus',
'Financial',
'Forward',
'Free-Press',
'Gazette',
'Globe',
'Gleaner',
'Herald',
'Inquirer',
'Intelligencer',
'Impact',
'Independent',
'Informer',
'Industrial',
'Journal',
'Leader',
'Legend',
'Mercury',
'Monitor',
'Mirror',
'Messenger',
'News',
'Notice',
'Observer',
'Orbit',
'Press',
'Post',
'Picayune',
'Progress',
'Progressive',
'Quarterly',
'Quorum',
'Register',
'Review',
'Recorder',
'Reporter',
'Reader',
'Sentinel',
'Sun',
'Star',
'Spirit',
'Statesman',
'Times',
'Tribune',
'Telegraph',
'Telegram',
'Today',
'Union',
'Variety',
'Voice',
'Veritas',
'Weekly',
'World',
'Worker',
'Yeoman'
],
'paper_generic_German' : [
'Allgemeine',
'Tageszeitung',
'Volkskrant',
'Die Woche',
'Die Welt',
'Die Zeit',
'Zeitung'
],
'paper_generic_Spanish' : [
'El Diario',
'El Mundo',
'El Sol',
'El Tiempo',
'El Universal'
],
'paper_generic_Italian' : [
'Giornale',
'La Stampa',
        'Il Messaggero',
'La Prensa'
],
'paper_generic_Dutch' : [
'Krant',
'Telegraaf'
],
'paper_generic_French' : [
'Le Monde',
'Quotidien'
],
'newssource': ['<#company#>', '<#press#>'],
'press': ['<#tv#>', '<#newspapers#>', '<#mag_tech#>', '<#source_online#>'],
'seriouspress': ['<#tv#>', '<#newspapers#>','<#source_wireservice#>'],
'p_tv_px': ['A','C','N','B'],
'p_tv_sx': ['BC','BS','NN','SN','SN-FN','BC-FN'],
'tv': ['<#p_tv_px#><#p_tv_sx#>'],
'source_wireservice': ['AP','UP','Reuters'],
'source_online': ['Wired', 'Bloomberg', 'Gizmodo', "Medium", "FaceBook", "Twitter"],
'mag_tech_px': ['Mac','PC','Linux', 'Android', 'Mobile'],
'mag_tech_sx': ['User','Week','World','Journal'],
'mag_tech': ['<#mag_tech_px#><#mag_tech_sx#>']
}
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Volume Code.
"""
import cStringIO
import mox
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.notifier import test_notifier
from nova.openstack.common import importutils
import nova.policy
from nova import quota
from nova import rpc
from nova import test
import nova.volume.api
QUOTAS = quota.QUOTAS
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
super(VolumeTestCase, self).setUp()
self.compute = importutils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
self.stubs.Set(nova.flags.FLAGS, 'notification_driver',
'nova.notifier.test_notifier')
self.volume = importutils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
instance = db.instance_create(self.context, {})
self.instance_id = instance['id']
self.instance_uuid = instance['uuid']
test_notifier.NOTIFICATIONS = []
def tearDown(self):
db.instance_destroy(self.context, self.instance_id)
super(VolumeTestCase, self).tearDown()
@staticmethod
def _create_volume(size=0, snapshot_id=None):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['snapshot_id'] = snapshot_id
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(context.get_admin_context(), vol)
def test_ec2_uuid_mapping(self):
ec2_vol = db.ec2_volume_create(context.get_admin_context(),
'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa', 5)
self.assertEqual(5, ec2_vol['id'])
self.assertEqual('aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa',
db.get_volume_uuid_by_ec2_id(context.get_admin_context(), 5))
ec2_vol = db.ec2_volume_create(context.get_admin_context(),
'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaaaaa', 1)
self.assertEqual(1, ec2_vol['id'])
ec2_vol = db.ec2_volume_create(context.get_admin_context(),
'aaaaaaaa-bbbb-bbbb-bbbb-aaaaaaaaazzz')
self.assertEqual(6, ec2_vol['id'])
def test_create_delete_volume(self):
"""Test volume can be created and deleted."""
# Need to stub out reserve, commit, and rollback
def fake_reserve(context, expire=None, **deltas):
return ["RESERVATION"]
def fake_commit(context, reservations):
pass
def fake_rollback(context, reservations):
pass
self.stubs.Set(QUOTAS, "reserve", fake_reserve)
self.stubs.Set(QUOTAS, "commit", fake_commit)
self.stubs.Set(QUOTAS, "rollback", fake_rollback)
volume = self._create_volume()
volume_id = volume['id']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.volume.create_volume(self.context, volume_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
self.assertEqual(volume_id, db.volume_get(context.get_admin_context(),
volume_id).id)
self.volume.delete_volume(self.context, volume_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
self.assertRaises(exception.NotFound,
db.volume_get,
self.context,
volume_id)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_volume')
self.volume.driver.delete_volume(mox.IgnoreArg()) \
.AndRaise(exception.VolumeIsBusy)
self.mox.ReplayAll()
res = self.volume.delete_volume(self.context, volume_id)
self.assertEqual(True, res)
volume_ref = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(volume_id, volume_ref.id)
self.assertEqual("available", volume_ref.status)
self.mox.UnsetStubs()
self.volume.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
"""Test volume can be created from a snapshot."""
volume_src = self._create_volume()
self.volume.create_volume(self.context, volume_src['id'])
snapshot_id = self._create_snapshot(volume_src['id'])
self.volume.create_snapshot(self.context, volume_src['id'],
snapshot_id)
volume_dst = self._create_volume(0, snapshot_id)
self.volume.create_volume(self.context, volume_dst['id'], snapshot_id)
self.assertEqual(volume_dst['id'],
db.volume_get(
context.get_admin_context(),
volume_dst['id']).id)
self.assertEqual(snapshot_id, db.volume_get(
context.get_admin_context(),
volume_dst['id']).snapshot_id)
self.volume.delete_volume(self.context, volume_dst['id'])
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_src['id'])
def test_too_big_volume(self):
"""Ensure failure if a too large of a volume is requested."""
# FIXME(vish): validation needs to move into the data layer in
# volume_create
        # Short-circuit: this test is disabled until the FIXME above is addressed.
        return True
try:
volume = self._create_volume('1001')
self.volume.create_volume(self.context, volume)
self.fail("Should have thrown TypeError")
except TypeError:
pass
def test_too_many_volumes(self):
"""Ensure that NoMoreTargets is raised when we run out of volumes."""
vols = []
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
vols.append(volume['id'])
volume = self._create_volume()
self.assertRaises(db.NoMoreTargets,
self.volume.create_volume,
self.context,
volume['id'])
db.volume_destroy(context.get_admin_context(), volume['id'])
for volume_id in vols:
self.volume.delete_volume(self.context, volume_id)
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = '2' # m1.tiny
inst['ami_launch_index'] = 0
instance = db.instance_create(self.context, {})
instance_id = instance['id']
instance_uuid = instance['uuid']
mountpoint = "/dev/sdf"
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
if FLAGS.fake_tests:
db.volume_attached(self.context, volume_id, instance_uuid,
mountpoint)
else:
self.compute.attach_volume(self.context,
instance_uuid,
volume_id,
mountpoint)
vol = db.volume_get(context.get_admin_context(), volume_id)
self.assertEqual(vol['status'], "in-use")
self.assertEqual(vol['attach_status'], "attached")
self.assertEqual(vol['mountpoint'], mountpoint)
self.assertEqual(vol['instance_uuid'], instance_uuid)
self.assertRaises(exception.NovaException,
self.volume.delete_volume,
self.context,
volume_id)
if FLAGS.fake_tests:
db.volume_detached(self.context, volume_id)
else:
self.compute.detach_volume(self.context,
instance_uuid,
volume_id)
vol = db.volume_get(self.context, volume_id)
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
db.instance_destroy(self.context, instance_id)
def test_concurrent_volumes_get_different_targets(self):
"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
targets = []
def _check(volume_id):
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
iscsi_target = db.volume_get_iscsi_target_num(admin_context,
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume = self._create_volume()
d = self.volume.create_volume(self.context, volume['id'])
_check(d)
for volume_id in volume_ids:
self.volume.delete_volume(self.context, volume_id)
def test_multi_node(self):
# TODO(termie): Figure out how to test with two nodes,
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
@staticmethod
def _create_snapshot(volume_id, size='0'):
"""Create a snapshot object."""
snap = {}
snap['volume_size'] = size
snap['user_id'] = 'fake'
snap['project_id'] = 'fake'
snap['volume_id'] = volume_id
snap['status'] = "creating"
return db.snapshot_create(context.get_admin_context(), snap)['id']
def test_create_delete_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
self.volume.delete_snapshot(self.context, snapshot_id)
self.assertRaises(exception.NotFound,
db.snapshot_get,
self.context,
snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_cant_delete_volume_with_snapshots(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
self.assertEqual(snapshot_id,
db.snapshot_get(context.get_admin_context(),
snapshot_id).id)
volume['status'] = 'available'
volume['host'] = 'fakehost'
volume_api = nova.volume.api.API()
self.assertRaises(exception.InvalidVolume,
volume_api.delete,
self.context,
volume)
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_can_delete_errored_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
snapshot_id = self._create_snapshot(volume['id'])
self.volume.create_snapshot(self.context, volume['id'], snapshot_id)
snapshot = db.snapshot_get(context.get_admin_context(),
snapshot_id)
volume_api = nova.volume.api.API()
snapshot['status'] = 'badstatus'
self.assertRaises(exception.InvalidVolume,
volume_api.delete_snapshot,
self.context,
snapshot)
snapshot['status'] = 'error'
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume['id'])
def test_create_snapshot_force(self):
"""Test snapshot in use can be created forcibly."""
def fake_cast(ctxt, topic, msg):
pass
self.stubs.Set(rpc, 'cast', fake_cast)
volume = self._create_volume()
self.volume.create_volume(self.context, volume['id'])
db.volume_attached(self.context, volume['id'], self.instance_uuid,
'/dev/sda1')
volume_api = nova.volume.api.API()
volume = volume_api.get(self.context, volume['id'])
self.assertRaises(exception.InvalidVolume,
volume_api.create_snapshot,
self.context, volume,
'fake_name', 'fake_description')
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume,
'fake_name',
'fake_description')
db.snapshot_destroy(self.context, snapshot_ref['id'])
db.volume_destroy(self.context, volume['id'])
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
volume = self._create_volume()
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
snapshot_id = self._create_snapshot(volume_id)
self.volume.create_snapshot(self.context, volume_id, snapshot_id)
self.mox.StubOutWithMock(self.volume.driver, 'delete_snapshot')
self.volume.driver.delete_snapshot(mox.IgnoreArg()) \
.AndRaise(exception.SnapshotIsBusy)
self.mox.ReplayAll()
self.volume.delete_snapshot(self.context, snapshot_id)
snapshot_ref = db.snapshot_get(self.context, snapshot_id)
self.assertEqual(snapshot_id, snapshot_ref.id)
self.assertEqual("available", snapshot_ref.status)
self.mox.UnsetStubs()
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_id)
def test_create_volume_usage_notification(self):
"""Ensure create volume generates appropriate usage notification"""
volume = self._create_volume()
volume_id = volume['id']
self.assertEquals(len(test_notifier.NOTIFICATIONS), 0)
self.volume.create_volume(self.context, volume_id)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
msg = test_notifier.NOTIFICATIONS[0]
self.assertEquals(msg['event_type'], 'volume.create.start')
msg = test_notifier.NOTIFICATIONS[1]
self.assertEquals(msg['priority'], 'INFO')
self.assertEquals(msg['event_type'], 'volume.create.end')
payload = msg['payload']
self.assertEquals(payload['tenant_id'], volume['project_id'])
self.assertEquals(payload['user_id'], volume['user_id'])
self.assertEquals(payload['volume_id'], volume['id'])
self.assertEquals(payload['status'], 'creating')
self.assertEquals(payload['size'], volume['size'])
self.assertTrue('display_name' in payload)
self.assertTrue('snapshot_id' in payload)
self.assertTrue('launched_at' in payload)
self.assertTrue('created_at' in payload)
self.volume.delete_volume(self.context, volume_id)
class DriverTestCase(test.TestCase):
"""Base Test class for Drivers."""
driver_name = "nova.volume.driver.FakeBaseDriver"
def setUp(self):
super(DriverTestCase, self).setUp()
self.flags(volume_driver=self.driver_name,
logging_default_format_string="%(message)s")
self.volume = importutils.import_object(FLAGS.volume_manager)
self.context = context.get_admin_context()
self.output = ""
def _fake_execute(_command, *_args, **_kwargs):
"""Fake _execute."""
return self.output, None
self.volume.driver.set_execute(_fake_execute)
log = logging.getLogger()
self.stream = cStringIO.StringIO()
log.logger.addHandler(logging.logging.StreamHandler(self.stream))
inst = {}
instance = db.instance_create(self.context, {})
self.instance_id = instance['id']
self.instance_uuid = instance['uuid']
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
return []
def _detach_volume(self, volume_id_list):
"""Detach volumes from an instance."""
for volume_id in volume_id_list:
db.volume_detached(self.context, volume_id)
self.volume.delete_volume(self.context, volume_id)
class VolumeDriverTestCase(DriverTestCase):
"""Test case for VolumeDriver"""
driver_name = "nova.volume.driver.VolumeDriver"
def test_delete_busy_volume(self):
"""Test deleting a busy volume."""
self.stubs.Set(self.volume.driver, '_volume_not_present',
lambda x: False)
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x, y: False)
# Want DriverTestCase._fake_execute to return 'o' so that
# volume.driver.delete_volume() raises the VolumeIsBusy exception.
self.output = 'o'
self.assertRaises(exception.VolumeIsBusy,
self.volume.driver.delete_volume,
{'name': 'test1', 'size': 1024})
# when DriverTestCase._fake_execute returns something other than
# 'o' volume.driver.delete_volume() does not raise an exception.
self.output = 'x'
self.volume.driver.delete_volume({'name': 'test1', 'size': 1024})
class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver"""
driver_name = "nova.volume.driver.ISCSIDriver"
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
vol_ref = db.volume_create(self.context, vol)
self.volume.create_volume(self.context, vol_ref['id'])
vol_ref = db.volume_get(self.context, vol_ref['id'])
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
db.volume_attached(self.context, vol_ref['id'], self.instance_uuid,
mountpoint)
volume_id_list.append(vol_ref['id'])
return volume_id_list
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self):
"""No log message when all the processes are running."""
volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
for i in volume_id_list:
tid = db.volume_get_iscsi_target_num(self.context, i)
self.volume.driver.tgtadm.show_target(tid)
self.stream.truncate(0)
self.mox.ReplayAll()
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
def test_check_for_export_with_some_volume_missing(self):
"""Output a warning message when some volumes are not recognied
by ietd."""
volume_id_list = self._attach_volume()
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver.tgtadm, 'show_target')
self.volume.driver.tgtadm.show_target(tid).AndRaise(
exception.ProcessExecutionError())
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.volume.check_for_export,
self.context,
self.instance_id)
msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
self.assertTrue(0 <= self.stream.getvalue().find(msg))
self.mox.UnsetStubs()
self._detach_volume(volume_id_list)
class VolumePolicyTestCase(test.TestCase):
def setUp(self):
super(VolumePolicyTestCase, self).setUp()
nova.policy.reset()
nova.policy.init()
self.context = context.get_admin_context()
def tearDown(self):
super(VolumePolicyTestCase, self).tearDown()
nova.policy.reset()
def _set_rules(self, rules):
nova.common.policy.set_brain(nova.common.policy.HttpBrain(rules))
def test_check_policy(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
}
nova.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
nova.volume.api.check_policy(self.context, 'attach')
def test_check_policy_with_target(self):
self.mox.StubOutWithMock(nova.policy, 'enforce')
target = {
'project_id': self.context.project_id,
'user_id': self.context.user_id,
'id': 2,
}
nova.policy.enforce(self.context, 'volume:attach', target)
self.mox.ReplayAll()
nova.volume.api.check_policy(self.context, 'attach', {'id': 2})
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import six
import unittest
from airflow import configuration, AirflowException
from airflow.models.connection import Connection
from airflow.utils import db
from mock import patch, call
from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
class TestSparkSubmitHook(unittest.TestCase):
_spark_job_file = 'test_application.py'
_config = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'conn_id': 'default_spark',
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/spark@airflow.org',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
            '--with-spaces', 'args should keep embedded spaces',
'baz'
]
}
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
for arg in list_cmd:
if arg.startswith("--"):
pos = list_cmd.index(arg)
return_dict[arg] = list_cmd[pos + 1]
return return_dict
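    # Example (illustrative): cmd_args_to_dict(['spark-submit', '--master', 'yarn',
    #                                           '--queue', 'root.etl', 'app.py'])
    # returns {'--master': 'yarn', '--queue': 'root.etl'}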
def setUp(self):
configuration.load_test_config()
db.merge_conn(
Connection(
conn_id='spark_yarn_cluster', conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='spark_k8s_cluster', conn_type='spark',
host='k8s://https://k8s-master',
extra='{"spark-home": "/opt/spark", ' +
'"deploy-mode": "cluster", ' +
'"namespace": "mynamespace"}')
)
db.merge_conn(
Connection(
conn_id='spark_default_mesos', conn_type='spark',
host='mesos://host', port=5050)
)
db.merge_conn(
Connection(
conn_id='spark_home_set', conn_type='spark',
host='yarn://yarn-master',
extra='{"spark-home": "/opt/myspark"}')
)
db.merge_conn(
Connection(
conn_id='spark_home_not_set', conn_type='spark',
host='yarn://yarn-master')
)
db.merge_conn(
Connection(
conn_id='spark_binary_set', conn_type='spark',
host='yarn', extra='{"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
Connection(
conn_id='spark_binary_and_home_set', conn_type='spark',
host='yarn',
extra='{"spark-home": "/path/to/spark_home", ' +
'"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}')
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster_client_mode', conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "client"}')
)
def test_build_spark_submit_command(self):
# Given
hook = SparkSubmitHook(**self._config)
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_build_cmd = [
'spark-submit',
'--master', 'yarn',
'--conf', 'parquet.compression=SNAPPY',
'--files', 'hive-site.xml',
'--py-files', 'sample_library.py',
'--jars', 'parquet.jar',
'--packages', 'com.databricks:spark-avro_2.11:3.2.0',
'--exclude-packages', 'org.bad.dependency:1.0.0',
'--repositories', 'http://myrepo.org',
'--num-executors', '10',
'--total-executor-cores', '4',
'--executor-cores', '4',
'--executor-memory', '22g',
'--driver-memory', '3g',
'--keytab', 'privileged_user.keytab',
'--principal', 'user/spark@airflow.org',
'--name', 'spark-job',
'--class', 'com.foo.bar.AppMain',
'--verbose',
'test_application.py',
'-f', 'foo',
'--bar', 'bar',
            '--with-spaces', 'args should keep embedded spaces',
'baz'
]
self.assertEquals(expected_build_cmd, cmd)
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSubmitHook(conn_id='')
hook.submit()
# Then
self.assertEqual(mock_popen.mock_calls[0],
call(['spark-submit', '--master', 'yarn',
'--name', 'default-name', ''],
stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1))
def test_resolve_should_track_driver_status(self):
# Given
hook_default = SparkSubmitHook(conn_id='')
hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook_spark_k8s_cluster = SparkSubmitHook(conn_id='spark_k8s_cluster')
hook_spark_default_mesos = SparkSubmitHook(conn_id='spark_default_mesos')
hook_spark_home_set = SparkSubmitHook(conn_id='spark_home_set')
hook_spark_home_not_set = SparkSubmitHook(conn_id='spark_home_not_set')
hook_spark_binary_set = SparkSubmitHook(conn_id='spark_binary_set')
hook_spark_binary_and_home_set = SparkSubmitHook(
conn_id='spark_binary_and_home_set')
hook_spark_standalone_cluster = SparkSubmitHook(
conn_id='spark_standalone_cluster')
# When
should_track_driver_status_default = hook_default \
._resolve_should_track_driver_status()
should_track_driver_status_spark_yarn_cluster = hook_spark_yarn_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_k8s_cluster = hook_spark_k8s_cluster \
._resolve_should_track_driver_status()
should_track_driver_status_spark_default_mesos = hook_spark_default_mesos \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_set = hook_spark_home_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_home_not_set = hook_spark_home_not_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_set = hook_spark_binary_set \
._resolve_should_track_driver_status()
should_track_driver_status_spark_binary_and_home_set = \
hook_spark_binary_and_home_set._resolve_should_track_driver_status()
should_track_driver_status_spark_standalone_cluster = \
hook_spark_standalone_cluster._resolve_should_track_driver_status()
# Then
self.assertEqual(should_track_driver_status_default, False)
self.assertEqual(should_track_driver_status_spark_yarn_cluster, False)
self.assertEqual(should_track_driver_status_spark_k8s_cluster, False)
self.assertEqual(should_track_driver_status_spark_default_mesos, False)
self.assertEqual(should_track_driver_status_spark_home_set, False)
self.assertEqual(should_track_driver_status_spark_home_not_set, False)
self.assertEqual(should_track_driver_status_spark_binary_set, False)
self.assertEqual(should_track_driver_status_spark_binary_and_home_set, False)
self.assertEqual(should_track_driver_status_spark_standalone_cluster, True)
def test_resolve_connection_yarn_default(self):
# Given
hook = SparkSubmitHook(conn_id='')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
def test_resolve_connection_yarn_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"spark_home": None,
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
self.assertEqual(dict_cmd["--queue"], "root.default")
def test_resolve_connection_mesos_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default_mesos')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "mesos://host:5050")
def test_resolve_connection_spark_yarn_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"spark_home": None,
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn://yarn-master")
self.assertEqual(dict_cmd["--queue"], "root.etl")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_k8s_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"spark_home": "/opt/spark",
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "mynamespace"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "k8s://https://k8s-master")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/opt/myspark",
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/opt/myspark/bin/spark-submit')
def test_resolve_connection_spark_home_not_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_not_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'custom-spark-submit')
def test_resolve_connection_spark_binary_and_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_and_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/custom-spark-submit')
def test_resolve_connection_spark_standalone_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "spark://spark-standalone-master:6066",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": 'default'}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/spark-submit')
def test_resolve_spark_submit_env_vars_standalone_client_mode(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster_client_mode',
env_vars={"bar": "foo"})
# When
hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertEqual(hook._env, {"bar": "foo"})
def test_resolve_spark_submit_env_vars_standalone_cluster_mode(self):
def env_vars_exception_in_standalone_cluster_mode():
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster',
env_vars={"bar": "foo"})
# When
hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertRaises(AirflowException,
env_vars_exception_in_standalone_cluster_mode)
def test_resolve_spark_submit_env_vars_yarn(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster',
env_vars={"bar": "foo"})
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertEqual(cmd[4], "spark.yarn.appMasterEnv.bar=foo")
def test_resolve_spark_submit_env_vars_k8s(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster',
env_vars={"bar": "foo"})
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
self.assertEqual(cmd[4], "spark.kubernetes.driverEnv.bar=foo")
def test_process_spark_submit_log_yarn(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
'platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot '
'be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 NodeManagers',
'INFO Client: Submitting application application_1486558679801_1820 ' +
'to ResourceManager'
]
# When
hook._process_spark_submit_log(log_lines)
# Then
self.assertEqual(hook._yarn_application_id, 'application_1486558679801_1820')
def test_process_spark_submit_log_k8s(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
log_lines = [
'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:' +
'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
'namespace: default' +
'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,' +
'spark-role -> driver' +
'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42' +
'creation time: 2018-03-05T10:26:55Z' +
'service account name: spark' +
'volumes: spark-init-properties, download-jars-volume,' +
'download-files-volume, spark-token-2vmlm' +
'node name: N/A' +
'start time: N/A' +
'container images: N/A' +
'phase: Pending' +
'status: []' +
'2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,' +
' new state:' +
'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver' +
'namespace: default' +
'Exit code: 999'
]
# When
hook._process_spark_submit_log(log_lines)
# Then
self.assertEqual(hook._kubernetes_driver_pod,
'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver')
self.assertEqual(hook._spark_exit_code, 999)
def test_process_spark_submit_log_standalone_cluster(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
log_lines = [
'Running Spark using the REST application submission protocol.',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request '
'to launch an application in spark://spark-standalone-master:6066',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
'created as driver-20171128111415-0001. Polling submission state...'
]
# When
hook._process_spark_submit_log(log_lines)
# Then
self.assertEqual(hook._driver_id, 'driver-20171128111415-0001')
def test_process_spark_driver_status_log(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
log_lines = [
'Submitting a request for the status of submission ' +
'driver-20171128111415-0001 in spark://spark-standalone-master:6066',
'17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with ' +
'SubmissionStatusResponse:',
'{',
'"action" : "SubmissionStatusResponse",',
'"driverState" : "RUNNING",',
'"serverSparkVersion" : "1.6.0",',
'"submissionId" : "driver-20171128111415-0001",',
'"success" : true,',
'"workerHostPort" : "172.18.0.7:38561",',
'"workerId" : "worker-20171128110741-172.18.0.7-38561"',
'}'
]
# When
hook._process_spark_status_log(log_lines)
# Then
self.assertEqual(hook._driver_status, 'RUNNING')
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_yarn_process_on_kill(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.wait.return_value = 0
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your ' +
'platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot ' +
'be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 ' +
'NodeManagerapplication_1486558679801_1820s',
'INFO Client: Submitting application application_1486558679801_1820 ' +
'to ResourceManager'
]
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook._process_spark_submit_log(log_lines)
hook.submit()
# When
hook.on_kill()
# Then
self.assertIn(call(['yarn', 'application', '-kill',
'application_1486558679801_1820'],
stderr=-1, stdout=-1),
mock_popen.mock_calls)
def test_standalone_cluster_process_on_kill(self):
# Given
log_lines = [
'Running Spark using the REST application submission protocol.',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request ' +
'to launch an application in spark://spark-standalone-master:6066',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully ' +
'created as driver-20171128111415-0001. Polling submission state...'
]
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
hook._process_spark_submit_log(log_lines)
# When
kill_cmd = hook._build_spark_driver_kill_command()
# Then
self.assertEqual(kill_cmd[0], '/path/to/spark_home/bin/spark-submit')
self.assertEqual(kill_cmd[1], '--master')
self.assertEqual(kill_cmd[2], 'spark://spark-standalone-master:6066')
self.assertEqual(kill_cmd[3], '--kill')
self.assertEqual(kill_cmd[4], 'driver-20171128111415-0001')
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from yosaipy2.core import (
SessionSettings,
InvalidSessionException,
)
from session import session_tuple, SimpleSession, SessionKey
from session import NativeSessionHandler
from yosaipy2.core.utils.utils import get_logger
from typing import Dict
import abcs as session_abcs
class DelegatingSession(session_abcs.Session):
"""
A DelegatingSession is a client-tier representation of a server side
Session. This implementation is basically a proxy to a server-side
NativeSessionManager, which will return the proper results for each
method call.
A DelegatingSession will cache data when appropriate to avoid a remote
method invocation, only communicating with the server when necessary and
if write-through session caching is implemented.
Of course, if used in-process with a NativeSessionManager business object,
as might be the case in a web-based application where the web classes
and server-side business objects exist in the same namespace, a remote
method call will not be incurred.
"""
def __init__(self, session_manager, sessionkey):
# type: (NativeSessionManager, SessionKey) -> None
super(DelegatingSession, self).__init__()
self.session_key = sessionkey
self.session_manager = session_manager
self._start_timestamp = None
self._host = None
        self._logger = get_logger()
        self.stop_session_callback = None  # is set by Subject owner
@property
def session_id(self):
return self.session_key.session_id
@session_id.setter
def session_id(self, v):
        raise Exception("setting session_id is not supported")
@property
def start_timestamp(self):
if not self._start_timestamp:
self._start_timestamp = self.session_manager.get_start_timestamp(self.session_key)
return self._start_timestamp
@property
def last_access_time(self):
return self.session_manager.get_last_access_time(self.session_key)
@property
def idle_timeout(self):
return self.session_manager.get_idle_timeout(self.session_key)
@idle_timeout.setter
def idle_timeout(self, timeout):
self.session_manager.set_idle_timeout(self.session_key, timeout)
@property
def absolute_timeout(self):
return self.session_manager.get_absolute_timeout(self.session_key)
@absolute_timeout.setter
def absolute_timeout(self, timeout):
self.session_manager.set_absolute_timeout(self.session_key, timeout)
@property
def host(self):
if not self._host:
self._host = self.session_manager.get_host(self.session_key)
return self._host
def touch(self):
self.session_manager.touch(self.session_key)
def stop(self, identifiers):
self.session_manager.stop(self.session_key, identifiers)
try:
self.stop_session_callback()
except TypeError:
msg = "DelegatingSession has no stop_session_callback set."
self._logger.debug(msg)
@property
def internal_attribute_keys(self):
return self.session_manager.get_internal_attribute_keys(self.session_key)
def get_internal_attribute(self, attribute_key):
return self.session_manager.get_internal_attribute(
self.session_key,
attribute_key
)
def get_internal_attributes(self):
return self.session_manager.get_internal_attributes(self.session_key)
def set_internal_attribute(self, attribute_key, value=None):
# unlike shiro, yosai doesn't support removing keys when value is None
self.session_manager.set_internal_attribute(
self.session_key,
attribute_key,
value
)
def set_internal_attributes(self, key_values):
# unlike shiro, yosai doesn't support removing keys when value is None
self.session_manager.set_internal_attributes(self.session_key, key_values)
def remove_internal_attribute(self, attribute_key):
return self.session_manager.remove_internal_attribute(self.session_key,
attribute_key)
def remove_internal_attributes(self, to_remove):
return self.session_manager.remove_internal_attributes(self.session_key,
to_remove)
@property
def attribute_keys(self):
return self.session_manager.get_attribute_keys(self.session_key)
def get_attribute(self, attribute_key):
if attribute_key:
return self.session_manager.get_attribute(self.session_key, attribute_key)
return None
def get_attributes(self, attribute_keys):
if attribute_keys:
return self.session_manager.get_attributes(self.session_key, attribute_keys)
return None
def set_attribute(self, attribute_key, value):
if all([attribute_key, value]):
self.session_manager.set_attribute(
self.session_key,
attribute_key,
value
)
def set_attributes(self, attributes):
if attributes:
self.session_manager.set_attributes(self.session_key, attributes)
def remove_attribute(self, attribute_key):
if attribute_key:
return self.session_manager.remove_attribute(self.session_key, attribute_key)
def remove_attributes(self, attribute_keys):
if attribute_keys:
return self.session_manager.remove_attributes(self.session_key, attribute_keys)
def __repr__(self):
return "{0}(session_id: {1})".format(self.__class__.__name__, self.session_id)
class NativeSessionManager(session_abcs.NativeSessionManager):
"""
Yosai's NativeSessionManager represents a massive refactoring of Shiro's
SessionManager object model. The refactoring is an ongoing effort to
replace a confusing inheritance-based mixin object graph with a compositional
    design. This compositional design continues to evolve. Event handling could
    be designed better, as it is currently split between the manager AND the
    session handler.
Pull Requests are welcome.
Touching Sessions
------------------
A session's last_access_time must be updated on every request. Updating
the last access timestamp is required for session validation to work
correctly as the timestamp is used to determine whether a session has timed
out due to inactivity.
In web applications, the [Shiro Filter] updates the session automatically
via the session.touch() method. For non-web environments (e.g. for RMI),
something else must call the touch() method to ensure the session
validation logic functions correctly.
"""
def __init__(self, settings, session_handler=NativeSessionHandler()):
session_settings = SessionSettings(settings)
self.absolute_timeout = session_settings.absolute_timeout
self.idle_timeout = session_settings.idle_timeout
self.event_bus = None
self.session_handler = session_handler
self._logger = get_logger()
def apply_cache_handler(self, cachehandler):
# no need for a local instance, just pass through
self.session_handler.session_store.cache_handler = cachehandler
def apply_event_bus(self, event_bus):
self.session_handler.event_bus = event_bus
self.event_bus = event_bus
# -------------------------------------------------------------------------
# Session Lifecycle Methods
# -------------------------------------------------------------------------
def start(self, session_context):
"""
unlike shiro, yosai does not apply session timeouts from within the
start method of the SessionManager but rather defers timeout settings
responsibilities to the SimpleSession, which uses session_settings
"""
session = self._create_session(session_context)
self.session_handler.on_start(session, session_context)
mysession = session_tuple(None, session.session_id)
self.notify_event(mysession, 'SESSION.START')
# Don't expose the EIS-tier Session object to the client-tier, but
# rather a DelegatingSession:
return self.create_exposed_session(session=session, context=session_context)
def stop(self, session_key, identifiers):
# type: (SessionKey, str) -> None
session = self._lookup_required_session(session_key)
try:
msg = "Stopping session with id [{0}]".format(session.session_id)
self._logger.debug(msg)
session.stop()
self.session_handler.on_stop(session, session_key)
idents = session.get_internal_attribute('identifiers_session_key')
if not idents:
idents = identifiers
mysession = session_tuple(idents, session_key.session_id)
self.notify_event(mysession, 'SESSION.STOP')
except InvalidSessionException:
raise
finally:
# DG: this results in a redundant delete operation (from shiro).
self.session_handler.after_stopped(session)
# -------------------------------------------------------------------------
# Session Creation Methods
# -------------------------------------------------------------------------
# consolidated with do_create_session:
def _create_session(self, session_context):
session = SimpleSession(self.absolute_timeout,
self.idle_timeout,
host=session_context.get('host'))
msg = "Creating session. "
self._logger.debug(msg)
sessionid = self.session_handler.create_session(session)
if not sessionid: # new to yosai
msg = 'Failed to obtain a sessionid while creating session.'
raise ValueError(msg)
return session
    # yosai.core introduces the keyword parameterization
def create_exposed_session(self, session, key=None, context=None):
# shiro ignores key and context parameters
return DelegatingSession(self, SessionKey(session.session_id))
# -------------------------------------------------------------------------
# Session Lookup Methods
# -------------------------------------------------------------------------
# called by mgt.ApplicationSecurityManager:
def get_session(self, key):
"""
:returns: DelegatingSession
"""
# a SimpleSession:
session = self.session_handler.do_get_session(key)
if session:
return self.create_exposed_session(session, key)
else:
return None
# called internally:
def _lookup_required_session(self, key):
# type: (SessionKey) -> SimpleSession
"""
:returns: SimpleSession
"""
session = self.session_handler.do_get_session(key)
if not session:
msg = ("Unable to locate required Session instance based "
"on session_key [{}].").format(str(key))
raise ValueError(msg)
return session
def is_valid(self, session_key):
"""
        if the session is missing or invalid, check_valid raises and this returns False
"""
try:
self.check_valid(session_key)
return True
except InvalidSessionException:
return False
def check_valid(self, session_key):
return self._lookup_required_session(session_key)
def get_start_timestamp(self, session_key):
return self._lookup_required_session(session_key).start_timestamp
def get_last_access_time(self, session_key):
return self._lookup_required_session(session_key).last_access_time
def get_absolute_timeout(self, session_key):
return self._lookup_required_session(session_key).absolute_timeout
def get_idle_timeout(self, session_key):
return self._lookup_required_session(session_key).idle_timeout
def set_idle_timeout(self, session_key, idle_time):
session = self._lookup_required_session(session_key)
session.idle_timeout = idle_time
self.session_handler.on_change(session)
def set_absolute_timeout(self, session_key, absolute_time):
session = self._lookup_required_session(session_key)
session.absolute_timeout = absolute_time
self.session_handler.on_change(session)
def touch(self, session_key):
session = self._lookup_required_session(session_key)
session.touch()
self.session_handler.on_change(session)
def get_host(self, session_key):
return self._lookup_required_session(session_key).host
def get_internal_attribute_keys(self, session_key):
session = self._lookup_required_session(session_key)
collection = session.internal_attribute_keys
try:
return tuple(collection)
except TypeError: # collection is None
return tuple()
def get_internal_attribute(self, session_key, attribute_key):
return self._lookup_required_session(session_key). \
get_internal_attribute(attribute_key)
def get_internal_attributes(self, session_key):
return self._lookup_required_session(session_key).internal_attributes
def set_internal_attribute(self, session_key, attribute_key, value=None):
session = self._lookup_required_session(session_key)
session.set_internal_attribute(attribute_key, value)
self.session_handler.on_change(session)
def set_internal_attributes(self, session_key, key_values):
session = self._lookup_required_session(session_key)
session.set_internal_attributes(key_values)
self.session_handler.on_change(session)
def remove_internal_attribute(self, session_key, attribute_key):
session = self._lookup_required_session(session_key)
removed = session.remove_internal_attribute(attribute_key)
if removed:
self.session_handler.on_change(session)
return removed
def remove_internal_attributes(self, session_key, to_remove):
session = self._lookup_required_session(session_key)
removed = session.remove_internal_attributes(to_remove)
if removed:
self.session_handler.on_change(session)
return removed
def get_attribute_keys(self, session_key):
collection = self._lookup_required_session(session_key).attribute_keys
try:
return tuple(collection)
except TypeError: # collection is None
return tuple()
def get_attribute(self, session_key, attribute_key):
return self._lookup_required_session(session_key). \
get_attribute(attribute_key)
def get_attributes(self, session_key, attribute_keys):
"""
:param session_key:
:type attribute_keys: a list of strings
"""
return self._lookup_required_session(session_key). \
get_attributes(attribute_keys)
def set_attribute(self, session_key, attribute_key, value=None):
if value is None:
self.remove_attribute(session_key, attribute_key)
else:
session = self._lookup_required_session(session_key)
session.set_attribute(attribute_key, value)
self.session_handler.on_change(session)
# new to yosai
def set_attributes(self, session_key, attributes):
# type: (SessionKey, Dict) -> None
session = self._lookup_required_session(session_key)
session.set_attributes(attributes)
self.session_handler.on_change(session)
def remove_attribute(self, session_key, attribute_key):
session = self._lookup_required_session(session_key)
removed = session.remove_attribute(attribute_key)
if removed is not None:
self.session_handler.on_change(session)
return removed
def remove_attributes(self, session_key, attribute_keys):
"""
:param session_key:
:type attribute_keys: a list of strings
"""
session = self._lookup_required_session(session_key)
removed = session.remove_attributes(attribute_keys)
if removed:
self.session_handler.on_change(session)
return removed
def notify_event(self, session, topic):
try:
self.event_bus.send_message(topic, items=session)
except AttributeError:
msg = "Could not publish {} event".format(topic)
raise AttributeError(msg)
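# Example (illustrative sketch): per the "Touching Sessions" note in the class
# docstring above, non-web deployments must touch the session on every request
# so idle-timeout validation stays accurate. `manager`, `session_key`,
# `handle_request` and `request` are hypothetical placeholders.
def _example_touch_per_request(manager, session_key, handle_request, request):
    if not manager.is_valid(session_key):
        raise InvalidSessionException('session expired or stopped')
    manager.touch(session_key)  # refresh last_access_time before doing work
    return handle_request(request)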
|
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import argparse
from oslo.serialization import jsonutils
from neutronclient.common import exceptions
from neutronclient.common import utils
from neutronclient.i18n import _
from neutronclient.neutron import v2_0 as neutronV20
def _format_fixed_ips(port):
try:
return '\n'.join([jsonutils.dumps(ip) for ip in port['fixed_ips']])
except (TypeError, KeyError):
return ''
def _format_fixed_ips_csv(port):
try:
return jsonutils.dumps(port['fixed_ips'])
except (TypeError, KeyError):
return ''
def _add_updatable_args(parser):
parser.add_argument(
'--name',
help=_('Name of this port.'))
parser.add_argument(
'--fixed-ip', metavar='subnet_id=SUBNET,ip_address=IP_ADDR',
action='append',
help=_('Desired IP and/or subnet for this port: '
'subnet_id=<name_or_id>,ip_address=<ip>. '
'You can repeat this option.'))
parser.add_argument(
'--fixed_ip',
action='append',
help=argparse.SUPPRESS)
parser.add_argument(
'--device-id',
help=_('Device ID of this port.'))
parser.add_argument(
'--device_id',
help=argparse.SUPPRESS)
parser.add_argument(
'--device-owner',
help=_('Device owner of this port.'))
parser.add_argument(
'--device_owner',
help=argparse.SUPPRESS)
def _updatable_args2body(parsed_args, body, client):
if parsed_args.device_id:
body['port'].update({'device_id': parsed_args.device_id})
if parsed_args.device_owner:
body['port'].update({'device_owner': parsed_args.device_owner})
if parsed_args.name:
body['port'].update({'name': parsed_args.name})
ips = []
if parsed_args.fixed_ip:
for ip_spec in parsed_args.fixed_ip:
ip_dict = utils.str2dict(ip_spec)
if 'subnet_id' in ip_dict:
subnet_name_id = ip_dict['subnet_id']
_subnet_id = neutronV20.find_resourceid_by_name_or_id(
client, 'subnet', subnet_name_id)
ip_dict['subnet_id'] = _subnet_id
ips.append(ip_dict)
if ips:
body['port'].update({'fixed_ips': ips})
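# Example (illustrative sketch): the shape of the request body built above for
# a single --fixed-ip option. The subnet id and address are made-up values and
# the subnet-name-to-UUID resolution step is skipped here for brevity.
def _example_fixed_ip_body():
    body = {'port': {}}
    ip_dict = utils.str2dict('subnet_id=sub-1234,ip_address=10.0.0.5')
    body['port']['fixed_ips'] = [ip_dict]
    return body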
class ListPort(neutronV20.ListCommand):
"""List ports that belong to a given tenant."""
resource = 'port'
_formatters = {'fixed_ips': _format_fixed_ips, }
_formatters_csv = {'fixed_ips': _format_fixed_ips_csv, }
list_columns = ['id', 'name', 'mac_address', 'fixed_ips']
pagination_support = True
sorting_support = True
class ListRouterPort(neutronV20.ListCommand):
"""List ports that belong to a given tenant, with specified router."""
resource = 'port'
_formatters = {'fixed_ips': _format_fixed_ips, }
list_columns = ['id', 'name', 'mac_address', 'fixed_ips']
pagination_support = True
sorting_support = True
def get_parser(self, prog_name):
parser = super(ListRouterPort, self).get_parser(prog_name)
parser.add_argument(
'id', metavar='router',
help=_('ID or name of router to look up.'))
return parser
def get_data(self, parsed_args):
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.id)
self.values_specs.append('--device_id=%s' % _id)
return super(ListRouterPort, self).get_data(parsed_args)
class ShowPort(neutronV20.ShowCommand):
"""Show information of a given port."""
resource = 'port'
class UpdatePortSecGroupMixin(object):
def add_arguments_secgroup(self, parser):
group_sg = parser.add_mutually_exclusive_group()
group_sg.add_argument(
'--security-group', metavar='SECURITY_GROUP',
default=[], action='append', dest='security_groups',
help=_('Security group associated with the port. You can '
'repeat this option.'))
group_sg.add_argument(
'--no-security-groups',
action='store_true',
help=_('Associate no security groups with the port.'))
def _resolv_sgid(self, secgroup):
return neutronV20.find_resourceid_by_name_or_id(
self.get_client(), 'security_group', secgroup)
def args2body_secgroup(self, parsed_args, port):
if parsed_args.security_groups:
port['security_groups'] = [self._resolv_sgid(sg) for sg
in parsed_args.security_groups]
elif parsed_args.no_security_groups:
port['security_groups'] = []
class UpdateExtraDhcpOptMixin(object):
def add_arguments_extradhcpopt(self, parser):
group_sg = parser.add_mutually_exclusive_group()
group_sg.add_argument(
'--extra-dhcp-opt',
default=[],
action='append',
dest='extra_dhcp_opts',
help=_('Extra dhcp options to be assigned to this port: '
'opt_name=<dhcp_option_name>,opt_value=<value>,'
'ip_version={4,6}. You can repeat this option.'))
def args2body_extradhcpopt(self, parsed_args, port):
ops = []
if parsed_args.extra_dhcp_opts:
            # the extra_dhcp_opt params (opt_name & opt_value)
            # must come in pairs; if there is a parameter error,
            # both must be thrown out.
opt_ele = {}
edo_err_msg = _("Invalid --extra-dhcp-opt option, can only be: "
"opt_name=<dhcp_option_name>,opt_value=<value>,"
"ip_version={4,6}. "
"You can repeat this option.")
for opt in parsed_args.extra_dhcp_opts:
opt_ele.update(utils.str2dict(opt))
if ('opt_name' in opt_ele and
('opt_value' in opt_ele or 'ip_version' in opt_ele)):
if opt_ele.get('opt_value') == 'null':
opt_ele['opt_value'] = None
ops.append(opt_ele)
opt_ele = {}
else:
raise exceptions.CommandError(edo_err_msg)
if ops:
port.update({'extra_dhcp_opts': ops})
class CreatePort(neutronV20.CreateCommand, UpdatePortSecGroupMixin,
UpdateExtraDhcpOptMixin):
"""Create a port for a given tenant."""
resource = 'port'
def add_known_arguments(self, parser):
_add_updatable_args(parser)
parser.add_argument(
'--admin-state-down',
dest='admin_state', action='store_false',
help=_('Set admin state up to false.'))
parser.add_argument(
'--admin_state_down',
dest='admin_state', action='store_false',
help=argparse.SUPPRESS)
parser.add_argument(
'--mac-address',
help=_('MAC address of this port.'))
parser.add_argument(
'--mac_address',
help=argparse.SUPPRESS)
self.add_arguments_secgroup(parser)
self.add_arguments_extradhcpopt(parser)
parser.add_argument(
'network_id', metavar='NETWORK',
help=_('Network ID or name this port belongs to.'))
def args2body(self, parsed_args):
client = self.get_client()
_network_id = neutronV20.find_resourceid_by_name_or_id(
client, 'network', parsed_args.network_id)
body = {'port': {'admin_state_up': parsed_args.admin_state,
'network_id': _network_id, }, }
_updatable_args2body(parsed_args, body, client)
if parsed_args.mac_address:
body['port'].update({'mac_address': parsed_args.mac_address})
if parsed_args.tenant_id:
body['port'].update({'tenant_id': parsed_args.tenant_id})
self.args2body_secgroup(parsed_args, body['port'])
self.args2body_extradhcpopt(parsed_args, body['port'])
return body
class DeletePort(neutronV20.DeleteCommand):
"""Delete a given port."""
resource = 'port'
class UpdatePort(neutronV20.UpdateCommand, UpdatePortSecGroupMixin,
UpdateExtraDhcpOptMixin):
"""Update port's information."""
resource = 'port'
def add_known_arguments(self, parser):
_add_updatable_args(parser)
parser.add_argument(
'--admin-state-up',
choices=['True', 'False'],
help=_('Set admin state up for the port.'))
parser.add_argument(
'--admin_state_up',
choices=['True', 'False'],
help=argparse.SUPPRESS)
self.add_arguments_secgroup(parser)
self.add_arguments_extradhcpopt(parser)
def args2body(self, parsed_args):
body = {'port': {}}
client = self.get_client()
_updatable_args2body(parsed_args, body, client)
if parsed_args.admin_state_up:
body['port'].update({'admin_state_up':
parsed_args.admin_state_up})
self.args2body_secgroup(parsed_args, body['port'])
self.args2body_extradhcpopt(parsed_args, body['port'])
return body
|
|
"""
This module is for inspecting OGR data sources and generating models for
GeoDjango and/or mapping dictionaries for use with the
`LayerMapping` utility.
Author: Travis Pinney, Dane Springmeyer, & Justin Bronn
"""
from itertools import izip
# Requires GDAL to use.
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.gdal.field import OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
"""
Given a DataSource, generates a dictionary that may be used
for invoking the LayerMapping utility.
Keyword Arguments:
`geom_name` => The name of the geometry field to use for the model.
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
"""
if isinstance(data_source, basestring):
# Instantiating the DataSource from the string.
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Creating the dictionary.
_mapping = {}
# Generating the field name for each field in the layer.
for field in data_source[layer_key].fields:
mfield = field.lower()
if mfield[-1:] == '_': mfield += 'field'
_mapping[mfield] = field
gtype = data_source[layer_key].geom_type
if multi_geom and gtype.num in (1, 2, 3): prefix = 'MULTI'
else: prefix = ''
_mapping[geom_name] = prefix + str(gtype).upper()
return _mapping
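# Example (illustrative sketch): feeding the generated dictionary to
# LayerMapping. `shp_path` and `model_class` are hypothetical placeholders
# supplied by the caller.
def _example_mapping_usage(shp_path, model_class):
    from django.contrib.gis.utils import LayerMapping
    field_map = mapping(shp_path, geom_name='geom', multi_geom=True)
    LayerMapping(model_class, shp_path, field_map).save()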
def ogrinspect(*args, **kwargs):
"""
Given a data source (either a string or a DataSource object) and a string
model name this function will generate a GeoDjango model.
Usage:
>>> from django.contrib.gis.utils import ogrinspect
>>> ogrinspect('/path/to/shapefile.shp','NewModel')
    ...will print the model definition to stdout
or put this in a python script and use to redirect the output to a new
model like:
$ python generate_model.py > myapp/models.py
# generate_model.py
from django.contrib.gis.utils import ogrinspect
shp_file = 'data/mapping_hacks/world_borders.shp'
model_name = 'WorldBorders'
print ogrinspect(shp_file, model_name, multi_geom=True, srid=4326,
geom_name='shapes', blank=True)
Required Arguments
    `datasource` => string path or DataSource object pointing to the data source
`model name` => string of name of new model class to create
Optional Keyword Arguments
`geom_name` => For specifying the model name for the Geometry Field.
Otherwise will default to `geom`
`layer_key` => The key for specifying which layer in the DataSource to use;
defaults to 0 (the first layer). May be an integer index or a string
identifier for the layer.
`srid` => The SRID to use for the Geometry Field. If it can be determined,
the SRID of the datasource is used.
`multi_geom` => Boolean (default: False) - specify as multigeometry.
`name_field` => String - specifies a field name to return for the
`__unicode__` function (which will be generated if specified).
`imports` => Boolean (default: True) - set to False to omit the
`from django.contrib.gis.db import models` code from the
autogenerated models thus avoiding duplicated imports when building
more than one model by batching ogrinspect()
`decimal` => Boolean or sequence (default: False). When set to True
all generated model fields corresponding to the `OFTReal` type will
be `DecimalField` instead of `FloatField`. A sequence of specific
field names to generate as `DecimalField` may also be used.
`blank` => Boolean or sequence (default: False). When set to True all
generated model fields will have `blank=True`. If the user wants to
give specific fields to have blank, then a list/tuple of OGR field
names may be used.
    `null` => Boolean or sequence (default: False). When set to True all
       generated model fields will have `null=True`. If only specific fields
       should allow null, a list/tuple of OGR field names may be used instead.
Note: This routine calls the _ogrinspect() helper to do the heavy lifting.
"""
return '\n'.join(s for s in _ogrinspect(*args, **kwargs))
def _ogrinspect(data_source, model_name, geom_name='geom', layer_key=0, srid=None,
multi_geom=False, name_field=None, imports=True,
decimal=False, blank=False, null=False):
"""
Helper routine for `ogrinspect` that generates GeoDjango models corresponding
to the given data source. See the `ogrinspect` docstring for more details.
"""
# Getting the DataSource
if isinstance(data_source, str):
data_source = DataSource(data_source)
elif isinstance(data_source, DataSource):
pass
else:
raise TypeError('Data source parameter must be a string or a DataSource object.')
# Getting the layer corresponding to the layer key and getting
# a string listing of all OGR fields in the Layer.
layer = data_source[layer_key]
ogr_fields = layer.fields
# Creating lists from the `null`, `blank`, and `decimal`
# keyword arguments.
def process_kwarg(kwarg):
if isinstance(kwarg, (list, tuple)):
return [s.lower() for s in kwarg]
elif kwarg:
return [s.lower() for s in ogr_fields]
else:
return []
null_fields = process_kwarg(null)
blank_fields = process_kwarg(blank)
decimal_fields = process_kwarg(decimal)
# Gets the `null` and `blank` keywords for the given field name.
def get_kwargs_str(field_name):
kwlist = []
if field_name.lower() in null_fields: kwlist.append('null=True')
if field_name.lower() in blank_fields: kwlist.append('blank=True')
if kwlist: return ', ' + ', '.join(kwlist)
else: return ''
# For those wishing to disable the imports.
if imports:
yield '# This is an auto-generated Django model module created by ogrinspect.'
yield 'from django.contrib.gis.db import models'
yield ''
yield 'class %s(models.Model):' % model_name
for field_name, width, precision, field_type in izip(ogr_fields, layer.field_widths, layer.field_precisions, layer.field_types):
# The model field name.
mfield = field_name.lower()
if mfield[-1:] == '_': mfield += 'field'
# Getting the keyword args string.
kwargs_str = get_kwargs_str(field_name)
if field_type is OFTReal:
# By default OFTReals are mapped to `FloatField`, however, they
# may also be mapped to `DecimalField` if specified in the
# `decimal` keyword.
if field_name.lower() in decimal_fields:
yield ' %s = models.DecimalField(max_digits=%d, decimal_places=%d%s)' % (mfield, width, precision, kwargs_str)
else:
yield ' %s = models.FloatField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTInteger:
yield ' %s = models.IntegerField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTString:
yield ' %s = models.CharField(max_length=%s%s)' % (mfield, width, kwargs_str)
elif field_type is OFTDate:
yield ' %s = models.DateField(%s)' % (mfield, kwargs_str[2:])
elif field_type is OFTDateTime:
yield ' %s = models.DateTimeField(%s)' % (mfield, kwargs_str[2:])
        elif field_type is OFTTime:
yield ' %s = models.TimeField(%s)' % (mfield, kwargs_str[2:])
else:
raise TypeError('Unknown field type %s in %s' % (field_type, mfield))
# TODO: Autodetection of multigeometry types (see #7218).
gtype = layer.geom_type
if multi_geom and gtype.num in (1, 2, 3):
geom_field = 'Multi%s' % gtype.django
else:
geom_field = gtype.django
# Setting up the SRID keyword string.
if srid is None:
if layer.srs is None:
srid_str = 'srid=-1'
else:
srid = layer.srs.srid
if srid is None:
srid_str = 'srid=-1'
elif srid == 4326:
# WGS84 is already the default.
srid_str = ''
else:
srid_str = 'srid=%s' % srid
else:
srid_str = 'srid=%s' % srid
yield ' %s = models.%s(%s)' % (geom_name, geom_field, srid_str)
yield ' objects = models.GeoManager()'
if name_field:
yield ''
yield ' def __unicode__(self): return self.%s' % name_field
|
|
import distutils.version
import sys
import subprocess
import re
import os
import difflib
from functools import wraps
from pkg_resources import resource_filename
from io import StringIO
from collections import namedtuple
from contextlib import contextmanager
import numpy
import pandas
import pytest
def get_img_tolerance():
return int(os.environ.get("MPL_IMGCOMP_TOLERANCE", 15))
def seed(func):
""" Decorator to seed the RNG before any function. """
@wraps(func)
def wrapper(*args, **kwargs):
numpy.random.seed(0)
return func(*args, **kwargs)
return wrapper
def raises(error):
"""Wrapper around pytest.raises to support None."""
if error:
return pytest.raises(error)
else:
@contextmanager
def not_raises():
try:
yield
except Exception as e:
raise e
return not_raises()
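# Example (illustrative sketch): using raises() in a parametrized style so that
# error=None means "no exception expected". The values below are made up.
def _example_raises_usage():
    for value, error in [(1, None), (-1, ValueError)]:
        with raises(error):
            if value < 0:
                raise ValueError("negative value")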
def requires(module, modulename):
def outer_wrapper(function):
@wraps(function)
def inner_wrapper(*args, **kwargs):
if module is None:
raise RuntimeError(
"{} required for `{}`".format(modulename, function.__name__)
)
else:
return function(*args, **kwargs)
return inner_wrapper
return outer_wrapper
@seed
def make_dc_data(ndval="ND", rescol="res", qualcol="qual"):
dl_map = {
"A": 0.1,
"B": 0.2,
"C": 0.3,
"D": 0.4,
"E": 0.1,
"F": 0.2,
"G": 0.3,
"H": 0.4,
}
index = pandas.MultiIndex.from_product(
[
list("ABCDEFGH"),
list("1234567"),
["GA", "AL", "OR", "CA"],
["Inflow", "Outflow", "Reference"],
],
names=["param", "bmp", "state", "loc"],
)
array = numpy.random.lognormal(mean=0.75, sigma=1.25, size=len(index))
data = pandas.DataFrame(data=array, index=index, columns=[rescol])
data["DL"] = data.apply(lambda r: dl_map.get(r.name[0]), axis=1)
data[rescol] = data.apply(
lambda r: dl_map.get(r.name[0]) if r[rescol] < r["DL"] else r[rescol], axis=1
)
data[qualcol] = data.apply(lambda r: ndval if r[rescol] <= r["DL"] else "=", axis=1)
return data
@seed
def make_dc_data_complex(dropsome=True):
dl_map = {
"A": 0.25,
"B": 0.50,
"C": 0.10,
"D": 1.00,
"E": 0.25,
"F": 0.50,
"G": 0.10,
"H": 1.00,
}
index = pandas.MultiIndex.from_product(
[
list("ABCDEFGH"),
list("1234567"),
["GA", "AL", "OR", "CA"],
["Inflow", "Outflow", "Reference"],
],
names=["param", "bmp", "state", "loc"],
)
xtab = (
pandas.DataFrame(index=index, columns=["res"])
.unstack(level="param")
.unstack(level="state")
)
xtab_rows = xtab.shape[0]
for c in xtab.columns:
mu = numpy.random.uniform(low=-1.7, high=2)
sigma = numpy.random.uniform(low=0.1, high=2)
xtab[c] = numpy.random.lognormal(mean=mu, sigma=sigma, size=xtab_rows)
data = xtab.stack(level="state").stack(level="param")
data["DL"] = data.apply(lambda r: dl_map.get(r.name[-1]), axis=1)
data["res"] = data.apply(
lambda r: dl_map.get(r.name[-1]) if r["res"] < r["DL"] else r["res"], axis=1
)
data["qual"] = data.apply(lambda r: "<" if r["res"] <= r["DL"] else "=", axis=1)
if dropsome:
if int(dropsome) == 1:
dropsome = 0.25
index = numpy.random.uniform(size=data.shape[0]) >= dropsome
data = data.loc[index]
return data
def comp_statfxn(x, y):
stat = namedtuple("teststat", ("statistic", "pvalue"))
result = x.max() - y.min()
return stat(result, result * 0.25)
def test_data_path(filename):
path = resource_filename("wqio.tests._data", filename)
return path
def getTestROSData():
"""
Generates test data for an ROS estimate.
Input:
None
Output:
        pandas DataFrame with the values (results or DLs) and qualifiers
        ("=" or "ND" for non-detects)
"""
raw_csv = StringIO(
"res,qual\n2.00,=\n4.20,=\n4.62,=\n5.00,ND\n5.00,ND\n5.50,ND\n"
"5.57,=\n5.66,=\n5.75,ND\n5.86,=\n6.65,=\n6.78,=\n6.79,=\n7.50,=\n"
"7.50,=\n7.50,=\n8.63,=\n8.71,=\n8.99,=\n9.50,ND\n9.50,ND\n9.85,=\n"
"10.82,=\n11.00,ND\n11.25,=\n11.25,=\n12.20,=\n14.92,=\n16.77,=\n"
"17.81,=\n19.16,=\n19.19,=\n19.64,=\n20.18,=\n22.97,=\n"
)
return pandas.read_csv(raw_csv)
def compare_versions(utility="latex"): # pragma: no cover
"return True if a is greater than or equal to b"
requirements = {"latex": "3.1415"}
available = {"latex": checkdep_tex()}
required = requirements[utility]
present = available[utility]
if present:
present = distutils.version.LooseVersion(present)
required = distutils.version.LooseVersion(required)
if present >= required:
return True
else:
return False
else:
return False
def _show_package_info(package, name): # pragma: no cover
packagedir = os.path.dirname(package.__file__)
print("%s version %s is installed in %s" % (name, package.__version__, packagedir))
def _show_system_info(): # pragma: no cover
import pytest
pyversion = sys.version.replace("\n", "")
print("Python version %s" % pyversion)
print("pytest version %d.%d.%d" % pytest.__versioninfo__)
import numpy
_show_package_info(numpy, "numpy")
import scipy
_show_package_info(scipy, "scipy")
import matplotlib
_show_package_info(matplotlib, "matplotlib")
import statsmodels
_show_package_info(statsmodels, "statsmodels")
import pandas
_show_package_info(pandas, "pandas")
def checkdep_tex(): # pragma: no cover
if sys.version_info[0] >= 3:
def byte2str(b):
return b.decode("ascii")
else: # pragma: no cover
def byte2str(b):
return b
try:
s = subprocess.Popen(
["tex", "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
line = byte2str(s.stdout.readlines()[0])
pattern = "3\.1\d+"
match = re.search(pattern, line)
v = match.group(0)
return v
except (IndexError, ValueError, AttributeError, OSError):
return None
def assert_bigstring_equal(
input_string, known_string, input_out=None, known_out=None
): # pragma: no cover
if input_string != known_string:
if input_out and known_out:
with open(input_out, "w") as fi:
fi.write(input_string)
with open(known_out, "w") as fo:
fo.write(known_string)
message = "".join(
difflib.ndiff(input_string.splitlines(True), known_string.splitlines(True))
)
raise AssertionError("Multi-line strings are unequal:\n" + message)
|
|
import attr
import nltk
import spacy
from collections import OrderedDict
from functools import partial
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag
from nltk.corpus import wordnet as wn
from pywsd.lesk import simple_lesk
from typos import typos
nlp = spacy.load('en')
# Penn TreeBank POS tags:
# http://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
supported_pos_tags = [
# 'CC', # coordinating conjunction
# 'CD', # Cardinal number
# 'DT', # Determiner
# 'EX', # Existential there
# 'FW', # Foreign word
# 'IN', # Preposition or subordinating conjunction
'JJ', # Adjective
# 'JJR', # Adjective, comparative
# 'JJS', # Adjective, superlative
# 'LS', # List item marker
# 'MD', # Modal
'NN', # Noun, singular or mass
'NNS', # Noun, plural
'NNP', # Proper noun, singular
'NNPS', # Proper noun, plural
# 'PDT', # Predeterminer
# 'POS', # Possessive ending
# 'PRP', # Personal pronoun
# 'PRP$', # Possessive pronoun
'RB', # Adverb
# 'RBR', # Adverb, comparative
# 'RBS', # Adverb, superlative
# 'RP', # Particle
# 'SYM', # Symbol
# 'TO', # to
# 'UH', # Interjection
'VB', # Verb, base form
'VBD', # Verb, past tense
'VBG', # Verb, gerund or present participle
'VBN', # Verb, past participle
'VBP', # Verb, non-3rd person singular present
'VBZ', # Verb, 3rd person singular present
# 'WDT', # Wh-determiner
# 'WP', # Wh-pronoun
# 'WP$', # Possessive wh-pronoun
# 'WRB', # Wh-adverb
]
@attr.s
class SubstitutionCandidate:
token_position = attr.ib()
similarity_rank = attr.ib()
original_token = attr.ib()
candidate_word = attr.ib()
def vsm_similarity(doc, original, synonym):
window_size = 3
start = max(0, original.i - window_size)
return doc[start: original.i + window_size].similarity(synonym)
def _get_wordnet_pos(spacy_token):
'''Wordnet POS tag'''
pos = spacy_token.tag_[0].lower()
if pos in ['a', 'n', 'v']:
return pos
def _synonym_prefilter_fn(token, synonym):
'''
    Prefilter heuristics that reject unusable synonym candidates go here
'''
if (len(synonym.text.split()) > 2) or \
(synonym.lemma == token.lemma) or \
(synonym.tag != token.tag) or \
(token.text.lower() == 'be'):
return False
else:
return True
def _generate_synonym_candidates(doc, disambiguate=False, rank_fn=None):
'''
Generate synonym candidates.
For each token in the doc, the list of WordNet synonyms is expanded.
    The synonyms are then ranked by their GloVe similarity to the original
token and a context window around the token.
:param disambiguate: Whether to use lesk sense disambiguation before
expanding the synonyms.
    :param rank_fn: Function that takes (doc, original_token, synonym) and
returns a similarity score
'''
if rank_fn is None:
rank_fn=vsm_similarity
candidates = []
for position, token in enumerate(doc):
if token.tag_ in supported_pos_tags:
wordnet_pos = _get_wordnet_pos(token)
wordnet_synonyms = []
if disambiguate:
try:
                    synset = simple_lesk(
                        doc.text, token.text, pos=wordnet_pos)
wordnet_synonyms = synset.lemmas()
                except Exception:
continue
else:
synsets = wn.synsets(token.text, pos=wordnet_pos)
for synset in synsets:
wordnet_synonyms.extend(synset.lemmas())
synonyms = []
for wordnet_synonym in wordnet_synonyms:
spacy_synonym = nlp(wordnet_synonym.name().replace('_', ' '))[0]
synonyms.append(spacy_synonym)
synonyms = filter(partial(_synonym_prefilter_fn, token),
synonyms)
synonyms = reversed(sorted(synonyms,
key=partial(rank_fn, doc, token)))
for rank, synonym in enumerate(synonyms):
candidate_word = synonym.text
candidate = SubstitutionCandidate(
token_position=position,
similarity_rank=rank,
original_token=token,
candidate_word=candidate_word)
candidates.append(candidate)
return candidates
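# Example (illustrative sketch): generating ranked synonym candidates for a
# short document. The sentence is a made-up placeholder and the exact output
# depends on the loaded spaCy model and the local WordNet data.
def _example_synonym_candidates():
    doc = nlp('the quick dog ran home')
    candidates = _generate_synonym_candidates(doc)
    # each entry records where the substitution applies and what word is proposed
    return [(c.token_position, c.candidate_word) for c in candidates[:5]]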
def _generate_typo_candidates(doc, min_token_length=4, rank=1000):
candidates = []
for position, token in enumerate(doc):
if (len(token)) < min_token_length:
continue
for typo in typos(token.text):
candidate = SubstitutionCandidate(
token_position=position,
similarity_rank=rank,
original_token=token,
candidate_word=typo)
candidates.append(candidate)
return candidates
def _compile_perturbed_tokens(doc, accepted_candidates):
'''
Traverse the list of accepted candidates and do the token substitutions.
'''
candidate_by_position = {}
for candidate in accepted_candidates:
candidate_by_position[candidate.token_position] = candidate
final_tokens = []
for position, token in enumerate(doc):
word = token.text
if position in candidate_by_position:
candidate = candidate_by_position[position]
word = candidate.candidate_word.replace('_', ' ')
final_tokens.append(word)
return final_tokens
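# Example (illustrative sketch): substituting a single token of a short
# document. The sentence and the replacement word are made-up placeholders.
def _example_compile_perturbed_tokens():
    doc = nlp('the cat sat')
    candidate = SubstitutionCandidate(
        token_position=1, similarity_rank=0,
        original_token=doc[1], candidate_word='kitten')
    return ' '.join(_compile_perturbed_tokens(doc, [candidate]))  # 'the kitten sat'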
def perturb_text(
doc,
use_typos=True,
rank_fn=None,
heuristic_fn=None,
halt_condition_fn=None,
verbose=False):
'''
    Perturb the text by replacing some words with their WordNet synonyms,
    sorted by GloVe similarity between the synonym and the original context
    window, plus an optional heuristic.
:param doc: Document to perturb.
:type doc: spacy.tokens.doc.Doc
    :param rank_fn: See `_generate_synonym_candidates`.
:param heuristic_fn: Ranks the best synonyms using the heuristic.
If the value of the heuristic is negative, the candidate
substitution is rejected.
:param halt_condition_fn: Returns true when the perturbation is
satisfactory enough.
:param verbose:
'''
heuristic_fn = heuristic_fn or (lambda _, candidate: candidate.similarity_rank)
halt_condition_fn = halt_condition_fn or (lambda perturbed_text: False)
candidates = _generate_synonym_candidates(doc, rank_fn=rank_fn)
if use_typos:
candidates.extend(_generate_typo_candidates(doc))
perturbed_positions = set()
accepted_candidates = []
perturbed_text = doc.text
if verbose:
print('Got {} candidates'.format(len(candidates)))
sorted_candidates = zip(
map(partial(heuristic_fn, perturbed_text), candidates),
candidates)
sorted_candidates = list(sorted(sorted_candidates,
key=lambda t: t[0]))
while len(sorted_candidates) > 0 and not halt_condition_fn(perturbed_text):
score, candidate = sorted_candidates.pop()
if score < 0:
continue
if candidate.token_position not in perturbed_positions:
perturbed_positions.add(candidate.token_position)
accepted_candidates.append(candidate)
if verbose:
print('Candidate:', candidate)
print('Candidate score:', heuristic_fn(perturbed_text, candidate))
print('Candidate accepted.')
perturbed_text = ' '.join(
_compile_perturbed_tokens(doc, accepted_candidates))
if len(sorted_candidates) > 0:
_, candidates = zip(*sorted_candidates)
sorted_candidates = zip(
map(partial(heuristic_fn, perturbed_text),
candidates),
candidates)
sorted_candidates = list(sorted(sorted_candidates,
key=lambda t: t[0]))
return perturbed_text
if __name__ == '__main__':
texts = [
"Human understanding of nutrition for animals is improving. *Except* for the human animal. If only nutritionists thought humans were animals.",
"Theory: a climate change denialist has no more inherent right to a media platform than someone who insists the moon may be made of cheese.",
"Soft skills like sharing and negotiating will be crucial. He says the modern workplace, where people move between different roles and projects, closely resembles pre-school classrooms, where we learn social skills such as empathy and cooperation. Deming has mapped the changing needs of employers and identified key skills that will be required to thrive in the job market of the near future. Along with those soft skills, mathematical ability will be enormously beneficial."
]
def print_paraphrase(text):
print('Original text:', text)
doc = nlp(text)
perturbed_text = perturb_text(doc, verbose=True)
print('Perturbed text:', perturbed_text)
for text in texts:
print_paraphrase(text)
|
|
#!/usr/bin/env python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Django template tag library containing forms helpers.
"""
__authors__ = [
'"Mario Ferraro" <fadinlight@gmail.com>',
'"Todd Larsen" <tlarsen@google.com>',
'"Pawel Solyga" <pawel.solyga@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import re
from django import template
from django.forms import forms as forms_in
from django.template import defaultfilters as djangofilter
from django.utils import simplejson
from django.utils.encoding import force_unicode
from django.utils.html import escape
from soc.logic import accounts
from soc.logic import dicts
from soc.views.helper import widgets
register = template.Library()
@register.inclusion_tag('soc/templatetags/_as_user.html')
def as_user(user):
"""Prints a user as a hyperlinked link_id.
"""
return {'user': user}
@register.inclusion_tag('soc/templatetags/_as_email.html')
def as_email(account):
"""Prints a user as a hyperlinked link_id.
"""
denormalized = accounts.denormalizeAccount(account)
return {'email': denormalized.email()}
@register.inclusion_tag('soc/templatetags/_as_upload_form.html')
def as_upload_form(form, form_name, submit_button_text):
"""Prints an upload form.
"""
return {
'form': form,
'form_name': form_name,
'submit_button_text': submit_button_text,
}
@register.inclusion_tag('soc/templatetags/_field_as_table_row.html')
def field_as_table_row(field):
"""Prints a newforms field as a table row.
This function actually does very little, simply passing the supplied
form field instance in a simple context used by the _field_as_table_row.html
template (which is actually doing all of the work).
See soc/templates/soc/templatetags/_field_as_table_row.html for the CSS
styles used by this template tag.
Usage:
{% load forms_helpers %}
...
<table>
{% field_as_table_row form.fieldname %}
...
</table>
Args:
field: a Django newforms field instance
Returns:
a simple context containing the supplied newforms field instance:
{ 'field': field }
"""
return {'field': field}
@register.inclusion_tag('soc/templatetags/_readonly_field_as_table_row.html')
def readonly_field_as_table_row(label, value):
"""Prints a field value and it's verbose name as a table row.
This function actually does very little, simply passing the
supplied field_label and field_value in a simple context used by the
_readonly_field_as_table_row.html template (which is actually
doing all of the work).
See soc/templates/soc/templatetags/_readonly_field_as_table_row.html for
the CSS styles used by this template tag.
Usage:
{% load forms_helpers %}
...
<table>
{% readonly_field_as_table_row field_label field_value %}
...
</table>
Args:
field_label: label of the field to render
field_value: value of the field to render
Returns:
a simple context containing the supplied newforms field instance:
    { 'field_label': field_label,
      'field_value': field_value }
"""
value = value.strip() if isinstance(value, basestring) else value
return {'field_label': label,
'field_value': value}
@register.inclusion_tag(
'soc/templatetags/_readonly_field_as_twoline_table_row.html')
def readonly_field_as_twoline_table_row(label, value):
"""See readonly_field_as_table_row().
"""
value = value.strip() if isinstance(value, basestring) else value
return {'field_label': label,
'field_value': value}
@register.inclusion_tag('soc/templatetags/_readonly_field_as_table_row.html')
def readonly_date_field_as_table_row(label, value):
"""Prints a field value formatted as the given format string.
"""
import datetime
if isinstance(value, datetime.datetime):
if value.day % 10 == 1 and value.day != 11:
ord_suf = 'st'
elif value.day % 10 == 2 and value.day != 12:
ord_suf = 'nd'
elif value.day % 10 == 3 and value.day != 13:
ord_suf = 'rd'
else:
ord_suf = 'th'
fmt = "%d" + ord_suf + " %B %Y, %H:%M"
value = value.strftime(fmt)
return {'field_label': label,
'field_value': value}
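# Example (illustrative sketch): the ordinal formatting above turns
# datetime(2009, 3, 21, 14, 30) into "21st March 2009, 14:30". The date is a
# made-up placeholder value.
def _example_readonly_date_row():
  import datetime
  ctx = readonly_date_field_as_table_row(
      'Created on', datetime.datetime(2009, 3, 21, 14, 30))
  return ctx['field_value']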
@register.inclusion_tag(
'soc/templatetags/_readonly_url_field_as_table_row.html')
def readonly_url_field_as_table_row(field_label, field_value):
"""See readonly_field_as_table_row().
"""
return {'field_label': field_label,
'field_value': field_value}
@register.inclusion_tag(
'soc/templatetags/_readonly_url_field_as_twoline_table_row.html')
def readonly_url_field_as_twoline_table_row(field_label, field_value):
"""See readonly_field_as_table_row().
"""
return {'field_label': field_label,
'field_value': field_value}
@register.inclusion_tag(
'soc/templatetags/_readonly_email_field_as_table_row.html')
def readonly_email_field_as_table_row(field_label, field_value):
"""See readonly_field_as_table_row().
"""
return {'field_label': field_label,
'field_value': field_value}
@register.inclusion_tag(
'soc/templatetags/_readonly_safe_field_as_table_row.html')
def readonly_safe_field_as_table_row(field_label, field_value):
"""See readonly_field_as_table_row().
"""
return {'field_label': field_label,
'field_value': field_value}
@register.inclusion_tag(
'soc/templatetags/_readonly_safe_field_as_twoline_table_row.html')
def readonly_safe_field_as_twoline_table_row(field_label, field_value):
"""See readonly_field_as_table_row().
"""
return {'field_label': field_label,
'field_value': field_value}
@register.inclusion_tag('soc/templatetags/_as_readonly_table.html',
takes_context=True)
def as_readonly_table(context, form):
"""Outputs a form as a properly formatted html table.
Args:
form: the form that should be converted to a table
"""
# create the bound fields
fields = [forms_in.BoundField(form, field, name) for name, field in
form.fields.items() if field]
return {'fields': fields}
@register.inclusion_tag('soc/templatetags/_as_table.html', takes_context=True)
def as_table(context, form):
"""Outputs a form as a properly formatted html table.
Args:
form: the form that should be converted to a table
"""
return as_table_helper(context, form)
@register.inclusion_tag('soc/templatetags/_as_twoline_table.html',
takes_context=True)
def as_twoline_table(context, form):
"""Outputs a form as a properly formatted html table.
Args:
form: the form that should be converted to a table
"""
return as_table_helper(context, form)
def as_table_helper(context, form):
"""See as_table().
"""
fields = []
hidden_fields = []
hidden_fields_errors = []
errors = False
# entity = context['entity']
# support defining output order like in Django
  if hasattr(form, 'Meta') and form.Meta.fields:
items = [(i, form.fields[i]) for i in form.Meta.fields]
else:
items = form.fields.items()
  # Iterate over all fields and prepare them for rendering
for name, field in items:
if not field:
continue
bf = forms_in.BoundField(form, field, name)
attrs = {}
if isinstance(field, widgets.ReferenceField):
attrs = field.rf
# If the field is hidden we display it elsewhere
if not bf.is_hidden:
if bf.errors:
errors = True
example_text = ''
group = '0. '
if hasattr(field, 'group'):
group = field.group
if hasattr(field, 'example_text'):
example_text = force_unicode(field.example_text)
item = {
'field': bf,
'required': field.required,
'example_text': example_text,
'group': group,
}
item.update(attrs)
fields.append(item)
else:
hidden_fields.append(unicode(bf))
for error in bf.errors:
item = (name, force_unicode(error))
hidden_fields_errors.append(item)
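  # Group the visible fields by their 'group' attribute (default '0. ') so
  # they render as ordered sections; a hypothetical '1. References' group,
  # for example, would sort after the default group. The numeric prefix is
  # stripped from the displayed group name below.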
grouped = dicts.groupby(fields, 'group')
  # Strip the numeric ordering prefix (e.g. "0. ") from the group names.
  rexp = re.compile(r"\d+\. ")
fields = [(rexp.sub('', key), grouped[key]) for key in sorted(grouped)]
context.update({
'top_errors': form.non_field_errors() or '',
'hidden_field_errors': hidden_fields_errors or '',
'errors': errors,
'groups': fields if fields else '',
'hidden_fields': hidden_fields or '',
'form': form,
'json_for_js': {},
})
return context
@register.inclusion_tag('soc/templatetags/_as_table_row.html',
takes_context=True)
def as_table_row(context, item):
"""Outputs a field as a properly formatted html row.
Args:
item: the item that is being rendered
"""
return as_table_row_helper(context, item)
@register.inclusion_tag('soc/templatetags/_as_twoline_table_row.html',
takes_context=True)
def as_twoline_table_row(context, item):
"""See as_table_row().
"""
return as_table_row_helper(context, item)
def as_table_row_helper(context, item):
"""See as_table_row().
"""
field = item['field']
required = item['required']
example_text = item['example_text']
form = context['form']
entity = context.get('entity', None)
reference = item.get('reference_url')
filter = item.get('filter')
filter_fields = item.get('filter_fields')
# Escape and cache in local variable.
errors = [force_unicode(escape(error)) for error in field.errors]
if reference:
from soc.views.helper import redirects
params = {
'url_name': reference,
}
args = {}
if entity:
for filter_field, filter_value in filter_fields.iteritems():
args[filter_field] = filter_value
for filter_field in (i for i in filter if hasattr(entity, i)):
args[filter_field] = getattr(entity, filter_field)
if '__scoped__' in filter:
args['scope_path'] = entity.key().id_or_name()
# TODO: replace this hack needed to get org-scoped mentor
# autocompletion on student proposals
if '__org__' in filter:
args['scope_path'] = entity.org.key().id_or_name()
# even if the entity is not available, it can still be necessary
# to access its potential scope path
elif 'scope_path' in filter and 'scope_path' in context:
args['scope_path'] = context['scope_path']
params['args'] = '&'.join(['%s=%s' % item for item in args.iteritems()])
select_url = redirects.getSelectRedirect(params)
if field.label:
label = escape(force_unicode(field.label))
# Only add the suffix if the label does not end in punctuation.
if form.label_suffix and (label[-1] not in ':?.!'):
label += form.label_suffix
label = field.label_tag(label) or ''
field_class_type = 'formfield%slabel' % ('error' if errors else '')
help_text = field.help_text
context['json_for_js'][field.auto_id] = {
'autocomplete':
djangofilter.safe(select_url) if reference else None,
'tooltip':
djangofilter.linebreaksbr(
force_unicode(help_text)
) if help_text else '',
}
context.update({
'help_text': force_unicode(help_text) if help_text else '',
'field_class_type': field_class_type,
'label': force_unicode(label) if field.label else '',
'field': field,
'field_id': field.auto_id,
'required': required,
'example_text': example_text,
'select_url': select_url if reference else None,
'errors': errors,
})
return context
@register.simple_tag
def tojson(json_dictionary):
json_string = simplejson.dumps(json_dictionary, ensure_ascii=False)
return json_string.replace('\"','"')
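# Illustrative (hypothetical) template usage for the tag above; `my_dict` is
# any JSON-serializable context variable, not something defined here:
#
#   {% load forms_helpers %}
#   <script type="text/javascript">
#     var data = {% tojson my_dict %};
#   </script>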
|
|
import socket
import tornado
import tornado.ioloop
import tornado.process
from .routing import RoundRobinRouter, PROTOCOL_HTTP, PROTOCOL_HTTPS
from pyrox.tstream.iostream import (SSLSocketIOHandler, SocketIOHandler,
StreamClosedError)
from pyrox.tstream.tcpserver import TCPServer
from pyrox.log import get_logger
from pyrox.about import VERSION
from pyrox.http import (HttpRequest, HttpResponse, RequestParser,
ResponseParser, ParserDelegate)
import traceback
_LOG = get_logger(__name__)
"""
100 Continue intermediate response
"""
_100_CONTINUE = b'HTTP/1.1 100 Continue\r\n\r\n'
"""
String representing a 0 length HTTP chunked encoding chunk.
"""
_CHUNK_CLOSE = b'0\r\n\r\n'
"""
Default return object on error. This should be configurable.
"""
_BAD_GATEWAY_RESP = HttpResponse()
_BAD_GATEWAY_RESP.version = b'1.1'
_BAD_GATEWAY_RESP.status = '502 Bad Gateway'
_BAD_GATEWAY_RESP.header('Server').values.append('pyrox/{}'.format(VERSION))
_BAD_GATEWAY_RESP.header('Content-Length').values.append('0')
"""
Default return object on no route or upstream not responding. This should
be configurable.
"""
_UPSTREAM_UNAVAILABLE = HttpResponse()
_UPSTREAM_UNAVAILABLE.version = b'1.1'
_UPSTREAM_UNAVAILABLE.status = '503 Service Unavailable'
_UPSTREAM_UNAVAILABLE.header(
'Server').values.append('pyrox/{}'.format(VERSION))
_UPSTREAM_UNAVAILABLE.header('Content-Length').values.append('0')
_MAX_CHUNK_SIZE = 16384
def _write_chunk_to_stream(stream, data, callback=None):
# Format and write this chunk
chunk = bytearray()
chunk.extend(hex(len(data))[2:])
chunk.extend('\r\n')
chunk.extend(data)
chunk.extend('\r\n')
stream.write(chunk, callback)
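# Illustrative framing produced by _write_chunk_to_stream for a 5-byte body
# (hex chunk size, CRLF, data, CRLF):
#
#   _write_chunk_to_stream(stream, b'hello')  # -> stream.write(b'5\r\nhello\r\n')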
def _write_to_stream(stream, data, is_chunked, callback=None):
if is_chunked:
_write_chunk_to_stream(stream, data, callback)
else:
stream.write(data, callback)
class AccumulationStream(object):
def __init__(self):
self.data = None
self.reset()
def reset(self):
self.data = bytearray()
def write(self, data):
self.data.extend(data)
def size(self):
return len(self.data)
class ProxyHandler(ParserDelegate):
"""
Common class for the stream handlers. This parent class manages the
following:
- Handling of header field names.
- Tracking rejection of message sessions.
"""
def __init__(self, filter_pl, http_msg):
self._filter_pl = filter_pl
self._http_msg = http_msg
self._expect = None
self._chunked = False
self._last_header_field = None
self._intercepted = False
def on_http_version(self, major, minor):
self._http_msg.version = '{}.{}'.format(major, minor)
def on_header_field(self, field):
self._last_header_field = field
def on_header_value(self, value):
header = self._http_msg.header(self._last_header_field)
header.values.append(value)
# This is useful for handling 100 continue situations
if self._last_header_field.lower() == 'expect':
self._expect = value.lower()
self._last_header_field = None
class DownstreamHandler(ProxyHandler):
"""
This proxy handler manages data coming from downstream of the proxy.
This data comes from the client initiating the request against the
proxy.
"""
def __init__(self, downstream, filter_pl, connect_upstream):
super(DownstreamHandler, self).__init__(filter_pl, HttpRequest())
self._accumulator = AccumulationStream()
self._preread_body = AccumulationStream()
self._downstream = downstream
self._upstream = None
self._keep_alive = False
self._connect_upstream = connect_upstream
def on_req_method(self, method):
self._http_msg.method = method
def on_req_path(self, url):
self._http_msg.url = url
def on_headers_complete(self):
# Execute against the pipeline
action = self._filter_pl.on_request_head(self._http_msg)
# Make sure we handle 100 continue
if self._expect is not None and self._expect == '100-continue':
self._downstream.write(_100_CONTINUE)
# If we are intercepting the request body do some negotiation
if self._filter_pl.intercepts_req_body():
self._chunked = True
            # If there's a content length, negotiate the transfer encoding
if self._http_msg.get_header('content-length'):
self._http_msg.remove_header('content-length')
self._http_msg.remove_header('transfer-encoding')
                self._http_msg.header(
                    'transfer-encoding').values.append('chunked')
# If we're rejecting then we're not going to connect to upstream
if not action.should_connect_upstream():
self._intercepted = True
self._response_tuple = action.payload
else:
# Hold up on the client side until we're done negotiating
# connections.
self._downstream.handle.disable_reading()
# We're routing to upstream; we need to know where to go
if action.is_routing():
self._connect_upstream(self._http_msg, action.payload)
else:
self._connect_upstream(self._http_msg)
def on_body(self, chunk, length, is_chunked):
# Rejections simply discard the body
if not self._intercepted:
# Hold up on the client side until we're done with this chunk
self._downstream.handle.disable_reading()
# Point to the chunk for our data
data = chunk
# Run through the filter PL and see if we need to modify
# the body
self._accumulator.reset()
self._filter_pl.on_request_body(data, self._accumulator)
# Check to see if the filter modified the body
if self._accumulator.size() > 0:
data = self._accumulator.data
if self._upstream:
# When we write to upstream set the callback to resume
# reading from downstream.
_write_to_stream(self._upstream,
data,
is_chunked,
self._downstream.handle.resume_reading)
else:
# If we're not connected upstream, store the fragment
# for later. We will resume reading once upstream
# connects
self._preread_body.write(data)
def on_upstream_connect(self, upstream):
self._upstream = upstream
if self._preread_body.size() > 0:
            # Flush the buffered body bytes; pass the raw bytearray rather
            # than the AccumulationStream wrapper so the chunk writer can
            # take its length.
            _write_to_stream(self._upstream,
                             self._preread_body.data,
                             self._chunked,
                             self._downstream.handle.resume_reading)
# Empty the object
self._preread_body.reset()
else:
self._downstream.handle.resume_reading()
def on_message_complete(self, is_chunked, keep_alive):
self._keep_alive = bool(keep_alive)
if self._intercepted:
# Commit the response to the client (aka downstream)
writer = ResponseWriter(
self._response_tuple[0],
self._response_tuple[1],
self._downstream,
self.complete)
writer.commit()
elif is_chunked:
# Finish the body with the closing chunk for the origin server
self._upstream.write(_CHUNK_CLOSE, self.complete)
def complete(self):
if self._keep_alive:
# Clean up the message obj
self._http_msg = HttpRequest()
else:
# We're done here - close up shop
self._downstream.close()
class ResponseWriter(object):
def __init__(self, response, source, stream, on_complete):
self._on_complete = on_complete
self._response = response
self._source = source
self._stream = stream
self._written = 0
def commit(self):
self.write_head()
def write_head(self):
if self._source is not None:
if self._response.get_header('content-length'):
self._response.remove_header('content-length')
self._response.remove_header('transfer-encoding')
# Set to chunked to make the transfer easier
self._response.header('transfer-encoding').values.append('chunked')
self._stream.write(self._response.to_bytes(), self.write_body)
def write_body(self):
if self._source is not None:
src_type = type(self._source)
if src_type is bytearray or src_type is bytes or src_type is str:
self.write_body_as_array()
elif src_type is file:
self.write_body_as_file()
else:
raise TypeError(
'Unable to use {} as response body'.format(src_type))
def write_body_as_file(self):
next_chunk = self._source.read(_MAX_CHUNK_SIZE)
if len(next_chunk) == 0:
self._stream.write(_CHUNK_CLOSE, self._on_complete)
else:
_write_chunk_to_stream(
self._stream,
next_chunk,
self.write_body_as_file)
def write_body_as_array(self):
src_len = len(self._source)
if self._written == src_len:
self._stream.write(_CHUNK_CLOSE, self._on_complete)
else:
max_idx = self._written + _MAX_CHUNK_SIZE
limit_idx = max_idx if max_idx < src_len else src_len
next_chunk = self._source[self._written:limit_idx]
self._written = limit_idx
_write_chunk_to_stream(
self._stream,
next_chunk,
self.write_body_as_array)
class UpstreamHandler(ProxyHandler):
"""
This proxy handler manages data coming from upstream of the proxy. This
data usually comes from the origin service or it may come from another
proxy.
"""
def __init__(self, downstream, upstream, filter_pl, request):
super(UpstreamHandler, self).__init__(filter_pl, HttpResponse())
self._downstream = downstream
self._upstream = upstream
self._request = request
def on_status(self, status_code):
self._http_msg.status = str(status_code)
def on_headers_complete(self):
action = self._filter_pl.on_response_head(
self._http_msg, self._request)
# If we are intercepting the response body do some negotiation
if self._filter_pl.intercepts_resp_body():
# If there's a content length, negotiate the transfer encoding
if self._http_msg.get_header('content-length'):
self._chunked = True
self._http_msg.remove_header('content-length')
self._http_msg.remove_header('transfer-encoding')
self._http_msg.header(
'transfer-encoding').values.append('chunked')
if action.is_rejecting():
self._intercepted = True
self._response_tuple = action.payload
else:
self._downstream.write(self._http_msg.to_bytes())
def on_body(self, bytes, length, is_chunked):
# Rejections simply discard the body
if not self._intercepted:
accumulator = AccumulationStream()
data = bytes
self._filter_pl.on_response_body(data, accumulator, self._request)
if accumulator.size() > 0:
                data = accumulator.data
# Hold up on the upstream side until we're done sending this chunk
self._upstream.handle.disable_reading()
# When we write to the stream set the callback to resume
# reading from upstream.
_write_to_stream(
self._downstream,
data,
is_chunked or self._chunked,
self._upstream.handle.resume_reading)
def on_message_complete(self, is_chunked, keep_alive):
callback = self._upstream.close
self._upstream.handle.disable_reading()
if keep_alive:
self._http_msg = HttpResponse()
callback = self._downstream.handle.resume_reading
if self._intercepted:
# Serialize our message to them
self._downstream.write(self._http_msg.to_bytes(), callback)
elif is_chunked or self._chunked:
# Finish the last chunk.
self._downstream.write(_CHUNK_CLOSE, callback)
else:
callback()
class ConnectionTracker(object):
def __init__(self, on_stream_live, on_target_closed, on_target_error):
self._streams = dict()
self._target_in_use = None
self._on_stream_live = on_stream_live
self._on_target_closed = on_target_closed
self._on_target_error = on_target_error
def destroy(self):
for stream in self._streams.values():
if not stream.closed():
stream.close()
def connect(self, target):
self._target_in_use = target
live_stream = self._streams.get(target)
if live_stream:
            # Invoke the callback ourselves since the socket's already connected
self._on_stream_live(live_stream)
else:
self._new_connection(target)
def _new_connection(self, target):
host, port, protocol = target
# Set up our upstream socket
us_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
# Create and bind the IO Handler based on selected protocol
if protocol == PROTOCOL_HTTP:
live_stream = SocketIOHandler(us_sock)
elif protocol == PROTOCOL_HTTPS:
live_stream = SSLSocketIOHandler(us_sock)
else:
raise Exception('Unknown protocol: {}.'.format(protocol))
# Store the stream reference for later use
self._streams[target] = live_stream
# Build and set the on_close callback
def on_close():
# Disable error cb on close
live_stream.on_error(None)
del self._streams[target]
if self._target_in_use == target:
self.destroy()
self._on_target_closed()
live_stream.on_close(on_close)
# Build and set the on_error callback
def on_error(error):
            # Disable close cb on error
live_stream.on_close(None)
if self._target_in_use == target:
del self._streams[target]
if self._target_in_use == target:
self.destroy()
self._on_target_error(error)
live_stream.on_error(on_error)
# Build and set the on_connect callback and then connect
def on_connect():
self._on_stream_live(live_stream)
live_stream.connect((host, port), on_connect)
class ProxyConnection(object):
"""
A proxy connection manages the lifecycle of the sockets opened during a
proxied client request against Pyrox.
"""
def __init__(self, us_filter_pl, ds_filter_pl, downstream, router):
self._ds_filter_pl = ds_filter_pl
self._us_filter_pl = us_filter_pl
self._router = router
self._upstream_parser = None
self._upstream_tracker = ConnectionTracker(
self._on_upstream_live,
self._on_upstream_close,
self._on_upstream_error)
# Setup all of the wiring for downstream
self._downstream = downstream
self._downstream_handler = DownstreamHandler(
self._downstream,
self._ds_filter_pl,
self._connect_upstream)
self._downstream_parser = RequestParser(self._downstream_handler)
self._downstream.on_close(self._on_downstream_close)
self._downstream.read(self._on_downstream_read)
def _connect_upstream(self, request, route=None):
if route is not None:
# This does some type checking for routes passed up via filter
self._router.set_next(route)
upstream_target = self._router.get_next()
if upstream_target is None:
self._downstream.write(
_UPSTREAM_UNAVAILABLE.to_bytes(),
self._downstream.handle.resume_reading)
return
# Hold downstream reads
self._hold_downstream = True
# Update the request to proxy upstream and store it
request.replace_header('host').values.append(
'{}:{}'.format(upstream_target[0], upstream_target[1]))
self._request = request
try:
self._upstream_tracker.connect(upstream_target)
except Exception as ex:
_LOG.exception(ex)
def _on_upstream_live(self, upstream):
self._upstream_handler = UpstreamHandler(
self._downstream,
upstream,
self._us_filter_pl,
self._request)
if self._upstream_parser:
self._upstream_parser.destroy()
self._upstream_parser = ResponseParser(self._upstream_handler)
# Set the read callback
upstream.read(self._on_upstream_read)
# Send the proxied request object
upstream.write(self._request.to_bytes())
# Drop the ref to the proxied request head
self._request = None
# Set up our downstream handler
self._downstream_handler.on_upstream_connect(upstream)
def _on_downstream_close(self):
self._upstream_tracker.destroy()
self._downstream_parser.destroy()
self._downstream_parser = None
def _on_downstream_error(self, error):
_LOG.error('Downstream error: {}'.format(error))
if not self._downstream.closed():
self._downstream.close()
def _on_upstream_error(self, error):
_LOG.error('Upstream error: {}'.format(error))
if not self._downstream.closed():
self._downstream.write(_BAD_GATEWAY_RESP.to_bytes())
def _on_upstream_close(self):
if not self._downstream.closed():
self._downstream.close()
if self._upstream_parser is not None:
self._upstream_parser.destroy()
self._upstream_parser = None
def _on_downstream_read(self, data):
try:
self._downstream_parser.execute(data)
except StreamClosedError:
pass
except Exception as ex:
_LOG.exception(ex)
def _on_upstream_read(self, data):
try:
self._upstream_parser.execute(data)
except StreamClosedError:
pass
except Exception as ex:
_LOG.exception(ex)
class TornadoHttpProxy(TCPServer):
"""
Subclass of the Tornado TCPServer that lets us set up the Pyrox proxy
orchestrations.
    :param pipeline_factories: A tuple with the upstream filter pipeline
        factory as the first element and the downstream filter pipeline
        factory as the second element.
"""
def __init__(self, pipeline_factories, default_us_targets=None,
ssl_options=None):
super(TornadoHttpProxy, self).__init__(ssl_options=ssl_options)
self._router = RoundRobinRouter(default_us_targets)
self.us_pipeline_factory = pipeline_factories[0]
self.ds_pipeline_factory = pipeline_factories[1]
def handle_stream(self, downstream, address):
connection_handler = ProxyConnection(
self.us_pipeline_factory(),
self.ds_pipeline_factory(),
downstream,
self._router)
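# A minimal usage sketch (assumptions, not part of this module: the pyrox
# TCPServer base class exposes Tornado's usual listen() interface, and
# `make_pipeline` stands in for a real filter pipeline factory):
#
#   proxy = TornadoHttpProxy(
#       (make_pipeline, make_pipeline),   # (upstream factory, downstream factory)
#       default_us_targets=[('127.0.0.1', 8080, PROTOCOL_HTTP)])
#   proxy.listen(8888)
#   tornado.ioloop.IOLoop.instance().start()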
|
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CellState Manager
"""
import copy
import datetime
import functools
from oslo.config import cfg
from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
cell_state_manager_opts = [
cfg.IntOpt('db_check_interval',
default=60,
help='Seconds between getting fresh cell info from db.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')
class CellState(object):
"""Holds information for a particular cell."""
def __init__(self, cell_name, is_me=False):
self.name = cell_name
self.is_me = is_me
self.last_seen = datetime.datetime.min
self.capabilities = {}
self.capacities = {}
self.db_info = {}
# TODO(comstud): The DB will specify the driver to use to talk
# to this cell, but there's no column for this yet. The only
# available driver is the rpc driver.
self.driver = rpc_driver.CellsRPCDriver()
def update_db_info(self, cell_db_info):
"""Update cell credentials from db."""
self.db_info = dict(
[(k, v) for k, v in cell_db_info.iteritems()
if k != 'name'])
def update_capabilities(self, cell_metadata):
"""Update cell capabilities for a cell."""
self.last_seen = timeutils.utcnow()
self.capabilities = cell_metadata
def update_capacities(self, capacities):
"""Update capacity information for a cell."""
self.last_seen = timeutils.utcnow()
self.capacities = capacities
def get_cell_info(self):
"""Return subset of cell information for OS API use."""
db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset',
'username', 'rpc_host', 'rpc_port']
cell_info = dict(name=self.name, capabilities=self.capabilities)
if self.db_info:
for field in db_fields_to_return:
cell_info[field] = self.db_info[field]
return cell_info
def send_message(self, message):
"""Send a message to a cell. Just forward this to the driver,
passing ourselves and the message as arguments.
"""
self.driver.send_message_to_cell(self, message)
def __repr__(self):
me = "me" if self.is_me else "not_me"
return "Cell '%s' (%s)" % (self.name, me)
def sync_from_db(f):
"""Use as a decorator to wrap methods that use cell information to
make sure they sync the latest information from the DB periodically.
"""
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if self._time_to_sync():
self._cell_db_sync()
return f(self, *args, **kwargs)
return wrapper
class CellStateManager(base.Base):
def __init__(self, cell_state_cls=None):
super(CellStateManager, self).__init__()
if not cell_state_cls:
cell_state_cls = CellState
self.cell_state_cls = cell_state_cls
self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
self.parent_cells = {}
self.child_cells = {}
self.last_cell_db_check = datetime.datetime.min
self._cell_db_sync()
my_cell_capabs = {}
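        # Each configured capability is a 'name=value' string; multiple values
        # are separated by ';'.  Illustrative example:
        # 'hypervisor=xenserver;kvm' -> {'hypervisor': set(['xenserver', 'kvm'])}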
for cap in CONF.cells.capabilities:
name, value = cap.split('=', 1)
if ';' in value:
values = set(value.split(';'))
else:
values = set([value])
my_cell_capabs[name] = values
self.my_cell_state.update_capabilities(my_cell_capabs)
def _refresh_cells_from_db(self, ctxt):
"""Make our cell info map match the db."""
# Add/update existing cells ...
db_cells = self.db.cell_get_all(ctxt)
db_cells_dict = dict([(cell['name'], cell) for cell in db_cells])
# Update current cells. Delete ones that disappeared
for cells_dict in (self.parent_cells, self.child_cells):
for cell_name, cell_info in cells_dict.items():
is_parent = cell_info.db_info['is_parent']
db_dict = db_cells_dict.get(cell_name)
if db_dict and is_parent == db_dict['is_parent']:
cell_info.update_db_info(db_dict)
else:
del cells_dict[cell_name]
# Add new cells
for cell_name, db_info in db_cells_dict.items():
if db_info['is_parent']:
cells_dict = self.parent_cells
else:
cells_dict = self.child_cells
if cell_name not in cells_dict:
cells_dict[cell_name] = self.cell_state_cls(cell_name)
cells_dict[cell_name].update_db_info(db_info)
def _time_to_sync(self):
"""Is it time to sync the DB against our memory cache?"""
diff = timeutils.utcnow() - self.last_cell_db_check
return diff.seconds >= CONF.cells.db_check_interval
def _update_our_capacity(self, context):
"""Update our capacity in the self.my_cell_state CellState.
This will add/update 2 entries in our CellState.capacities,
'ram_free' and 'disk_free'.
The values of these are both dictionaries with the following
format:
{'total_mb': <total_memory_free_in_the_cell>,
         'units_by_mb': <units_dictionary>}
<units_dictionary> contains the number of units that we can
build for every instance_type that we have. This number is
computed by looking at room available on every compute_node.
Take the following instance_types as an example:
[{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
{'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]
capacities['ram_free']['units_by_mb'] would contain the following:
{'1024': <number_of_instances_that_will_fit>,
'2048': <number_of_instances_that_will_fit>}
capacities['disk_free']['units_by_mb'] would contain the following:
{'122880': <number_of_instances_that_will_fit>,
'225280': <number_of_instances_that_will_fit>}
Units are in MB, so 122880 = (10 + 100) * 1024.
NOTE(comstud): Perhaps we should only report a single number
available per instance_type.
"""
reserve_level = CONF.cells.reserve_percent / 100.0
compute_hosts = {}
def _get_compute_hosts():
compute_nodes = self.db.compute_node_get_all(context)
for compute in compute_nodes:
service = compute['service']
if not service or service['disabled']:
continue
host = service['host']
compute_hosts[host] = {
'free_ram_mb': compute['free_ram_mb'],
'free_disk_mb': compute['free_disk_gb'] * 1024,
'total_ram_mb': compute['memory_mb'],
'total_disk_mb': compute['local_gb'] * 1024}
_get_compute_hosts()
if not compute_hosts:
self.my_cell_state.update_capacities({})
return
ram_mb_free_units = {}
disk_mb_free_units = {}
total_ram_mb_free = 0
total_disk_mb_free = 0
def _free_units(total, free, per_inst):
if per_inst:
min_free = total * reserve_level
free = max(0, free - min_free)
return int(free / per_inst)
else:
return 0
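        # Worked example with illustrative numbers: reserve_percent=10,
        # total=8192 MB, free=4096 MB, per_inst=1024 MB -> hold back 819.2 MB,
        # leaving 3276.8 MB usable, so int(3276.8 / 1024) == 3 units fit.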
def _update_from_values(values, instance_type):
memory_mb = instance_type['memory_mb']
disk_mb = (instance_type['root_gb'] +
instance_type['ephemeral_gb']) * 1024
ram_mb_free_units.setdefault(str(memory_mb), 0)
disk_mb_free_units.setdefault(str(disk_mb), 0)
ram_free_units = _free_units(compute_values['total_ram_mb'],
compute_values['free_ram_mb'], memory_mb)
disk_free_units = _free_units(compute_values['total_disk_mb'],
compute_values['free_disk_mb'], disk_mb)
ram_mb_free_units[str(memory_mb)] += ram_free_units
disk_mb_free_units[str(disk_mb)] += disk_free_units
instance_types = self.db.instance_type_get_all(context)
for compute_values in compute_hosts.values():
total_ram_mb_free += compute_values['free_ram_mb']
total_disk_mb_free += compute_values['free_disk_mb']
for instance_type in instance_types:
_update_from_values(compute_values, instance_type)
capacities = {'ram_free': {'total_mb': total_ram_mb_free,
'units_by_mb': ram_mb_free_units},
'disk_free': {'total_mb': total_disk_mb_free,
'units_by_mb': disk_mb_free_units}}
self.my_cell_state.update_capacities(capacities)
@utils.synchronized('cell-db-sync')
def _cell_db_sync(self):
"""Update status for all cells if it's time. Most calls to
        this are from the sync_from_db() decorator that checks
the time, but it checks outside of a lock. The duplicate
check here is to prevent multiple threads from pulling the
information simultaneously.
"""
if self._time_to_sync():
LOG.debug(_("Updating cell cache from db."))
self.last_cell_db_check = timeutils.utcnow()
ctxt = context.get_admin_context()
self._refresh_cells_from_db(ctxt)
self._update_our_capacity(ctxt)
@sync_from_db
def get_cell_info_for_neighbors(self):
"""Return cell information for all neighbor cells."""
cell_list = [cell.get_cell_info()
for cell in self.child_cells.itervalues()]
cell_list.extend([cell.get_cell_info()
for cell in self.parent_cells.itervalues()])
return cell_list
@sync_from_db
def get_my_state(self):
"""Return information for my (this) cell."""
return self.my_cell_state
@sync_from_db
def get_child_cells(self):
"""Return list of child cell_infos."""
return self.child_cells.values()
@sync_from_db
def get_parent_cells(self):
"""Return list of parent cell_infos."""
return self.parent_cells.values()
@sync_from_db
def get_parent_cell(self, cell_name):
return self.parent_cells.get(cell_name)
@sync_from_db
def get_child_cell(self, cell_name):
return self.child_cells.get(cell_name)
@sync_from_db
def update_cell_capabilities(self, cell_name, capabilities):
"""Update capabilities for a cell."""
cell = self.child_cells.get(cell_name)
if not cell:
cell = self.parent_cells.get(cell_name)
if not cell:
LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
"update capabilities"),
{'cell_name': cell_name})
return
# Make sure capabilities are sets.
for capab_name, values in capabilities.items():
capabilities[capab_name] = set(values)
cell.update_capabilities(capabilities)
@sync_from_db
def update_cell_capacities(self, cell_name, capacities):
"""Update capacities for a cell."""
cell = self.child_cells.get(cell_name)
if not cell:
cell = self.parent_cells.get(cell_name)
if not cell:
LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
"update capacities"),
{'cell_name': cell_name})
return
cell.update_capacities(capacities)
@sync_from_db
def get_our_capabilities(self, include_children=True):
capabs = copy.deepcopy(self.my_cell_state.capabilities)
if include_children:
for cell in self.child_cells.values():
if timeutils.is_older_than(cell.last_seen,
CONF.cells.mute_child_interval):
continue
for capab_name, values in cell.capabilities.items():
if capab_name not in capabs:
capabs[capab_name] = set([])
capabs[capab_name] |= values
return capabs
def _add_to_dict(self, target, src):
for key, value in src.items():
if isinstance(value, dict):
target.setdefault(key, {})
self._add_to_dict(target[key], value)
continue
target.setdefault(key, 0)
target[key] += value
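    # Illustrative merge (made-up numbers): adding {'ram_free': {'total_mb': 1024}}
    # into a target of {'ram_free': {'total_mb': 512}} leaves the target as
    # {'ram_free': {'total_mb': 1536}}; nested dicts are merged recursively.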
@sync_from_db
def get_our_capacities(self, include_children=True):
capacities = copy.deepcopy(self.my_cell_state.capacities)
if include_children:
for cell in self.child_cells.values():
self._add_to_dict(capacities, cell.capacities)
return capacities
@sync_from_db
def get_capacities(self, cell_name=None):
if not cell_name or cell_name == self.my_cell_state.name:
return self.get_our_capacities()
if cell_name in self.child_cells:
return self.child_cells[cell_name].capacities
raise exception.CellNotFound(cell_name=cell_name)
|
|
"""
ulmo.ncdc.cirs.core
~~~~~~~~~~~~~~~~~~~
This module provides direct access to the `National Climatic Data Center`_
`Climate Index Reference Sequential (CIRS)`_ drought dataset.
.. _National Climatic Data Center: http://www.ncdc.noaa.gov
.. _Climate Index Reference Sequential (CIRS): http://www1.ncdc.noaa.gov/pub/data/cirs/
"""
import distutils
import os.path
import pandas
from ulmo import util
CIRS_DIR = util.get_ulmo_dir('ncdc/cirs')
NO_DATA_VALUES = {
'cddc': '-9999.',
'hddc': '-9999.',
'pcpn': '-9.99',
'pdsi': '-99.99',
'phdi': '-99.99',
'pmdi': '-99.99',
'sp01': '-99.99',
'sp02': '-99.99',
'sp03': '-99.99',
'sp06': '-99.99',
'sp09': '-99.99',
'sp12': '-99.99',
'sp24': '-99.99',
'tmpc': '-99.90',
'zndx': '-99.99',
}
def get_data(elements=None, by_state=False, location_names='abbr', as_dataframe=False, use_file=None):
"""Retrieves data.
Parameters
----------
    elements : ``None``, str or list
        The element(s) to get data for. If ``None`` (default), then
all elements are used. An individual element is a string, but a list or
tuple of them can be used to specify a set of elements. Elements are:
* 'cddc': Cooling Degree Days
* 'hddc': Heating Degree Days
* 'pcpn': Precipitation
* 'pdsi': Palmer Drought Severity Index
* 'phdi': Palmer Hydrological Drought Index
* 'pmdi': Modified Palmer Drought Severity Index
* 'sp01': 1-month Standardized Precipitation Index
* 'sp02': 2-month Standardized Precipitation Index
* 'sp03': 3-month Standardized Precipitation Index
* 'sp06': 6-month Standardized Precipitation Index
* 'sp09': 9-month Standardized Precipitation Index
* 'sp12': 12-month Standardized Precipitation Index
* 'sp24': 24-month Standardized Precipitation Index
* 'tmpc': Temperature
        * 'zndx': Palmer Z-Index
by_state : bool
If False (default), divisional data will be retrieved. If True, then
regional data will be retrieved.
location_names : str or ``None``
This parameter defines what (if any) type of names will be added to the
values. If set to 'abbr' (default), then abbreviated location names
will be used. If 'full', then full location names will be used. If set
to None, then no location name will be added and the only identifier
will be the location_codes (this is the most memory-conservative
option).
as_dataframe : bool
        If ``False`` (default), a list of values dicts is returned. If ``True``,
        a single pandas.DataFrame containing the requested elements is
        returned. The pandas dataframe is used internally, so setting
this to ``True`` is faster as it skips a somewhat expensive
serialization step.
use_file : ``None``, file-like object or str
If ``None`` (default), then data will be automatically retrieved from
        the web. If a file-like object or a file path string, then data will
        be read from that file. This is intended to be used for reading in
previously-downloaded versions of the dataset.
Returns
-------
data : list or pandas.DataFrame
A list of value dicts or a pandas.DataFrame containing data. See
the ``as_dataframe`` parameter for more.
"""
if isinstance(elements, basestring):
elements = [elements]
elif elements is None:
elements = [
'cddc',
'hddc',
'pcpn',
'pdsi',
'phdi',
'pmdi',
'sp01',
'sp02',
'sp03',
'sp06',
'sp09',
'sp12',
'sp24',
'tmpc',
'zndx',
]
df = None
for element in elements:
element_file = _get_element_file(use_file, element, elements, by_state)
element_df = _get_element_data(element, by_state, element_file, location_names)
keys = ['location_code', 'year', 'month']
for append_key in ['division', 'state', 'state_code']:
if append_key in element_df.columns:
keys.append(append_key)
element_df.set_index(keys, inplace=True)
if df is None:
df = element_df
else:
df = df.join(element_df, how='outer')
df = df.reset_index()
df = _resolve_location_names(df, location_names, by_state)
if as_dataframe:
return df
else:
return df.T.to_dict().values()
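# A minimal usage sketch (illustrative only; requires network access to the
# NCDC FTP server):
#
#   df = get_data(elements='pdsi', by_state=True, as_dataframe=True)
#   print(df.head())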
def _get_element_data(element, by_state, use_file, location_names):
if use_file:
url = None
path = None
else:
url = _get_url(element, by_state)
filename = url.rsplit('/', 1)[-1]
path = os.path.join(CIRS_DIR, filename)
with util.open_file_for_url(url, path, use_file=use_file) as f:
element_df = _parse_values(f, by_state, location_names, element)
return element_df
def _get_element_file(use_file, element, elements, by_state):
if isinstance(use_file, basestring):
if os.path.basename(use_file) == '':
if len(elements) > 1:
                raise ValueError(
"'use_file' must be a path to a directory if using "
"'use_file' with multiple elements")
return use_file + _get_filename(element, by_state, os.path.dirname(use_file))
return use_file
def _get_filename(element, by_state, dir_path):
files = os.listdir(dir_path)
return _most_recent(files, element, by_state)
def _get_url(element, by_state):
ftp_dir = "ftp://ftp.ncdc.noaa.gov/pub/data/cirs/climdiv/"
files = util.dir_list(ftp_dir)
most_recent = _most_recent(files, element, by_state)
return ftp_dir + most_recent
def _most_recent(files, element, by_state):
geographic_extent = 'st' if by_state else 'dv'
match_str = 'climdiv-{element}{geographic_extent}'.format(
element=element,
geographic_extent=geographic_extent,
)
matches = filter(lambda s: s.startswith(match_str), files)
return sorted(matches, key=_file_key)[0]
def _file_key(filename):
version_str = filename.split('-')[2][1:]
return distutils.version.StrictVersion(version_str)
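# For a filename such as 'climdiv-pdsidv-v1.0.0-20170906' (illustrative), the
# third '-'-separated token is 'v1.0.0', so _file_key returns
# StrictVersion('1.0.0'), letting _most_recent order candidate files by version.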
def _parse_values(file_handle, by_state, location_names, element):
if by_state:
id_columns = [
('location_code', 0, 3, None),
#('division', 3, 3, None), # ignored in state files
#('element', 4, 6, None), # element is redundant
('year', 6, 10, None),
]
else:
id_columns = [
('location_code', 0, 2, None),
('division', 2, 4, None),
#('element', 4, 6, None), # element is redundant
('year', 6, 10, None),
]
year_col_end = id_columns[-1][2]
month_columns = [
(str(n), year_col_end - 6 + (7 * n), year_col_end + (7 * n), None)
for n in range(1, 13)
]
columns = id_columns + month_columns
na_values = [NO_DATA_VALUES.get(element)]
parsed = util.parse_fwf(file_handle, columns, na_values=na_values)
month_columns = [id_column[0] for id_column in id_columns]
melted = pandas.melt(parsed, id_vars=month_columns)\
.rename(columns={'variable': 'month'})
melted.month = melted.month.astype(int)
# throw away NaNs
melted = melted[melted['value'].notnull()]
data = melted.rename(columns={
'value': element,
})
return data
def _resolve_location_names(df, location_names, by_state):
if location_names is None:
return df
elif location_names not in ('abbr', 'full'):
raise ValueError("location_names should be set to either None, 'abbr' or 'full'")
else:
locations = _states_regions_dataframe()[location_names]
with_locations = df.join(locations, on='location_code')
if by_state:
return with_locations.rename(columns={
location_names: 'location',
})
else:
return with_locations.rename(columns={
location_names: 'state',
'location_code': 'state_code',
})
def _states_regions_dataframe():
"""returns a dataframe indexed by state/region code with columns for the
    name and abbreviation (abbr) to use
"""
STATES_REGIONS = {
        # code: (full name, abbreviation)
1: ("Alabama", "AL"),
2: ("Arizona", "AZ"),
3: ("Arkansas", "AR"),
4: ("California", "CA"),
5: ("Colorado", "CO"),
6: ("Connecticut", "CT"),
7: ("Delaware", "DE"),
8: ("Florida", "FL"),
9: ("Georgia", "GA"),
10: ("Idaho", "ID"),
11: ("Illinois", "IL"),
12: ("Indiana", "IN"),
13: ("Iowa", "IA"),
14: ("Kansas", "KS"),
15: ("Kentucky", "KY"),
16: ("Louisiana", "LA"),
17: ("Maine", "ME"),
18: ("Maryland", "MD"),
19: ("Massachusetts", "MA"),
20: ("Michigan", "MI"),
21: ("Minnesota", "MN"),
22: ("Mississippi", "MS"),
23: ("Missouri", "MO"),
24: ("Montana", "MT"),
25: ("Nebraska", "NE"),
26: ("Nevada", "NV"),
27: ("New Hampshire", "NH"),
28: ("New Jersey", "NJ"),
29: ("New Mexico", "NM"),
30: ("New York", "NY"),
31: ("North Carolina", "NC"),
32: ("North Dakota", "ND"),
33: ("Ohio", "OH"),
34: ("Oklahoma", "OK"),
35: ("Oregon", "OR"),
36: ("Pennsylvania", "PA"),
37: ("Rhode Island", "RI"),
38: ("South Carolina", "SC"),
39: ("South Dakota", "SD"),
40: ("Tennessee", "TN"),
41: ("Texas", "TX"),
42: ("Utah", "UT"),
43: ("Vermont", "VT"),
44: ("Virginia", "VA"),
45: ("Washington", "WA"),
46: ("West Virginia", "WV"),
47: ("Wisconsin", "WI"),
48: ("Wyoming", "WY"),
101: ("Northeast Region", "ner"),
102: ("East North Central Region", "encr"),
103: ("Central Region", "cr"),
104: ("Southeast Region", "ser"),
105: ("West North Central Region", "wncr"),
106: ("South Region", "sr"),
107: ("Southwest Region", "swr"),
108: ("Northwest Region", "nwr"),
109: ("West Region", "wr"),
110: ("National (contiguous 48 States)", "national"),
# The following are the range of code values for the National Weather Service Regions, river basins, and agricultural regions.
111: ("NWS: Great Plains", "nws:gp"),
115: ("NWS: Southern Plains and Gulf Coast", "nws:spgc"),
120: ("NWS: US Rockies and Westward", "nws:usrw"),
121: ("NWS: Eastern Region", "nws:er"),
122: ("NWS: Southern Region", "nws:sr"),
123: ("NWS: Central Region", "nws:cr"),
124: ("NWS: Western Region", "nws:wr"),
201: ("NWS: Pacific Northwest Basin", "nws:pnwb"),
202: ("NWS: California River Basin", "nws:crb"),
203: ("NWS: Great Basin", "nws:gb"),
204: ("NWS: Lower Colorado River Basin", "nws:lcrb"),
205: ("NWS: Upper Colorado River Basin", "nws:urcb"),
206: ("NWS: Rio Grande River Basin", "nws:rgrb"),
207: ("NWS: Texas Gulf Coast River Basin", "nws:tgcrb"),
208: ("NWS: Arkansas-White-Red Basin", "nws:awrb"),
209: ("NWS: Lower Mississippi River Basin", "nws:lmrb"),
210: ("NWS: Missouri River Basin", "nws:mrb"),
211: ("NWS: Souris-Red-Rainy Basin", "nws:srrb"),
212: ("NWS: Upper Mississippi River Basin", "nws:umrb"),
213: ("NWS: Great Lakes Basin", "nws:glb"),
214: ("NWS: Tennessee River Basin", "nws:trb"),
215: ("NWS: Ohio River Basin", "nws:ohrb"),
216: ("NWS: South Atlantic-Gulf Basin", "nws:sagb"),
217: ("NWS: Mid-Atlantic Basin", "nws:mab"),
218: ("NWS: New England Basin", "nws:neb"),
220: ("NWS: Mississippi River Basin & Tributaties (N. of Memphis, TN",
"nws:mrbt"),
        # the codes below are weighted by area
250: ("Area: Spring Wheat Belt", "area:swb"),
255: ("Area: Primary Hard Red Winter Wheat Belt", "area:phrwwb"),
256: ("Area: Winter Wheat Belt", "area:wwb"),
260: ("Area: Primary Corn and Soybean Belt", "area:pcsb"),
261: ("Area: Corn Belt", "area:cb"),
262: ("Area: Soybean Belt", "area:sb"),
265: ("Area: Cotton Belt", "area:cb"),
        # the codes below are weighted by productivity
350: ("Prod: Spring Wheat Belt", "prod:swb"),
356: ("Prod: Winter Wheat Belt", "prod:wwb"),
361: ("Prod: Corn Belt", "prod:cb"),
362: ("Prod: Soybean Belt", "prod:sb"),
365: ("Prod: Cotton Belt", "prod:cb"),
        # the codes below are for percent productivity in the Palmer Z Index categories
450: ("% Prod: Spring Wheat Belt", "%prod:swb"),
456: ("% Prod: Winter Wheat Belt", "%prod:wwb"),
461: ("% Prod: Corn Belt", "%prod:cb"),
462: ("% Prod: Soybean Belt", "%prod:sb"),
465: ("% Prod: Cotton Belt", "%prod:cb"),
}
return pandas.DataFrame(STATES_REGIONS).T.rename(columns={0: 'full', 1: 'abbr'})
|
|
# -*- encoding: utf-8
from sqlalchemy.testing import eq_
from sqlalchemy import schema
from sqlalchemy.sql import table, column
from sqlalchemy.databases import mssql
from sqlalchemy.dialects.mssql import mxodbc
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import sql
from sqlalchemy import Integer, String, Table, Column, select, MetaData,\
update, delete, insert, extract, union, func, PrimaryKeyConstraint, \
UniqueConstraint, Index, Sequence, literal
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect(legacy_schema_aliasing=False)
def test_true_false(self):
self.assert_compile(
sql.false(), "0"
)
self.assert_compile(
sql.true(),
"1"
)
def test_select(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select(),
'SELECT sometable.somecolumn FROM sometable')
def test_select_with_nolock(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)')
def test_select_with_nolock_schema(self):
m = MetaData()
t = Table('sometable', m, Column('somecolumn', Integer),
schema='test_schema')
self.assert_compile(
t.select().with_hint(t, 'WITH (NOLOCK)'),
'SELECT test_schema.sometable.somecolumn '
'FROM test_schema.sometable WITH (NOLOCK)')
def test_join_with_hint(self):
t1 = table('t1',
column('a', Integer),
column('b', String),
column('c', String),
)
t2 = table('t2',
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
join = t1.join(t2, t1.c.a == t2.c.a).\
select().with_hint(t1, 'WITH (NOLOCK)')
self.assert_compile(
join,
'SELECT t1.a, t1.b, t1.c, t2.a, t2.b, t2.c '
'FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a'
)
def test_insert(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.insert(),
'INSERT INTO sometable (somecolumn) VALUES '
'(:somecolumn)')
def test_update(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.update(t.c.somecolumn == 7),
'UPDATE sometable SET somecolumn=:somecolum'
'n WHERE sometable.somecolumn = '
':somecolumn_1', dict(somecolumn=10))
def test_insert_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.insert().
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"INSERT INTO sometable WITH (PAGLOCK) "
"(somecolumn) VALUES (:somecolumn)"
)
def test_update_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"UPDATE sometable WITH (PAGLOCK) "
"SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_update_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.update().where(t.c.somecolumn == "q").
values(somecolumn="x").
with_hint("XYZ", "mysql"),
"UPDATE sometable SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_hint(self):
t = table('sometable', column('somecolumn'))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.delete().where(t.c.somecolumn == "q").
with_hint("WITH (PAGLOCK)",
selectable=targ,
dialect_name=darg),
"DELETE FROM sometable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = :somecolumn_1"
)
def test_delete_exclude_hint(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(
t.delete().
where(t.c.somecolumn == "q").
with_hint("XYZ", dialect_name="mysql"),
"DELETE FROM sometable WHERE "
"sometable.somecolumn = :somecolumn_1"
)
def test_update_from_hint(self):
t = table('sometable', column('somecolumn'))
t2 = table('othertable', column('somecolumn'))
for darg in ("*", "mssql"):
self.assert_compile(
t.update().where(t.c.somecolumn == t2.c.somecolumn).
values(somecolumn="x").
with_hint("WITH (PAGLOCK)",
selectable=t2,
dialect_name=darg),
"UPDATE sometable SET somecolumn=:somecolumn "
"FROM sometable, othertable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = othertable.somecolumn"
)
def test_update_to_select_schema(self):
meta = MetaData()
table = Table(
"sometable", meta,
Column("sym", String),
Column("val", Integer),
schema="schema"
)
other = Table(
"#other", meta,
Column("sym", String),
Column("newval", Integer)
)
stmt = table.update().values(
val=select([other.c.newval]).
where(table.c.sym == other.c.sym).as_scalar())
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"(SELECT [#other].newval FROM [#other] "
"WHERE [schema].sometable.sym = [#other].sym)",
)
stmt = table.update().values(val=other.c.newval).\
where(table.c.sym == other.c.sym)
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"[#other].newval FROM [schema].sometable, "
"[#other] WHERE [schema].sometable.sym = [#other].sym",
)
# TODO: not supported yet.
# def test_delete_from_hint(self):
# t = table('sometable', column('somecolumn'))
# t2 = table('othertable', column('somecolumn'))
# for darg in ("*", "mssql"):
# self.assert_compile(
# t.delete().where(t.c.somecolumn==t2.c.somecolumn).
# with_hint("WITH (PAGLOCK)",
# selectable=t2,
# dialect_name=darg),
# ""
# )
def test_strict_binds(self):
"""test the 'strict' compiler binds."""
from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
mxodbc_dialect = mxodbc.dialect()
mxodbc_dialect.statement_compiler = MSSQLStrictCompiler
t = table('sometable', column('foo'))
for expr, compile in [
(
select([literal("x"), literal("y")]),
"SELECT 'x' AS anon_1, 'y' AS anon_2",
),
(
select([t]).where(t.c.foo.in_(['x', 'y', 'z'])),
"SELECT sometable.foo FROM sometable WHERE sometable.foo "
"IN ('x', 'y', 'z')",
),
(
t.c.foo.in_([None]),
"sometable.foo IN (NULL)"
)
]:
self.assert_compile(expr, compile, dialect=mxodbc_dialect)
def test_in_with_subqueries(self):
"""Test removal of legacy behavior that converted "x==subquery"
to use IN.
"""
t = table('sometable', column('somecolumn'))
self.assert_compile(t.select().where(t.c.somecolumn
== t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn = '
'(SELECT sometable.somecolumn FROM '
'sometable)')
self.assert_compile(t.select().where(t.c.somecolumn
!= t.select()),
'SELECT sometable.somecolumn FROM '
'sometable WHERE sometable.somecolumn != '
'(SELECT sometable.somecolumn FROM '
'sometable)')
def test_count(self):
t = table('sometable', column('somecolumn'))
self.assert_compile(t.count(),
'SELECT count(sometable.somecolumn) AS '
'tbl_row_count FROM sometable')
def test_noorderby_insubquery(self):
"""test that the ms-sql dialect removes ORDER BY clauses from
subqueries"""
table1 = table('mytable',
column('myid', Integer),
column('name', String),
column('description', String),
)
q = select([table1.c.myid],
order_by=[table1.c.myid]).alias('foo')
crit = q.c.myid == table1.c.myid
self.assert_compile(select(['*'], crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid")
def test_delete_schema(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True), schema='paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM paj.test WHERE paj.test.id = '
':id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM paj.test WHERE paj.test.id IN '
'(SELECT paj.test.id FROM paj.test '
'WHERE paj.test.id = :id_1)')
def test_delete_schema_multipart(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer,
primary_key=True),
schema='banana.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM banana.paj.test WHERE '
'banana.paj.test.id IN (SELECT banana.paj.test.id '
'FROM banana.paj.test WHERE '
'banana.paj.test.id = :id_1)')
def test_delete_schema_multipart_needs_quoting(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('id', Integer, primary_key=True),
schema='banana split.paj')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(tbl.delete().where(tbl.c.id.in_(s)),
'DELETE FROM [banana split].paj.test WHERE '
'[banana split].paj.test.id IN ('
'SELECT [banana split].paj.test.id FROM '
'[banana split].paj.test WHERE '
'[banana split].paj.test.id = :id_1)')
def test_delete_schema_multipart_both_need_quoting(self):
metadata = MetaData()
tbl = Table('test', metadata, Column('id', Integer,
primary_key=True),
schema='banana split.paj with a space')
self.assert_compile(tbl.delete(tbl.c.id == 1),
'DELETE FROM [banana split].[paj with a '
'space].test WHERE [banana split].[paj '
'with a space].test.id = :id_1')
s = select([tbl.c.id]).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id IN "
"(SELECT [banana split].[paj with a space].test.id "
"FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id = :id_1)"
)
def test_union(self):
t1 = table(
't1', column('col1'), column('col2'),
column('col3'), column('col4'))
t2 = table(
't2', column('col1'), column('col2'),
column('col3'), column('col4'))
s1, s2 = select(
[t1.c.col3.label('col3'), t1.c.col4.label('col4')],
t1.c.col2.in_(['t1col2r1', 't1col2r2'])), \
select([t2.c.col3.label('col3'), t2.c.col4.label('col4')],
t2.c.col2.in_(['t2col2r2', 't2col2r3']))
u = union(s1, s2, order_by=['col3', 'col4'])
self.assert_compile(u,
'SELECT t1.col3 AS col3, t1.col4 AS col4 '
'FROM t1 WHERE t1.col2 IN (:col2_1, '
':col2_2) UNION SELECT t2.col3 AS col3, '
't2.col4 AS col4 FROM t2 WHERE t2.col2 IN '
'(:col2_3, :col2_4) ORDER BY col3, col4')
self.assert_compile(u.alias('bar').select(),
'SELECT bar.col3, bar.col4 FROM (SELECT '
't1.col3 AS col3, t1.col4 AS col4 FROM t1 '
'WHERE t1.col2 IN (:col2_1, :col2_2) UNION '
'SELECT t2.col3 AS col3, t2.col4 AS col4 '
'FROM t2 WHERE t2.col2 IN (:col2_3, '
':col2_4)) AS bar')
def test_function(self):
self.assert_compile(func.foo(1, 2), 'foo(:foo_1, :foo_2)')
self.assert_compile(func.current_time(), 'CURRENT_TIME')
self.assert_compile(func.foo(), 'foo()')
m = MetaData()
t = Table(
'sometable', m, Column('col1', Integer), Column('col2', Integer))
self.assert_compile(select([func.max(t.c.col1)]),
'SELECT max(sometable.col1) AS max_1 FROM '
'sometable')
def test_function_overrides(self):
self.assert_compile(func.current_date(), "GETDATE()")
self.assert_compile(func.length(3), "LEN(:length_1)")
def test_extract(self):
t = table('t', column('col1'))
for field in 'day', 'month', 'year':
self.assert_compile(
select([extract(field, t.c.col1)]),
'SELECT DATEPART("%s", t.col1) AS anon_1 FROM t' % field)
def test_update_returning(self):
table1 = table(
'mytable',
column('myid', Integer),
column('name', String(128)),
column('description', String(128)))
u = update(
table1,
values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name')
u = update(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description')
u = update(
table1,
values=dict(
name='foo')).returning(table1).where(table1.c.name == 'bar')
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description WHERE mytable.name = '
':name_1')
u = update(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(u,
'UPDATE mytable SET name=:name OUTPUT '
'LEN(inserted.name) AS length_1')
def test_delete_returning(self):
table1 = table(
'mytable', column('myid', Integer),
column('name', String(128)), column('description', String(128)))
d = delete(table1).returning(table1.c.myid, table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name')
d = delete(table1).where(table1.c.name == 'bar'
).returning(table1.c.myid,
table1.c.name)
self.assert_compile(d,
'DELETE FROM mytable OUTPUT deleted.myid, '
'deleted.name WHERE mytable.name = :name_1')
def test_insert_returning(self):
table1 = table(
'mytable', column('myid', Integer),
column('name', String(128)), column('description', String(128)))
i = insert(
table1,
values=dict(name='foo')).returning(table1.c.myid, table1.c.name)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name VALUES '
'(:name)')
i = insert(table1, values=dict(name='foo')).returning(table1)
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'inserted.myid, inserted.name, '
'inserted.description VALUES (:name)')
i = insert(table1, values=dict(name='foo'
)).returning(func.length(table1.c.name))
self.assert_compile(i,
'INSERT INTO mytable (name) OUTPUT '
'LEN(inserted.name) AS length_1 VALUES '
'(:name)')
def test_limit_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(10)
self.assert_compile(
s,
"SELECT TOP 10 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
def test_limit_zero_using_top(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(0)
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
c = s.compile(dialect=mssql.MSDialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
def test_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).offset(20)
# test that the select is not altered with subsequent compile
# calls
for i in range(2):
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y "
"AS y, ROW_NUMBER() OVER (ORDER BY t.y) AS "
"mssql_rn FROM t WHERE t.x = :x_1) AS "
"anon_1 WHERE mssql_rn > :param_1",
checkparams={'param_1': 20, 'x_1': 5}
)
c = s.compile(dialect=mssql.MSDialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
def test_limit_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.MSDialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()['x'][1])
assert t.c.y in set(c._create_result_map()['y'][1])
def test_limit_offset_with_correlated_order_by(self):
t1 = table('t1', column('x', Integer), column('y', Integer))
t2 = table('t2', column('x', Integer), column('y', Integer))
order_by = select([t2.c.y]).where(t1.c.x == t2.c.x).as_scalar()
s = select([t1]).where(t1.c.x == 5).order_by(order_by) \
.limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t1.x AS x, t1.y AS y, "
"ROW_NUMBER() OVER (ORDER BY "
"(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
") AS mssql_rn "
"FROM t1 "
"WHERE t1.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={'param_1': 20, 'param_2': 10, 'x_1': 5}
)
c = s.compile(dialect=mssql.MSDialect())
eq_(len(c._result_columns), 2)
assert t1.c.x in set(c._create_result_map()['x'][1])
assert t1.c.y in set(c._create_result_map()['y'][1])
def test_limit_zero_offset_using_window(self):
t = table('t', column('x', Integer), column('y', Integer))
s = select([t]).where(t.c.x == 5).order_by(t.c.y).limit(0).offset(0)
# render the LIMIT of zero, but not the OFFSET
# of zero, so produces TOP 0
self.assert_compile(
s,
"SELECT TOP 0 t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={'x_1': 5}
)
def test_sequence_start_0(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence('', 0), primary_key=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(0,1), "
"PRIMARY KEY (id))"
)
def test_sequence_non_primary_key(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence(''), primary_key=False))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))"
)
def test_sequence_ignore_nullability(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer, Sequence(''), nullable=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (id INTEGER NOT NULL IDENTITY(1,1))"
)
def test_table_pkc_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY CLUSTERED (x, y))"
)
def test_table_uc_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('x', Integer, autoincrement=False),
Column('y', Integer, autoincrement=False),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True))
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NULL, "
"PRIMARY KEY (x), UNIQUE CLUSTERED (y))"
)
def test_index_clustering(self):
metadata = MetaData()
tbl = Table('test', metadata,
Column('id', Integer))
idx = Index("foo", tbl.c.id, mssql_clustered=True)
self.assert_compile(schema.CreateIndex(idx),
"CREATE CLUSTERED INDEX foo ON test (id)"
)
def test_index_ordering(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x.desc(), "y")
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x DESC, y)"
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer)
)
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)"
)
def test_drop_index_w_schema(self):
m = MetaData()
t1 = Table('foo', m,
Column('x', Integer),
schema='bar'
)
self.assert_compile(
schema.DropIndex(Index("idx_foo", t1.c.x)),
"DROP INDEX idx_foo ON bar.foo"
)
def test_index_extra_include_1(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=['y'])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
def test_index_extra_include_2(self):
metadata = MetaData()
tbl = Table(
'test', metadata,
Column('x', Integer), Column('y', Integer), Column('z', Integer))
idx = Index("foo", tbl.c.x, mssql_include=[tbl.c.y])
self.assert_compile(schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y)"
)
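# --- Illustrative sketch (not part of the original test module) ---
# The LIMIT/OFFSET tests above assert the exact SQL the MSSQL dialect emits:
# a bare LIMIT becomes TOP, while OFFSET is emulated with a ROW_NUMBER()
# subquery. A minimal way to see that output interactively, assuming the
# standard ``sqlalchemy.sql`` and ``sqlalchemy.dialects.mssql`` import paths:
def _demo_mssql_limit_offset_sql():
    """Print the TOP / ROW_NUMBER() SQL produced for LIMIT and OFFSET."""
    from sqlalchemy import Integer
    from sqlalchemy.sql import table, column, select
    from sqlalchemy.dialects import mssql

    t = table('t', column('x', Integer), column('y', Integer))
    # LIMIT alone renders as SELECT TOP n ...
    print(select([t]).limit(10).compile(dialect=mssql.dialect()))
    # OFFSET requires an ORDER BY and is wrapped in a ROW_NUMBER() subquery.
    print(select([t]).order_by(t.c.y).limit(10).offset(20)
          .compile(dialect=mssql.dialect()))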
class SchemaTest(fixtures.TestBase):
def setup(self):
t = Table('sometable', MetaData(),
Column('pk_column', Integer),
Column('test_column', String)
)
self.column = t.c.test_column
dialect = mssql.dialect()
self.ddl_compiler = dialect.ddl_compiler(dialect,
schema.CreateTable(t))
def _column_spec(self):
return self.ddl_compiler.get_column_specification(self.column)
def test_that_mssql_default_nullability_emits_null(self):
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_none_nullability_does_not_emit_nullability(self):
self.column.nullable = None
eq_("test_column VARCHAR(max)", self._column_spec())
def test_that_mssql_specified_nullable_emits_null(self):
self.column.nullable = True
eq_("test_column VARCHAR(max) NULL", self._column_spec())
def test_that_mssql_specified_not_nullable_emits_not_null(self):
self.column.nullable = False
eq_("test_column VARCHAR(max) NOT NULL", self._column_spec())
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
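# --- Illustrative sketch (not part of versioneer itself) ---
# In a git-archive tarball git expands the "$Format:...$" placeholders above,
# so get_keywords() returns something shaped like the hand-written example
# below (the refnames, hash and date here are made up for illustration):
def _example_expanded_keywords():
    """Return an example of what expanded git-archive keywords look like."""
    return {
        "refnames": " (HEAD -> master, tag: 1.2.3)",
        "full": "0123456789abcdef0123456789abcdef01234567",
        "date": "2020-01-01 12:00:00 +0000",
    }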
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = ""
cfg.versionfile_source = "mriqc/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
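# --- Illustrative usage sketch (not part of versioneer itself) ---
# run_command() tries each candidate executable in turn and returns a
# (stdout, returncode) pair, or (None, None) if nothing could be started.
# For example, asking git for the current branch (assumes git is installed):
def _demo_run_command():
    """Print the current branch name using run_command(), if git is available."""
    out, rc = run_command(["git"], ["rev-parse", "--abbrev-ref", "HEAD"],
                          cwd=".", verbose=True)
    if rc == 0:
        print("on branch %s" % out)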
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
prefix_len = len(parentdir_prefix)
return {
"version": dirname[prefix_len:],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tag_len = len(TAG)
tags = set([r[tag_len:] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
prefix_len = len(tag_prefix)
r = ref[prefix_len:]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
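# --- Illustrative sketch (not part of versioneer itself) ---
# The "tag: " handling above can be seen on a small hand-written refnames
# string (the refs below are made up):
def _demo_refnames_to_tags():
    """Show how a decorated refnames string is reduced to candidate tags."""
    refnames = " (HEAD -> master, tag: 1.2.3, tag: 1.2.3rc1, origin/master)".strip()
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    tags = set(r[len("tag: "):] for r in refs if r.startswith("tag: "))
    return sorted(tags)  # ['1.2.3', '1.2.3rc1']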
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
prefix_len = len(tag_prefix)
pieces["closest-tag"] = full_tag[prefix_len:]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
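# --- Illustrative sketch (not part of versioneer itself) ---
# The parsing above turns "git describe" output into the pieces dict.  On a
# made-up describe string it behaves like this:
def _demo_parse_describe():
    """Map a hypothetical 'git describe' string onto the pieces fields."""
    describe_out = "1.2.3-4-gabc1234-dirty"
    dirty = describe_out.endswith("-dirty")
    core = describe_out[:describe_out.rindex("-dirty")] if dirty else describe_out
    mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", core)
    return {"closest-tag": mo.group(1),    # '1.2.3'
            "distance": int(mo.group(2)),  # 4
            "short": mo.group(3),          # 'abc1234'
            "dirty": dirty}                # True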
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
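# --- Illustrative sketch (not part of versioneer itself) ---
# With a hypothetical pieces dict, the pep440 renderer above produces a local
# version identifier:
def _demo_render_pep440():
    """Render a made-up pieces dict: returns '1.2.3+4.gabc1234.dirty'."""
    pieces = {"closest-tag": "1.2.3", "distance": 4,
              "short": "abc1234", "dirty": True}
    return render_pep440(pieces)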
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
root = os.path.realpath(__file__)
root_dir = os.path.dirname(root)
if os.path.isfile(os.path.join(root_dir, "VERSION")):
with open(os.path.join(root_dir, "VERSION")) as vfile:
version = vfile.readline().strip()
return {
"version": version,
"full-revisionid": None,
"dirty": None,
"error": None,
"date": None,
}
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
except NotThisMethod:
pass
try:
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
|
|
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater,
asarray, inf,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError, inv
from scipy._lib._util import _asarray_validated, _lazywhere
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
# from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
Examples
--------
Find a solution to the system of equations:
``x0*cos(x1) = 4, x1*x0 - x1 = 5``.
    >>> import numpy as np
    >>> from scipy.optimize import fsolve
>>> def func(x):
... return [x[0] * np.cos(x[1]) - 4,
... x[1] * x[0] - x[1] - 5]
>>> root = fsolve(func, [1, 1])
>>> root
array([6.50409711, 0.90841421])
>>> np.isclose(func(root), [0.0, 0.0]) # func(root) should be almost 0.0.
array([ True, True])
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
Should take at least one (possibly length N vector) argument and
        return M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided,
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
        Normally the actual step length will be ``sqrt(epsfcn)*x``.
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
        N positive entries that serve as scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1-D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
Examples
--------
>>> from scipy.optimize import leastsq
>>> def func(x):
... return 2*(x-3)**2+1
>>> leastsq(func, 0)
(array([2.99999999]), 1)
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
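# --- Illustrative sketch (not part of SciPy) ---
# As the docstring above notes, cov_x must be scaled by the residual variance
# to obtain a parameter covariance estimate; a minimal straight-line fit
# showing that scaling (the data and noise level below are made up):
def _demo_leastsq_covariance():
    """Scale leastsq's cov_x by the reduced chi-square to estimate pcov."""
    xdata = np.linspace(0, 10, 30)
    rng = np.random.RandomState(0)
    ydata = 3.0 * xdata + 1.0 + 0.1 * rng.normal(size=xdata.size)

    def residuals(p):
        return ydata - (p[0] * xdata + p[1])

    popt, cov_x, infodict, mesg, ier = leastsq(residuals, [1.0, 0.0],
                                               full_output=True)
    s_sq = (residuals(popt) ** 2).sum() / (len(xdata) - len(popt))
    return popt, cov_x * s_sq      # parameter covariance estimate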
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
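# --- Illustrative sketch (not part of SciPy) ---
# The 2-D branch above whitens the residuals with the Cholesky factor L of
# the covariance C (C = L L^T), so that r'^T r' equals r^T C^{-1} r.  A small
# numerical check of that identity (the random C and r here are made up):
def _demo_whitened_residuals():
    """Check that solve_triangular-based whitening reproduces r^T C^-1 r."""
    rng = np.random.RandomState(0)
    A = rng.rand(4, 4)
    C = np.dot(A, A.T) + 4 * np.eye(4)       # a positive-definite covariance
    r = rng.rand(4)                          # a residual vector
    L = cholesky(C, lower=True)
    r_white = solve_triangular(L, r, lower=True)
    chisq_direct = np.dot(r, np.linalg.solve(C, r))
    chisq_white = np.dot(r_white, r_white)
    assert np.allclose(chisq_direct, chisq_white)
    return chisq_white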
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
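# --- Illustrative sketch (not part of SciPy) ---
# _initialize_feasible() picks a starting point strictly inside the bounds:
# the midpoint when both bounds are finite, one unit inside a single finite
# bound, and 1.0 when both bounds are infinite.  For example:
def _demo_initialize_feasible():
    """Return the feasible start for a few made-up bound combinations."""
    lb = np.array([0.0, -np.inf, -np.inf, 2.0])
    ub = np.array([1.0, 5.0, np.inf, np.inf])
    return _initialize_feasible(lb, ub)   # array([0.5, 4., 1., 3.])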
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``.
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
        Should usually be an M-length sequence or a (k, M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-D `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-D `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
        None (default) is equivalent to a 1-D `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False (default), only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit. Default is False.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
        If True, check that the input arrays do not contain NaNs or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters). Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized.
pcov : 2-D array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
        If the Jacobian matrix at the solution doesn't have full rank, the
        'lm' method returns a matrix filled with ``np.inf``; the 'trf' and
        'dogbox' methods, on the other hand, use the Moore-Penrose
        pseudoinverse to compute the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
"""
if p0 is None:
# determine number of parameters by inspecting the function
sig = _getfullargspec(f)
args = sig.args
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs cannot be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-D, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-D, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError as e:
raise ValueError("`sigma` must be positive definite.") from e
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
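# --- Illustrative sketch (not part of SciPy) ---
# The docstring above describes how `sigma` and `absolute_sigma` shape the
# returned covariance; a compact weighted-fit example that also derives the
# one-sigma parameter errors (the data and noise level below are made up):
def _demo_curve_fit_sigma():
    """Weighted exponential fit; returns optimal parameters and their errors."""
    rng = np.random.RandomState(0)
    xdata = np.linspace(0, 4, 50)
    ydata = 2.5 * np.exp(-1.3 * xdata) + 0.5 + 0.2 * rng.normal(size=xdata.size)
    sigma = np.full_like(ydata, 0.2)     # known absolute measurement error

    def model(x, a, b, c):
        return a * np.exp(-b * x) + c

    popt, pcov = curve_fit(model, xdata, ydata, sigma=sigma, absolute_sigma=True)
    perr = np.sqrt(np.diag(pcov))        # one-standard-deviation errors
    return popt, perr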
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
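# --- Illustrative sketch (not part of SciPy) ---
# The 'del2' acceleration above is Steffensen's method: two plain iterations
# p1, p2 are combined through _del2() to jump much closer to the fixed point.
# One hand-worked step for the map x -> cos(x) (fixed point near 0.739085):
def _demo_del2_step():
    """Return one accelerated iterate for x -> cos(x), starting from 1.0."""
    p0 = 1.0
    p1 = np.cos(p0)
    p2 = np.cos(p1)
    d = p2 - 2.0 * p1 + p0
    return _del2(p0, p1, d)   # ~0.728, versus p1 ~ 0.540 without acceleration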
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed point of the function: i.e., where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2",
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
    >>> import numpy as np
    >>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import re
import os
import uuid
import pytest
import popquotes.pmxbot
import pmxbot.dictlib
import pmxbot.storage
from pmxbot import core
from pmxbot import logging
from pmxbot import commands
from pmxbot import karma
from pmxbot import quotes
from pmxbot import system
def pytest_generate_tests(metafunc):
# any test that takes the iter_ parameter should be executed 100 times
if "iter_" in metafunc.funcargnames:
for i in range(100):
metafunc.addcall(funcargs=dict(iter_=i))
class Empty(object):
"""
Passed in to the individual commands instead of a client/event because
we don't normally care about them
"""
pass
c = Empty()
e = Empty()
def logical_xor(a, b):
return bool(a) ^ bool(b)
def onetrue(*args):
    # list() so len() also works on Python 3, where filter() returns an iterator
    truthiness = list(filter(bool, args))
    return len(truthiness) == 1
class TestCommands(object):
@classmethod
def setup_class(cls):
path = os.path.dirname(os.path.abspath(__file__))
configfile = os.path.join(path, 'testconf.yaml')
config = pmxbot.dictlib.ConfigDict.from_yaml(configfile)
cls.bot = core.initialize(config)
logging.Logger.store.message("logged", "testrunner", "some text")
@classmethod
def teardown_class(cls):
pmxbot.storage.SelectableStorage.finalize()
path = os.path.dirname(os.path.abspath(__file__))
os.remove(os.path.join(path, 'pmxbot.sqlite'))
@pytest.has_internet
def test_google(self):
"""
Basic google search for "pmxbot". Result must contain a link.
"""
res = commands.google(c, e, "#test", "testrunner", "pmxbot")
print(res)
assert "http" in res
@pytest.has_internet
def test_googlecalc_simple(self):
"""
Basic google calculator command - 1+1 must include 2 in results
"""
res = commands.googlecalc(c, e, "#test", "testrunner", "1+1")
print(res)
assert "2" in res
@pytest.has_internet
def test_googlecalc_complicated(self):
"""
More complicated google calculator command - 40 gallons in liters must
include 151.4 in results
"""
res = commands.googlecalc(c, e, "#test", "testrunner", "40 gallons "
"in liters")
print(res)
assert "151.4" in res
@pytest.has_internet
def test_googlecalc_supercomplicated(self):
"""
Supercomplicated google calculator command - 502 hogsheads per mile in
litres per km includes 74 and 388.9641 in results
"""
res = commands.googlecalc(c, e, "#test", "testrunner",
"502 hogsheads per mile in litres per km")
assert "388.9641" in res and "74" in res
@pytest.has_internet
def test_googlecalc_currency_usd_gbp(self):
"""
        Test the google calculator for a currency conversion: 1 USD in GBP
"""
res = commands.googlecalc(c, e, "#test", "testrunner", "1 USD in GBP")
print(res)
assert re.match(r"1 (?:US|U\.S\.) dollars? = \d\.\d+ British "
r"pounds?(?: sterling)?", res)
@pytest.has_internet
def test_googlecalc_currency_czk_euro(self):
"""
        Test the google calculator for a currency conversion: 12 CZK in euros
"""
res = commands.googlecalc(c, e, "#test", "testrunner", "12 CZK in "
"euros")
print(res)
assert re.match(r"12 Czech(?: Republic)? [Kk]orun(?:a|y)s? = "
r"\d\.\d+ [Ee]uros?", res)
# time patterns come as 4:20pm when queried from the U.S. and 16:20
# when queried from (at least some) other locales.
time_pattern = r'[0-9]{1,2}:[0-9]{2}(?:am|pm)?'
single_time_pattern = re.compile(time_pattern)
multi_time_pattern = re.compile(time_pattern + r'\s+\(.*\)')
@pytest.mark.xfail(reason="Time parsing broken")
@pytest.has_internet
def test_time_one(self):
"""
Check the time in Washington, DC. Must match time_pattern.
"""
res = commands.googletime(c, e, "#test", "testrunner",
"Washington, DC")
res = list(res)
assert res
for line in res:
assert self.single_time_pattern.match(line)
assert len(res) == 1
@pytest.mark.xfail(reason="Time parsing broken")
@pytest.has_internet
def test_time_three(self):
"""
Check the time in three cities. Must include something that
matches the time pattern on each line
"""
res = commands.googletime(c, e, "#test", "testrunner",
"Washington, DC | Palo Alto, CA | London")
res = list(res)
assert res
for line in res:
assert self.multi_time_pattern.match(line)
assert len(res) == 3
@pytest.mark.xfail(reason="Time parsing broken")
@pytest.has_internet
def test_time_all(self):
"""
Check the time in "all" cities. Must include something that
matches the time pattern on each line
"""
res = commands.googletime(c, e, "#test", "testrunner", "all")
res = list(res)
assert res
for line in res:
assert self.multi_time_pattern.match(line)
assert len(res) == 4
@pytest.mark.xfail(reason="Google APIs disabled")
def test_weather_one(self):
"""
Check the weather in Washington, DC. Must include something that looks
like a weather XX:XX(AM/PM)
"""
res = commands.weather(c, e, "#test", "testrunner", "Washington, DC")
for line in res:
print(line)
assert re.match(r".+\. Currently: (?:-)?[0-9]{1,3}F/(?:-)?"
r"[0-9]{1,2}C, .+\.\W+[A-z]{3}: (?:-)?[0-9]{1,3}F/(?:-)?"
r"[0-9]{1,2}C, ", line)
@pytest.mark.xfail(reason="Google APIs disabled")
def test_weather_three(self):
"""
Check the weather in three cities. Must include something that looks
like a weather XX:XX(AM/PM) on each line
"""
places = "Washington, DC", "Palo Alto, CA", "London"
places_spec = ' | '.join(places)
res = commands.weather(c, e, "#test", "testrunner", places_spec)
for line in res:
print(line)
assert re.match(r".+\. Currently: (?:-)?[0-9]{1,3}F/(?:-)?"
r"[0-9]{1,2}C, .+\.\W+[A-z]{3}: (?:-)?[0-9]{1,3}F/(?:-)?"
r"[0-9]{1,2}C, ", line)
@pytest.mark.xfail(reason="Google APIs disabled")
def test_weather_all(self):
"""
Check the weather in "all" cities. Must include something that looks
like
a weather XX:XX(AM/PM) on each line
"""
res = commands.weather(c, e, "#test", "testrunner", "all")
for line in res:
print(line)
assert re.match(r".+\. Currently: (?:-)?[0-9]{1,3}F/(?:-)?"
r"[0-9]{1,2}C, .+\.\W+[A-z]{3}: (?:-)?[0-9]{1,3}F/(?:-)?"
r"[0-9]{1,2}C, ", line)
def test_boo(self):
"""
Test "boo foo"
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.boo(c, e, "#test", "testrunner", subject)
assert res == "/me BOOO %s!!! BOOO!!!" % subject
post = karma.Karma.store.lookup(subject)
assert post == pre - 1
def test_troutslap(self):
"""
Test "troutslap foo"
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.troutslap(c, e, "#test", "testrunner", subject)
assert res == "/me slaps %s around a bit with a large trout" % subject
post = karma.Karma.store.lookup(subject)
assert post == pre - 1
def test_keelhaul(self):
"""
Test "keelhaul foo"
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.keelhaul(c, e, "#test", "testrunner", subject)
assert res == ("/me straps %s to a dirty rope, tosses 'em overboard "
"and pulls with great speed. Yarrr!" % subject)
post = karma.Karma.store.lookup(subject)
assert post == pre - 1
def test_motivate(self):
"""
Test that motivate actually works.
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.motivate(c, e, "#test", "testrunner", subject)
assert res == "you're doing good work, %s!" % subject
post = karma.Karma.store.lookup(subject)
assert post == pre + 1
def test_motivate_with_spaces(self):
"""
Test that motivate strips beginning and ending whitespace
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.motivate(c, e, "#test", "testrunner",
" %s " % subject)
assert res == "you're doing good work, %s!" % subject
post = karma.Karma.store.lookup(subject)
assert post == pre + 1
def test_demotivate(self):
"""
Test that demotivate actually works.
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.demotivate(c, e, "#test", "testrunner", subject)
assert res == "you're doing horrible work, %s!" % subject
post = karma.Karma.store.lookup(subject)
assert post == pre - 1
def test_imotivate(self):
"""
Test that ironic/sarcastic motivate actually works.
"""
subject = "foo"
pre = karma.Karma.store.lookup(subject)
res = commands.imotivate(c, e, "#test", "testrunner", subject)
assert res == """you're "doing" "good" "work", %s!""" % subject
post = karma.Karma.store.lookup(subject)
assert post == pre - 1
def test_add_quote(self):
"""
Try adding a quote
"""
quote = "And then she said %s" % str(uuid.uuid4())
res = quotes.quote(c, e, "#test", "testrunner", "add %s" % quote)
assert res == "Quote added!"
cursor = logging.Logger.store.db.cursor()
cursor.execute("select count(*) from quotes where library = 'pmx' "
"and quote = ?", (quote,))
numquotes = cursor.fetchone()[0]
assert numquotes == 1
def test_add_and_retrieve_quote(self):
"""
Try adding a quote, then retrieving it
"""
id = str(uuid.uuid4())
quote = "So I says to Mabel, I says, %s" % id
res = quotes.quote(c, e, "#test", "testrunner", "add %s" % quote)
assert res == "Quote added!"
cursor = logging.Logger.store.db.cursor()
cursor.execute("select count(*) from quotes where library = 'pmx' "
"and quote = ?", (quote,))
numquotes = cursor.fetchone()[0]
assert numquotes == 1
res = quotes.quote(c, e, "#test", "testrunner", id)
assert res == "(1/1): %s" % quote
def test_roll(self):
"""
Roll a die, both with no arguments and with some numbers
"""
res = int(commands.roll(c, e, "#test", "testrunner", "").split()[-1])
assert res >= 0 and res <= 100
n = 6668
res = commands.roll(c, e, "#test", "testrunner", "%s" % n).split()[-1]
res = int(res)
assert res >= 0 and res <= n
@pytest.has_internet
def test_ticker_goog(self):
"""
Get the current stock price of Google.
GOOG at 4:00pm (ET): 484.81 (1.5%)
"""
res = commands.ticker(c, e, "#test", "testrunner", "goog")
print(res)
assert re.match(r"^GOOG at \d{1,2}:\d{2}(?:am|pm) \([A-z]{1,3}\): "
r"\d{2,4}.\d{1,4} \(\-?\d{1,3}.\d%\)$", res), res
@pytest.has_internet
def test_ticker_yougov(self):
"""
Get the current stock price of YouGov.
YOU.L at 10:37am (ET): 39.40 (0.4%)
"""
res = commands.ticker(c, e, "#test", "testrunner", "you.l")
print(res)
assert re.match(r"^YOU.L at \d{1,2}:\d{2}(?:am|pm) \([A-z]{1,3}\): "
r"\d{1,4}.\d{2,4} \(\-?\d{1,3}.\d%\)$", res), res
@pytest.has_internet
def test_ticker_nasdaq(self):
"""
Get the current stock price of the NASDAQ.
^IXIC at 10:37am (ET): 2490.40 (0.4%)
"""
res = commands.ticker(c, e, "#test", "testrunner", "^ixic")
print(res)
assert re.match(r"^\^IXIC at \d{1,2}:\d{2}(?:am|pm) \([A-z]{1,3}\): "
r"\d{4,5}.\d{2} \(\-?\d{1,3}.\d%\)$", res), res
def test_pick_or(self):
"""
Test the pick command with a simple or expression
"""
res = commands.pick(c, e, "#test", "testrunner", "fire or acid")
assert logical_xor("fire" in res, "acid" in res)
assert " or " not in res
def test_pick_or_intro(self):
"""
Test the pick command with an intro and a simple "or" expression
"""
res = commands.pick(c, e, "#test", "testrunner",
"how would you like to die, pmxbot: fire or acid")
assert logical_xor("fire" in res, "acid" in res)
assert "die" not in res and "pmxbot" not in res and " or " not in res
def test_pick_comma(self):
"""
Test the pick command with two options separated by commas
"""
res = commands.pick(c, e, "#test", "testrunner", "fire, acid")
assert logical_xor("fire" in res, "acid" in res)
def test_pick_comma_intro(self):
"""
Test the pick command with an intro followed by two options separated
by commas
"""
res = commands.pick(c, e, "#test", "testrunner",
"how would you like to die, pmxbot: fire, acid")
assert logical_xor("fire" in res, "acid" in res)
assert "die" not in res and "pmxbot" not in res
def test_pick_comma_or_intro(self):
"""
Test the pick command with an intro followed by options separated by
commas and ors
"""
res = commands.pick(c, e, "#test", "testrunner",
"how would you like to die, pmxbot: gun, fire, acid or "
"defenestration")
assert onetrue("gun" in res, "fire" in res, "acid" in res,
"defenestration" in res)
assert "die" not in res and "pmxbot" not in res and " or " not in res
def test_lunch(self):
"""
Test that the lunch command selects one of the list options
"""
res = commands.lunch(c, e, "#test", "testrunner", "PA")
assert res in ["Pasta?", "Thaiphoon", "Pluto's",
"Penninsula Creamery", "Kan Zeman"]
def test_karma_check_self_blank(self):
"""
Determine your own, blank, karma.
"""
id = str(uuid.uuid4())[:15]
res = karma.karma(c, e, "#test", id, "")
assert re.match(r"^%s has 0 karmas$" % id, res)
def test_karma_check_other_blank(self):
"""
Determine someone else's blank/new karma.
"""
id = str(uuid.uuid4())
res = karma.karma(c, e, "#test", "testrunner", id)
assert re.match("^%s has 0 karmas$" % id, res)
def test_karma_set_and_check(self):
"""
Take a new entity, give it some karma, check that it has more
"""
id = str(uuid.uuid4())
res = karma.karma(c, e, "#test", "testrunner", id)
assert re.match("^%s has 0 karmas$" % id, res)
res = karma.karma(c, e, "#test", "testrunner", "%s++" %id)
res = karma.karma(c, e, "#test", "testrunner", "%s++" %id)
res = karma.karma(c, e, "#test", "testrunner", "%s++" %id)
res = karma.karma(c, e, "#test", "testrunner", "%s--" %id)
res = karma.karma(c, e, "#test", "testrunner", id)
assert re.match(r"^%s has 2 karmas$" % id, res)
def test_karma_set_and_check_with_space(self):
"""
Take a new entity that has a space in its name, give it some karma,
check that it has more
"""
id = str(uuid.uuid4()).replace("-", " ")
res = karma.karma(c, e, "#test", "testrunner", id)
assert re.match("^%s has 0 karmas$" % id, res)
res = karma.karma(c, e, "#test", "testrunner", "%s++" %id)
res = karma.karma(c, e, "#test", "testrunner", "%s++" %id)
res = karma.karma(c, e, "#test", "testrunner", "%s++" %id)
res = karma.karma(c, e, "#test", "testrunner", "%s--" %id)
res = karma.karma(c, e, "#test", "testrunner", id)
assert re.match(r"^%s has 2 karmas$" % id, res)
def test_karma_randomchange(self):
"""
Take a new entity and repeatedly apply the random karma operator (~~),
checking that increment, decrement, and no-change outcomes all occur
and adjust the karma value correctly.
"""
id = str(uuid.uuid4())
flags = {}
i = 0
karmafetch = re.compile(r"^%s has (\-?\d+) karmas$" % id)
while len(flags) < 3 and i <= 30:
res = karma.karma(c, e, "#test", "testrunner", id)
prekarma = int(karmafetch.findall(res)[0])
change = karma.karma(c, e, "#test", "testrunner", "%s~~" % id)
assert change in ["%s karma++" % id, "%s karma--" % id,
"%s karma shall remain the same" % id]
if change.endswith('karma++'):
flags['++'] = True
res = karma.karma(c, e, "#test", "testrunner", id)
postkarma = int(karmafetch.findall(res)[0])
assert postkarma == prekarma + 1
elif change.endswith('karma--'):
flags['--'] = True
res = karma.karma(c, e, "#test", "testrunner", id)
postkarma = int(karmafetch.findall(res)[0])
assert postkarma == prekarma - 1
elif change.endswith('karma shall remain the same'):
flags['same'] = True
res = karma.karma(c, e, "#test", "testrunner", id)
postkarma = int(karmafetch.findall(res)[0])
assert postkarma == prekarma
i += 1
assert len(flags) == 3
assert i < 30
def test_calc_simple(self):
"""
Test the built-in python calculator with a simple expression - 2+2
"""
res = commands.calc(c, e, "#test", "testrunner", "2+2")
print(res)
assert res == "4"
def test_calc_complex(self):
"""
Test the built-in python calculator with a more complicated formula
((((781**2)*5)/92835.3)+4)**0.5
"""
res = commands.calc(c, e, "#test", "testrunner",
"((((781**2)*5)/92835.3)+4)**0.5")
print(res)
assert res.startswith("6.070566")
@pytest.has_internet
def test_define_keyboard(self):
"""
Test the dictionary with the word keyboard.
"""
res = commands.defit(c, e, "#test", "testrunner", "keyboard")
assert isinstance(res, unicode)
assert res == ("Wordnik says: A set of keys, as on a computer "
"terminal, word processor, typewriter, or piano.")
@pytest.has_internet
def test_define_irc(self):
"""
Test the dictionary with the word IRC.
"""
res = commands.defit(c, e, "#test", "testrunner", " IRC \t")
assert isinstance(res, unicode)
assert res == ("Wordnik says: An international computer network of "
"Internet servers, using its own protocol through which "
"individual users can hold real-time online conversations.")
@pytest.has_internet
def test_define_notaword(self):
"""
Test the dictionary with a nonsense word.
"""
res = commands.defit(c, e, "#test", "testrunner", "notaword")
assert isinstance(res, unicode)
assert res == "Wordnik does not have a definition for that."
@pytest.has_internet
def test_urb_irc(self):
"""
Test the urban dictionary with the word IRC.
"""
res = commands.urbandefit(c, e, "#test", "testrunner", "irc")
assert "Internet Relay Chat" in res
assert "protocol" in res.lower()
@pytest.has_internet
def test_acronym_irc(self):
"""
Test acronym finder with the word IRC.
"""
res = commands.acit(c, e, "#test", "testrunner", "irc")
assert "Internet Relay Chat" in res
assert "|" in res
def test_progress(self):
"""
Test the progress bar
"""
res = commands.progress(c, e, "#test", "testrunner", "1|98123|30")
print(res)
assert res == "1 [=== ] 98123"
def test_strategy(self):
"""
Test the social strategy thingie
"""
res = commands.strategy(c, e, "#test", "testrunner", "")
print(res)
assert res != ""
@pytest.has_internet
def test_paste_newuser(self):
"""
Test the pastebin with an unknown user
"""
pytest.xfail("a.libpa.st is down")
person = str(uuid.uuid4())[:9]
res = commands.paste(c, e, '#test', person, '')
print(res)
assert res == ("hmm.. I didn't find a recent paste of yours, %s. "
"Checkout http://a.libpa.st/" % person)
@pytest.has_internet
def test_paste_real_user(self):
"""
Test the pastebin with a valid user with an existing paste
"""
pytest.xfail("a.libpa.st is down")
person = 'vbSptH3ByfQQ6h'
res = commands.paste(c, e, '#test', person, '')
assert res == "http://a.libpa.st/40a4345a-4e4b-40d8-ad06-c0a22a26b282"
def test_qbiu_person(self):
"""
Test the qbiu function with a specified person.
"""
bitcher = "all y'all"
res = commands.bitchingisuseless(c, e, '#test', 'testrunner', bitcher)
print(res)
assert res == ("Quiet bitching is useless, all y'all. Do something "
"about it.")
def test_qbiu_blank(self):
"""
Test the qbiu function with no person specified.
"""
res = commands.bitchingisuseless(c, e, '#test', 'testrunner', '')
print(res)
assert res == ("Quiet bitching is useless, foo'. Do something about "
"it.")
def test_excuse(self):
import excuses
gen = excuses.RandomExcuseGenerator.create_local()
gen.pmxbot_excuse(c, e, '#test', 'testrunner', '')
def test_popquotes(self):
popquotes.pmxbot.install_commands()
res = popquotes.pmxbot.bender(c, e, '#test', 'testrunner', '')
assert len(res) > 5
def test_rand_bot(self, iter_):
res = commands.rand_bot(c, e, '#test', 'testrunner', '')
if res is None: return
if not isinstance(res, basestring):
res = u''.join(res)
assert len(res)
def test_logo(self):
lines = list(system.logo(c, e, '#test', 'testrunner', ''))
assert len(lines)
def test_help(self):
help = system.help(c, e, '#test', 'testrunner', '')
result = ''.join(help)
assert 'help' in result
def test_help_specific(self):
help = system.help(c, e, '#test', 'testrunner', 'help')
result = ''.join(help)
assert 'help' in result
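# Illustration (hypothetical sketch, not from the original test module): the
# pick/lunch tests above rely on ``logical_xor`` and ``onetrue`` helpers that
# are defined earlier in this test module and not shown here.  A minimal
# sketch of the behaviour those assertions assume, using ``_sketch_`` names
# to make clear these are stand-ins:
def _sketch_logical_xor(a, b):
    # True when exactly one of the two candidate options appears in the reply
    return bool(a) != bool(b)
def _sketch_onetrue(*args):
    # True when exactly one of the candidate options appears in the reply
    return len([arg for arg in args if arg]) == 1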
|
|
# orm/deprecated_interfaces.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import event, util
from interfaces import EXT_CONTINUE
class MapperExtension(object):
"""Base implementation for :class:`.Mapper` event hooks.
.. note:: :class:`.MapperExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.MapperEvents`.
New extension classes subclass :class:`.MapperExtension` and are specified
using the ``extension`` mapper() argument, which is a single
:class:`.MapperExtension` or a list of such::
from sqlalchemy.orm.interfaces import MapperExtension
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
print "instance %s before insert !" % instance
m = mapper(User, users_table, extension=MyExtension())
A single mapper can maintain a chain of ``MapperExtension``
objects. When a particular mapping event occurs, the
corresponding method on each ``MapperExtension`` is invoked
serially, and each method has the ability to halt the chain
from proceeding further::
m = mapper(User, users_table, extension=[ext1, ext2, ext3])
Each ``MapperExtension`` method returns the symbol
EXT_CONTINUE by default. This symbol generally means "move
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP indicates that the chain of ``MapperExtension``
objects should stop processing when it is returned. Like EXT_CONTINUE,
it can also carry additional significance in some cases, indicating
that a default mapper activity will not be performed.
"""
@classmethod
def _adapt_instrument_class(cls, self, listener):
cls._adapt_listener_methods(self, listener, ('instrument_class',))
@classmethod
def _adapt_listener(cls, self, listener):
cls._adapt_listener_methods(
self, listener,
(
'init_instance',
'init_failed',
'translate_row',
'create_instance',
'append_result',
'populate_instance',
'reconstruct_instance',
'before_insert',
'after_insert',
'before_update',
'after_update',
'before_delete',
'after_delete'
))
@classmethod
def _adapt_listener_methods(cls, self, listener, methods):
for meth in methods:
me_meth = getattr(MapperExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
if meth == 'reconstruct_instance':
def go(ls_meth):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(self.class_manager, 'load',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_instance':
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_instance
event.listen(self.class_manager, 'init',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_failed':
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(ls_meth, self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_failed
event.listen(self.class_manager, 'init_failure',
go(ls_meth), raw=False, propagate=True)
else:
event.listen(self, "%s" % meth, ls_meth,
raw=False, retval=True, propagate=True)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor is called.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor has been called,
and raised an exception.
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def translate_row(self, mapper, context, row):
"""Perform pre-processing on the given result row and return a
new row instance.
This is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
``RowProxy`` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
If the ultimate return value is EXT_CONTINUE, the row
is not translated.
"""
return EXT_CONTINUE
def create_instance(self, mapper, selectcontext, row, class_):
"""Receive a row when a new object instance is about to be
created from that row.
The method can choose to create the instance itself, or it can return
EXT_CONTINUE to indicate normal object creation should take place.
mapper
The mapper doing the operation
selectcontext
The QueryContext generated from the Query.
row
The result row from the database
class\_
The class we are mapping.
return value
A new object instance, or EXT_CONTINUE
"""
return EXT_CONTINUE
def append_result(self, mapper, selectcontext, row, instance,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
If this method returns EXT_CONTINUE, result appending will proceed
normally. If this method returns any other value or None,
result appending will not proceed for this instance, giving
this extension an opportunity to do the appending itself, if
desired.
mapper
The mapper doing the operation.
selectcontext
The QueryContext generated from the Query.
row
The result row from the database.
instance
The object instance to be appended to the result.
result
List to which results are being appended.
\**flags
extra information about the row, same as criterion in
``create_row_processor()`` method of
:class:`~sqlalchemy.orm.interfaces.MapperProperty`
"""
return EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row,
instance, **flags):
"""Receive an instance before that instance has
its attributes populated.
This usually corresponds to a newly loaded instance but may
also correspond to an already-loaded instance which has
unloaded attributes to be populated. The method may be called
many times for a single instance, as multiple result rows are
used to populate eagerly loaded collections.
If this method returns EXT_CONTINUE, instance population will
proceed normally. If any other value or None is returned,
instance population will not proceed, giving this extension an
opportunity to populate the instance itself, if desired.
As of 0.5, most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
"""
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
"""Receive an object instance after it has been created via
``__new__``, and after initial attribute population has
occurred.
This typically occurs when the instance is created based on
incoming result rows, and is only called once for that
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
"""Receive an object instance before that instance is inserted
into its table.
This is a good place to set up primary key values and such
that aren't handled otherwise.
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
"""Receive an object instance before that instance is updated.
Note that this method is called for all instances that are marked as
"dirty", even those which have no net changes to their column-based
attributes. An object is marked as dirty when any of its column-based
attributes have a "set attribute" operation called or when any of its
collections are modified. If, at update time, no column-based
attributes have any net changes, no UPDATE statement will be issued.
This means that an instance being sent to before_update is *not* a
guarantee that an UPDATE statement will be issued (although you can
affect the outcome here).
To detect if the column-based attributes on the object have net
changes, and will therefore generate an UPDATE statement, use
``object_session(instance).is_modified(instance,
include_collections=False)``.
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
"""Receive an object instance before that instance is deleted.
Note that *no* changes to the overall flush plan can be made
here; and manipulation of the ``Session`` will not have the
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
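# Illustrative sketch (not part of the original module): the deprecation note
# in the MapperExtension docstring points to :func:`.event.listen` and
# :class:`.MapperEvents`.  A minimal equivalent hook registration, assuming
# ``SomeClass`` is an already-mapped class defined elsewhere:
def _example_mapper_event_listen(SomeClass):
    def my_before_insert(mapper, connection, target):
        # same role as MapperExtension.before_insert(); ``target`` is the
        # instance about to be inserted
        print "instance %s before insert !" % target
    event.listen(SomeClass, 'before_insert', my_before_insert)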
class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. note:: :class:`.SessionExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.SessionEvents`.
Subclasses may be installed into a :class:`.Session` (or
:func:`.sessionmaker`) using the ``extension`` keyword
argument::
from sqlalchemy.orm.interfaces import SessionExtension
class MySessionExtension(SessionExtension):
def before_commit(self, session):
print "before commit!"
Session = sessionmaker(extension=MySessionExtension())
The same :class:`.SessionExtension` instance can be used
with any number of sessions.
"""
@classmethod
def _adapt_listener(cls, self, listener):
for meth in [
'before_commit',
'after_commit',
'after_rollback',
'before_flush',
'after_flush',
'after_flush_postexec',
'after_begin',
'after_attach',
'after_bulk_update',
'after_bulk_delete',
]:
me_meth = getattr(SessionExtension, meth)
ls_meth = getattr(listener, meth)
if not util.methods_equivalent(me_meth, ls_meth):
event.listen(self, meth, getattr(listener, meth))
def before_commit(self, session):
"""Execute right before commit is called.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_commit(self, session):
"""Execute after a commit has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def after_rollback(self, session):
"""Execute after a rollback has occurred.
Note that this may not be per-flush if a longer running
transaction is ongoing."""
def before_flush(self, session, flush_context, instances):
"""Execute before flush process has started.
`instances` is an optional list of objects which were passed to
the ``flush()`` method. """
def after_flush(self, session, flush_context):
"""Execute after flush has completed, but before commit has been
called.
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes."""
def after_flush_postexec(self, session, flush_context):
"""Execute after flush has completed, and after the post-exec
state occurs.
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction. """
def after_begin(self, session, transaction, connection):
"""Execute after a transaction is begun on a connection
`transaction` is the SessionTransaction. This method is called
after an engine level transaction is begun on a connection. """
def after_attach(self, session, instance):
"""Execute after an instance is attached to a session.
This is called after an add, delete or merge. """
def after_bulk_update(self, session, query, query_context, result):
"""Execute after a bulk update operation to the session.
This is called after a session.query(...).update()
`query` is the query object that this update operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
def after_bulk_delete(self, session, query, query_context, result):
"""Execute after a bulk delete operation to the session.
This is called after a session.query(...).delete()
`query` is the query object that this delete operation was
called on. `query_context` was the query context object.
`result` is the result object returned from the bulk operation.
"""
class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. note:: :class:`.AttributeExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
:class:`.AttributeEvents`.
:class:`.AttributeExtension` is used to listen for set,
remove, and append events on individual mapped attributes.
It is established on an individual mapped attribute using
the `extension` argument, available on
:func:`.column_property`, :func:`.relationship`, and
others::
from sqlalchemy.orm.interfaces import AttributeExtension
from sqlalchemy.orm import mapper, relationship, column_property
class MyAttrExt(AttributeExtension):
def append(self, state, value, initiator):
print "append event !"
return value
def set(self, state, value, oldvalue, initiator):
print "set event !"
return value
mapper(SomeClass, sometable, properties={
'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
'bar':relationship(Bar, extension=MyAttrExt())
})
Note that the :class:`.AttributeExtension` methods
:meth:`~.AttributeExtension.append` and
:meth:`~.AttributeExtension.set` need to return the
``value`` parameter. The returned value is used as the
effective value, and allows the extension to change what is
ultimately persisted.
AttributeExtension is assembled within the descriptors associated
with a mapped class.
"""
active_history = True
"""indicates that the set() method would like to receive the 'old' value,
even if it means firing lazy callables.
Note that ``active_history`` can also be set directly via
:func:`.column_property` and :func:`.relationship`.
"""
@classmethod
def _adapt_listener(cls, self, listener):
event.listen(self, 'append', listener.append,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'remove', listener.remove,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'set', listener.set,
active_history=listener.active_history,
raw=True, retval=True)
def append(self, state, value, initiator):
"""Receive a collection append event.
The returned value will be used as the actual value to be
appended.
"""
return value
def remove(self, state, value, initiator):
"""Receive a remove event.
No return value is defined.
"""
pass
def set(self, state, value, oldvalue, initiator):
"""Receive a set event.
The returned value will be used as the actual value to be
set.
"""
return value
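# Illustrative sketch (not part of the original module): the attribute-event
# equivalent of AttributeExtension, assuming ``SomeClass.data`` is a mapped
# column attribute defined elsewhere.  As with AttributeExtension.set(),
# retval=True makes the listener's return value the value actually set:
def _example_attribute_event_listen(SomeClass):
    def my_set(target, value, oldvalue, initiator):
        # the returned value becomes the effective value, mirroring
        # AttributeExtension.set()
        return value
    event.listen(SomeClass.data, 'set', my_set, retval=True)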
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Exceptions in L{twisted.mail}.
"""
from __future__ import absolute_import, division
class IMAP4Exception(Exception):
pass
class IllegalClientResponse(IMAP4Exception):
pass
class IllegalOperation(IMAP4Exception):
pass
class IllegalMailboxEncoding(IMAP4Exception):
pass
class MailboxException(IMAP4Exception):
pass
class MailboxCollision(MailboxException):
def __str__(self):
return 'Mailbox named %s already exists' % self.args
class NoSuchMailbox(MailboxException):
def __str__(self):
return 'No mailbox named %s exists' % self.args
class ReadOnlyMailbox(MailboxException):
def __str__(self):
return 'Mailbox open in read-only state'
class UnhandledResponse(IMAP4Exception):
pass
class NegativeResponse(IMAP4Exception):
pass
class NoSupportedAuthentication(IMAP4Exception):
def __init__(self, serverSupports, clientSupports):
IMAP4Exception.__init__(
self, 'No supported authentication schemes available')
self.serverSupports = serverSupports
self.clientSupports = clientSupports
def __str__(self):
return (IMAP4Exception.__str__(self)
+ ': Server supports %r, client supports %r'
% (self.serverSupports, self.clientSupports))
class IllegalServerResponse(IMAP4Exception):
pass
class IllegalIdentifierError(IMAP4Exception):
pass
class IllegalQueryError(IMAP4Exception):
pass
class MismatchedNesting(IMAP4Exception):
pass
class MismatchedQuoting(IMAP4Exception):
pass
class SMTPError(Exception):
pass
class SMTPClientError(SMTPError):
"""
Base class for SMTP client errors.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=False,
retry=False):
"""
@param code: The SMTP response code associated with this error.
@param resp: The string response associated with this error.
@param log: A string log of the exchange leading up to and including
the error.
@type log: L{bytes}
@param isFatal: A boolean indicating whether this connection can
proceed or not. If True, the connection will be dropped.
@param retry: A boolean indicating whether the delivery should be
retried. If True and the factory indicates further retries are
desirable, they will be attempted, otherwise the delivery will be
failed.
"""
self.code = code
self.resp = resp
self.log = log
self.addresses = addresses
self.isFatal = isFatal
self.retry = retry
def __str__(self):
if self.code > 0:
res = ["%.3d %s" % (self.code, self.resp)]
else:
res = [self.resp]
if self.log:
res.append(self.log)
res.append('')
return '\n'.join(res)
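# Formatting note (illustrative, not part of the original module): with a
# positive code the error renders as "<code> <resp>", so, for example,
# str(SMTPClientError(550, 'Mailbox unavailable')) yields
# '550 Mailbox unavailable', with the exchange log appended when present.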
class ESMTPClientError(SMTPClientError):
"""
Base class for ESMTP client errors.
"""
class EHLORequiredError(ESMTPClientError):
"""
The server does not support EHLO.
This is considered a non-fatal error (the connection will not be dropped).
"""
class AUTHRequiredError(ESMTPClientError):
"""
Authentication was required but the server does not support it.
This is considered a non-fatal error (the connection will not be dropped).
"""
class TLSRequiredError(ESMTPClientError):
"""
Transport security was required but the server does not support it.
This is considered a non-fatal error (the connection will not be dropped).
"""
class AUTHDeclinedError(ESMTPClientError):
"""
The server rejected our credentials.
Either the username, password, or challenge response
given to the server was rejected.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AuthenticationError(ESMTPClientError):
"""
An error occurred while authenticating.
Either the server rejected our request for authentication or the
challenge received was malformed.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class SMTPTLSError(ESMTPClientError):
"""
An error occurred while negotiating for transport security.
This is considered a non-fatal error (the connection will not be dropped).
"""
class SMTPConnectError(SMTPClientError):
"""
Failed to connect to the mail exchange host.
This is considered a fatal error. A retry will be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True,
retry=True):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal,
retry)
class SMTPTimeoutError(SMTPClientError):
"""
Failed to receive a response from the server in the expected time period.
This is considered a fatal error. A retry will be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True,
retry=True):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal,
retry)
class SMTPProtocolError(SMTPClientError):
"""
The server sent a mangled response.
This is considered a fatal error. A retry will not be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True,
retry=False):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal,
retry)
class SMTPDeliveryError(SMTPClientError):
"""
Indicates that a delivery attempt has had an error.
"""
class SMTPServerError(SMTPError):
def __init__(self, code, resp):
self.code = code
self.resp = resp
def __str__(self):
return "%.3d %s" % (self.code, self.resp)
class SMTPAddressError(SMTPServerError):
def __init__(self, addr, code, resp):
from twisted.mail.smtp import Address
SMTPServerError.__init__(self, code, resp)
self.addr = Address(addr)
def __str__(self):
return "%.3d <%s>... %s" % (self.code, self.addr, self.resp)
class SMTPBadRcpt(SMTPAddressError):
def __init__(self, addr, code=550,
resp='Cannot receive for specified address'):
SMTPAddressError.__init__(self, addr, code, resp)
class SMTPBadSender(SMTPAddressError):
def __init__(self, addr, code=550, resp='Sender not acceptable'):
SMTPAddressError.__init__(self, addr, code, resp)
class AddressError(SMTPError):
"""
Parse error in address
"""
class POP3Error(Exception):
"""
The base class for POP3 errors.
"""
pass
class _POP3MessageDeleted(Exception):
"""
An internal control-flow error which indicates that a deleted message was
requested.
"""
class POP3ClientError(Exception):
"""
The base class for all exceptions raised by POP3Client.
"""
class InsecureAuthenticationDisallowed(POP3ClientError):
"""
An error indicating secure authentication was required but no mechanism
could be found.
"""
class TLSError(POP3ClientError):
"""
An error indicating secure authentication was required but either the
transport does not support TLS or no TLS context factory was supplied.
"""
class TLSNotSupportedError(POP3ClientError):
"""
An error indicating secure authentication was required but the server does
not support TLS.
"""
class ServerErrorResponse(POP3ClientError):
"""
An error indicating that the server returned an error response to a
request.
@ivar consumer: See L{__init__}
"""
def __init__(self, reason, consumer=None):
"""
@type reason: L{bytes}
@param reason: The server response minus the status indicator.
@type consumer: callable that takes L{object}
@param consumer: The function meant to handle the values for a
multi-line response.
"""
POP3ClientError.__init__(self, reason)
self.consumer = consumer
class LineTooLong(POP3ClientError):
"""
An error indicating that the server sent a line which exceeded the
maximum line length (L{LineOnlyReceiver.MAX_LENGTH}).
"""
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 Vince Durham
# Copyright (c) 2013-2014 The Blakecoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING.
#
import logging
import argparse
import os
import sys
import traceback
import json
import base64
from binascii import a2b_hex, b2a_hex
import socket
from datetime import datetime
from twisted.internet import defer, reactor, threads
from twisted.web import server, resource
from twisted.internet.error import ConnectionRefusedError
import twisted.internet.error
from urlparse import urlsplit
import httplib
import thread
try:
from merkletree import MerkleTree
have_merkletree = True
except ImportError:
have_merkletree = False
class MerkleTree:
def __init__(self, L, detailed=False):
self.detail = L
__version__ = '1.2.3'
'''
merge-mine-proxy
Run behind Eloipool for a set of auxiliary chains.
Output is in the form:
2014-05-05T00:00:00,solve,=,1,HASH
Where the fields are:
* UTC date and time in ISO format
* The word "solve"
* The literal "="
* One flag per aux chain: 1 if that chain accepted the proof of work,
0 otherwise (e.g. solve,=,1,1,HASH for two aux chains)
* HASH, the parent block hash
'''
AUX_UPDATE_INTERVAL = 5
MERKLE_TREES_TO_KEEP = 240
logger = logging.getLogger('merged-mine-proxy')
logger.setLevel(logging.DEBUG)
def reverse_chunks(s, l):
return ''.join(reversed([s[x:x+l] for x in xrange(0, len(s), l)]))
def getresponse(http, path, postdata, headers):
http.request('POST', path, postdata, headers)
return http.getresponse().read()
class Error(Exception):
def __init__(self, code, message, data=''):
if not isinstance(code, int):
raise TypeError('code must be an int')
if not isinstance(message, unicode):
raise TypeError('message must be a unicode')
self._code, self._message, self._data = code, message, data
def __str__(self):
return '%i %s %r' % (self._code, self._message, self._data)
def _to_obj(self):
return {
'code': self._code,
'message': self._message,
'data': self._data,
}
class Proxy(object):
def __init__(self, url):
(schema, netloc, path, query, fragment) = urlsplit(url)
auth = None
if netloc.find('@') >= 0:
(auth, netloc) = netloc.split("@")
if path == "":
path = "/"
self._url = "%s://%s%s" % (schema, netloc, path)
self._path = path
self._auth = auth
self._netloc = netloc
self._http = None
def callRemote(self, method, *params):
try:
if self._http is None:
(host, port) = self._netloc.split(":")
self._http = httplib.HTTPConnection(host, port)
try:
self._http.connect()
except socket.error:
raise httplib.HTTPException()
id_ = 0
headers = {
'Content-Type': 'text/json',
}
if self._auth is not None:
headers['Authorization'] = 'Basic ' + base64.b64encode(self._auth)
resp = None
postdata=json.dumps({
'jsonrpc': '2.0',
'method': method,
'params': params,
'id': id_,
})
content = getresponse(self._http, self._path, postdata, headers)
resp = json.loads(content)
if resp['id'] != id_:
raise ValueError('invalid id')
if 'error' in resp and resp['error'] is not None:
raise Error(resp['error']['code'], resp['error']['message'])
return resp['result']
except httplib.HTTPException:
self._http = None
#logger.error("Could not connect to %s", self._url)
raise Error(-32099, u'Could not connect to backend', self._url)
def __getattr__(self, attr):
if attr.startswith('rpc_'):
return lambda *params: self.callRemote(attr[len('rpc_'):], *params)
raise AttributeError('%r object has no attribute %r' % (self.__class__.__name__, attr))
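# Usage sketch (illustrative; assumes a JSON-RPC server with HTTP basic auth
# is reachable at the given URL).  Remote methods are exposed through the
# ``rpc_`` prefix handled by __getattr__ above:
#   upstream = Proxy('http://user:pass@127.0.0.1:8330/')
#   block = upstream.rpc_getauxblock()   # issues a JSON-RPC "getauxblock" call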
class Server(resource.Resource):
extra_headers = None
def render(self, request):
def finish(x):
if request._disconnected:
return
if x is not None:
request.write(x)
request.finish()
def finish_error(fail):
if request._disconnected:
return
request.setResponseCode(500) # won't do anything if already written to
request.write('---ERROR---')
request.finish()
fail.printTraceback()
defer.maybeDeferred(resource.Resource.render, self, request).addCallbacks(finish, finish_error)
return server.NOT_DONE_YET
@defer.inlineCallbacks
def render_POST(self, request):
# missing batching, 1.0 notifications
data = request.content.read()
if self.extra_headers is not None:
for name, value in self.extra_headers.iteritems():
request.setHeader(name, value)
try:
try:
req = json.loads(data)
except Exception:
raise Error(-32700, u'Parse error')
except Error, e:
# id unknown
request.write(json.dumps({
'jsonrpc': '2.0',
'id': None,
'result': None,
'error': e._to_obj(),
}))
id_ = req.get('id', None)
try:
try:
method = req['method']
if not isinstance(method, unicode):
raise ValueError()
params = req.get('params', [])
if not isinstance(params, list):
raise ValueError()
except Exception:
raise Error(-32600, u'Invalid Request')
method_name = 'rpc_' + method
if not hasattr(self, method_name):
raise Error(-32601, u'Method not found')
method_meth = getattr(self, method_name)
df = defer.maybeDeferred(method_meth, *params)
if id_ is None:
return
try:
result = yield df
#except Error, e:
#    raise e
except Exception, e:
logger.error(str(e))
raise Error(-32099, u'Unknown error: ' + str(e))
res = json.dumps({
'jsonrpc': '2.0',
'id': id_,
'result': result,
'error': None,
})
request.setHeader('content-length', str(len(res)))
request.write(res)
except Error, e:
res = json.dumps({
'jsonrpc': '2.0',
'id': id_,
'result': None,
'error': e._to_obj(),
})
request.setHeader('content-length', str(len(res)))
request.write(res)
class Listener(Server):
def __init__(self, parent, auxs, merkle_size, rewrite_target):
Server.__init__(self)
self.parent = parent
self.auxs = auxs
self.chain_ids = [None for i in auxs]
self.aux_targets = [None for i in auxs]
self.merkle_size = merkle_size
self.merkle_tree_queue = []
self.merkle_trees = {}
self.rewrite_target = None
if rewrite_target == 32:
self.rewrite_target = reverse_chunks("0000000007ffffffffffffffffffffffffffffffffffffffffffffffffffffff", 2)
elif rewrite_target == 1024:
self.rewrite_target = reverse_chunks("00000000000fffffffffffffffffffffffffffffffffffffffffffffffffffff", 2)
if merkle_size > 255:
raise ValueError('merkle size must be at most 255')
self.putChild('', self)
def merkle_branch(self, chain_index, merkle_tree):
step = self.merkle_size
i1 = chain_index
j = 0
branch = []
while step > 1:
i = min(i1^1, step-1)
branch.append(merkle_tree[i + j])
i1 = i1 >> 1
j += step
step = (step + 1) / 2
return branch
def calc_merkle_index(self, chain):
chain_id = self.chain_ids[chain]
rand = 0 # nonce
rand = (rand * 1103515245 + 12345) & 0xffffffff
rand += chain_id
rand = (rand * 1103515245 + 12345) & 0xffffffff
return rand % self.merkle_size
@defer.inlineCallbacks
def update_auxs(self):
# create merkle leaves with arbitrary initial value
merkle_leaves = [ ('0' * 62) + ("%02x" % x) for x in range(self.merkle_size) ]
# ask each aux chain for a block
for chain in range(len(self.auxs)):
aux_block = (yield self.auxs[chain].rpc_getauxblock())
aux_block_hash = aux_block['hash']
self.chain_ids[chain] = aux_block['chainid']
chain_merkle_index = self.calc_merkle_index(chain)
merkle_leaves[chain_merkle_index] = aux_block_hash
self.aux_targets[chain] = reverse_chunks(aux_block['target'], 2) # fix endian
# create merkle tree
if len(merkle_leaves) > 1:
merkle_tree = map(lambda s: b2a_hex(s[::-1]), MerkleTree(map(lambda s: a2b_hex(s)[::-1], merkle_leaves), detailed=True).detail)
else:
merkle_tree = merkle_leaves
merkle_root = merkle_tree[-1]
if not self.merkle_trees.has_key(merkle_root):
# Tell bitcoind the new merkle root
MMAux = merkle_root + ("%02x000000" % self.merkle_size) + "00000000"
MMAux = 'fabe6d6d' + MMAux
for p in self.parent:
p.rpc_setworkaux('MM', MMAux)
# remember new tree
self.merkle_trees[merkle_root] = merkle_tree
self.merkle_tree_queue.append(merkle_root)
if len(self.merkle_tree_queue) > MERKLE_TREES_TO_KEEP:
# forget one tree
old_root = self.merkle_tree_queue.pop(0)
del self.merkle_trees[old_root]
def update_aux_process(self):
reactor.callLater(AUX_UPDATE_INTERVAL, self.update_aux_process)
self.update_auxs()
def rpc_getaux(self, data=None):
''' Use this rpc call to get the aux chain merkle root and aux target. Pool software
can then call getworkaux(aux) instead of going through this proxy. It is enough to call this
once a second.
'''
try:
# Get aux based on the latest tree
merkle_root = self.merkle_tree_queue[-1]
# nonce = 0, one byte merkle size
aux = merkle_root + ("%02x000000" % self.merkle_size) + "00000000"
result = {'aux': aux}
if self.rewrite_target:
result['aux_target'] = self.rewrite_target
else:
# Find highest target
targets = []
targets.extend(self.aux_targets)
targets.sort()
result['aux_target'] = reverse_chunks(targets[-1], 2) # fix endian
return result
except Exception:
logger.error(traceback.format_exc())
raise
@defer.inlineCallbacks
def rpc_gotwork(self, solution):
try:
# Submit work upstream
any_solved = False
aux_solved = []
parent_hash = solution['hash']
blkhdr = solution['header']
coinbaseMerkle = solution['coinbaseMrkl']
pos = coinbaseMerkle.find('fabe6d6d') + 8
if pos == 7:  # find() returned -1, i.e. no merged-mining marker in the coinbase
logger.error("failed to find aux in coinbase")
defer.returnValue(False)
return
slnaux = coinbaseMerkle[pos:pos+80]
merkle_root = slnaux[:-16] # strip off size and nonce
if not self.merkle_trees.has_key(merkle_root):
logger.error("stale merkle root %s", merkle_root)
defer.returnValue(False)
return
merkle_tree = self.merkle_trees[merkle_root]
# submit to each aux chain
for chain in range(len(self.auxs)):
chain_merkle_index = self.calc_merkle_index(chain)
aux_solved.append(False)
# try submitting if under target
# TODO: self.aux_targets[chain] > parent_hash and
if chain_merkle_index is not None:
branch = self.merkle_branch(chain_merkle_index, merkle_tree)
"""
proof = (
yield self.parent.rpc_getworkaux("", data, chain_merkle_index, *branch))
if proof is False:
logger.error("aux pow request rejected by parent, chain %d", chain)
else:
"""
auxpow = coinbaseMerkle + '%02x' % (len(branch),)
for mb in branch:
auxpow += b2a_hex(a2b_hex(mb)[::-1])
auxpow += ('%02x000000' % (chain_merkle_index,)) + blkhdr
aux_hash = merkle_tree[chain_merkle_index]
aux_solved[-1] = (
yield self.auxs[chain].rpc_getauxblock(aux_hash, auxpow))
any_solved = any_solved or aux_solved[-1]
logger.info("%s,solve,%s,%s,%s", datetime.utcnow().isoformat(),
"=",
",".join(["1" if solve else "0" for solve in aux_solved]),
parent_hash)
if any_solved: self.update_auxs()
defer.returnValue(any_solved)
except Exception:
# Exceptions here are normally already handled by the rpc functions
#logger.debug(traceback.format_exc())
raise
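# Worked sketch (illustrative, not part of the original script):
# Listener.calc_merkle_index above derives an aux chain's slot from its chain
# id and the merkle size, with the nonce fixed at 0.  A standalone version of
# the same arithmetic, useful for checking which leaf a chain will occupy:
def _example_merkle_slot(chain_id, merkle_size):
    rand = 0  # nonce
    rand = (rand * 1103515245 + 12345) & 0xffffffff
    rand += chain_id
    rand = (rand * 1103515245 + 12345) & 0xffffffff
    return rand % merkle_size
# e.g. _example_merkle_slot(1, 4) gives the leaf index used for a chain whose
# 'chainid' is 1 when the merkle tree has 4 leaves.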
def main(args):
parent = map(Proxy, args.parent_url)
aux_urls = args.aux_urls or ['http://un:pw@127.0.0.1:8342/']
auxs = [Proxy(url) for url in aux_urls]
if args.merkle_size is None:
for i in range(8):
if (1<<i) > len(aux_urls):
args.merkle_size = i
logger.info('merkle size = %d', i)
break
if len(aux_urls) > args.merkle_size:
raise ValueError('the merkle size must be at least as large as the number of aux chains')
if args.merkle_size > 1 and not have_merkletree:
raise ValueError('Missing merkletree module. Only a single subchain will work.')
if args.pidfile:
pidfile = open(args.pidfile, 'w')
pidfile.write(str(os.getpid()))
pidfile.close()
listener = Listener(parent, auxs, args.merkle_size, args.rewrite_target)
listener.update_aux_process()
reactor.listenTCP(args.worker_port, server.Site(listener))
def run():
parser = argparse.ArgumentParser(description='merge-mine-proxy (version %s)' % (__version__,))
parser.add_argument('--version', action='version', version=__version__)
worker_group = parser.add_argument_group('worker interface')
worker_group.add_argument('-w', '--worker-port', metavar='PORT',
help='listen on PORT for RPC connections from miners asking for work and providing responses (default: 8772)',
type=int, action='store', default=8772, dest='worker_port')
parent_group = parser.add_argument_group('parent chain (Eloipool) interface')
parent_group.add_argument('-p', '--parent-url', metavar='PARENT_URL',
help='connect to Eloipool at this address (default: http://un:pw@127.0.0.1:8330/)',
type=str, action='store', nargs='+', default=['http://un:pw@127.0.0.1:8330/'], dest='parent_url')
aux_group = parser.add_argument_group('aux chain (e.g. Photon, B+ etc..) interface(s)')
aux_group.add_argument('-x', '--aux-url', metavar='AUX_URL',
help='connect to the aux RPCs at this address (default: http://un:pw@127.0.0.1:8342/)',
type=str, action='append', default=[], dest='aux_urls')
aux_group.add_argument('-s', '--merkle-size', metavar='SIZE',
help='use this many entries in the merkle tree. Must be a power of 2. Default is lowest power of 2 greater than the number of aux chains.',
type=int, action='store', default=None, dest='merkle_size')
parser.add_argument('-r', '--rewrite-target', help='rewrite target difficulty to 32',
action='store_const', const=32, default=False, dest='rewrite_target')
parser.add_argument('-R', '--rewrite-target-1024', help='rewrite target difficulty to 1024',
action='store_const', const=1024, default=False, dest='rewrite_target')
parser.add_argument('-i', '--pidfile', metavar='PID', type=str, action='store', default=None, dest='pidfile')
parser.add_argument('-l', '--logfile', metavar='LOG', type=str, action='store', default=None, dest='logfile')
args = parser.parse_args()
if args.logfile:
logger.addHandler(logging.FileHandler(args.logfile))
else:
logger.addHandler(logging.StreamHandler())
reactor.callWhenRunning(main, args)
reactor.run()
if __name__ == "__main__":
run()
|
|
import idc
import idaapi
import idautils
import time
# This limits the depth of any individual path, as well as the maximum
# number of paths that will be searched; this is needed for practical
# reasons, as IDBs with tens of thousands of functions take a long time
# to exhaust all possible paths without some practical limitation.
#
# This is global so it's easy to change from the IDAPython prompt.
ALLEYCAT_LIMIT = 10000
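# For example, to search deeper or more numerous paths in a large IDB, raise
# the limit from the IDAPython prompt before building any paths
# (illustrative value):
#   ALLEYCAT_LIMIT = 50000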
class AlleyCatException(Exception):
pass
class AlleyCat(object):
'''
Class which resolves code paths. This is where most of the work is done.
'''
def __init__(self, start, end):
'''
Class constructor.
@start - The start address.
@end - The end address.
Returns None.
'''
global ALLEYCAT_LIMIT
self.limit = ALLEYCAT_LIMIT
self.paths = []
# We work backwards via xrefs, so we start at the end and end at the start
print "Generating call paths from %s to %s..." % (self._name(end), self._name(start))
self._build_paths(start, end)
def _name(self, ea):
name = idc.Name(ea)
if not name:
name = idc.GetFuncOffset(ea)
if not name:
name = '0x%X' % ea
return name
def _add_path(self, path):
if path not in self.paths:
self.paths.append(path)
def _build_paths(self, start, end=idc.BADADDR):
partial_paths = [[start]]
# Loop while there are still unresolved paths and while all path sizes have not exceeded ALLEYCAT_LIMIT
while partial_paths and len(self.paths) < self.limit and len(partial_paths) < self.limit:
# Initialize a unique set of callers for this iteration
callers = set()
# Callee is the last entry of the first path in partial paths.
# The first path list will change as paths are completed and popped from the list.
callee = partial_paths[0][-1]
# Find all unique functions that reference the callee, assuming this path has not
# exceeded ALLEYCAT_LIMIT.
if len(partial_paths[0]) < self.limit:
for xref in idautils.XrefsTo(callee):
caller = self._get_code_block(xref.frm)
if caller and caller.startEA not in callers:
callers.add(caller.startEA)
# If there are callers to the callee, remove the callee's current path
# and insert new ones with the new callers appended.
if callers:
base_path = partial_paths.pop(0)
for caller in callers:
# Don't want to loop back on ourselves in the same path
if caller in base_path:
continue
# If we've reached the desired end node, don't go any further down this path
if caller == end:
self._add_path((base_path + [caller])[::-1])
else:
partial_paths.append(base_path + [caller])
# Else, our end node is not in this path, so don't include it in the finished path list.
elif end not in partial_paths[0]:
partial_paths.pop(0)
# If there were no callers then this path has been exhausted and should be
# popped from the partial path list into the finished path list.
elif end in partial_paths[0]:
# Paths start with the end function and end with the start function; reverse it.
self._add_path(partial_paths.pop(0)[::-1])
class AlleyCatFunctionPaths(AlleyCat):
def __init__(self, start_ea, end_ea):
# We work backwards via xrefs, so we start at the end and end at the start
try:
start = idaapi.get_func(end_ea).startEA
except:
raise AlleyCatException("Address 0x%X is not part of a function!" % end_ea)
try:
end = idaapi.get_func(start_ea).startEA
except:
end = idc.BADADDR
super(AlleyCatFunctionPaths, self).__init__(start, end)
def _get_code_block(self, ea):
return idaapi.get_func(ea)
class AlleyCatCodePaths(AlleyCat):
def __init__(self, start_ea, end_ea):
end_func = idaapi.get_func(end_ea)
start_func = idaapi.get_func(start_ea)
if not start_func:
raise AlleyCatException("Address 0x%X is not part of a function!" % start_ea)
if not end_func:
raise AlleyCatException("Address 0x%X is not part of a function!" % end_ea)
if start_func.startEA != end_func.startEA:
raise AlleyCatException("The start and end addresses are not part of the same function!")
self.func = start_func
self.blocks = [block for block in idaapi.FlowChart(self.func)]
# We work backwards via xrefs, so we start at the end and end at the start
end_block = self._get_code_block(start_ea)
start_block = self._get_code_block(end_ea)
if not end_block:
raise AlleyCatException("Failed to find the code block associated with address 0x%X" % start_ea)
if not start_block:
raise AlleyCatException("Failed to find the code block associated with address 0x%X" % end_ea)
super(AlleyCatCodePaths, self).__init__(start_block.startEA, end_block.startEA)
def _get_code_block(self, ea):
for block in self.blocks:
if block.startEA <= ea and block.endEA > ea:
return block
return None
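# Usage sketch (illustrative; assumes IDA's classic IDAPython API, matching
# the idc/idaapi calls used above, and that a function named strcpy exists
# in the IDB):
#   paths = AlleyCatFunctionPaths(idc.ScreenEA(), idc.LocByName("strcpy")).paths
# yields every call path from the function under the cursor to strcpy, each
# path being a list of function start addresses ordered from start to end.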
### Everything below here is just IDA UI/Plugin stuff ###
class AlleyCatGraphHistory(object):
'''
Manages include/exclude graph history.
'''
INCLUDE_ACTION = 0
EXCLUDE_ACTION = 1
def __init__(self):
self.reset()
def reset(self):
self.history = []
self.includes = []
self.excludes = []
self.history_index = 0
self.include_index = 0
self.exclude_index = 0
def update_history(self, action):
if self.excludes and len(self.history)-1 != self.history_index:
self.history = self.history[0:self.history_index+1]
self.history.append(action)
self.history_index = len(self.history)-1
def add_include(self, obj):
if self.includes and len(self.includes)-1 != self.include_index:
self.includes = self.includes[0:self.include_index+1]
self.includes.append(obj)
self.include_index = len(self.includes)-1
self.update_history(self.INCLUDE_ACTION)
def add_exclude(self, obj):
if len(self.excludes)-1 != self.exclude_index:
self.excludes = self.excludes[0:self.exclude_index+1]
self.excludes.append(obj)
self.exclude_index = len(self.excludes)-1
self.update_history(self.EXCLUDE_ACTION)
def get_includes(self):
return set(self.includes[0:self.include_index+1])
def get_excludes(self):
return set(self.excludes[0:self.exclude_index+1])
def undo(self):
if self.history:
if self.history[self.history_index] == self.INCLUDE_ACTION:
if self.include_index >= 0:
self.include_index -= 1
elif self.history[self.history_index] == self.EXCLUDE_ACTION:
if self.exclude_index >= 0:
self.exclude_index -= 1
self.history_index -= 1
if self.history_index < 0:
self.history_index = 0
def redo(self):
self.history_index += 1
if self.history_index >= len(self.history):
self.history_index = len(self.history)-1
if self.history[self.history_index] == self.INCLUDE_ACTION:
if self.include_index < len(self.includes)-1:
self.include_index += 1
elif self.history[self.history_index] == self.EXCLUDE_ACTION:
if self.exclude_index < len(self.excludes)-1:
self.exclude_index += 1
class AlleyCatGraph(idaapi.GraphViewer):
'''
Displays the graph and manages graph actions.
'''
def __init__(self, results, title="AlleyCat Graph"):
idaapi.GraphViewer.__init__(self, title)
self.results = results
self.nodes_ea2id = {}
self.nodes_id2ea = {}
self.edges = {}
self.end_nodes = []
self.edge_nodes = []
self.start_nodes = []
self.history = AlleyCatGraphHistory()
self.include_on_click = False
self.exclude_on_click = False
def Show(self):
'''
Display the graph.
Returns True on success, False on failure.
'''
if not idaapi.GraphViewer.Show(self):
return False
else:
self.cmd_undo = self.AddCommand("Undo", "")
self.cmd_redo = self.AddCommand("Redo", "")
self.cmd_reset = self.AddCommand("Reset graph", "")
self.cmd_exclude = self.AddCommand("Exclude node", "")
self.cmd_include = self.AddCommand("Include node", "")
return True
def OnRefresh(self):
# Clear the graph before refreshing
self.clear()
self.nodes_ea2id = {}
self.nodes_id2ea = {}
self.edges = {}
self.end_nodes = []
self.edge_nodes = []
self.start_nodes = []
includes = self.history.get_includes()
excludes = self.history.get_excludes()
for path in self.results:
parent_node = None
# Check to see if this path contains all nodes marked for explicit inclusion
if (set(path) & includes) != includes:
continue
# Check to see if this path contains any nodes marked for explicit exclusion
if (set(path) & excludes) != set():
continue
for ea in path:
# If this node already exists, use its existing node ID
if self.nodes_ea2id.has_key(ea):
this_node = self.nodes_ea2id[ea]
# Else, add this node to the graph
else:
this_node = self.AddNode(self.get_name_by_ea(ea))
self.nodes_ea2id[ea] = this_node
self.nodes_id2ea[this_node] = ea
# If there is a parent node, add an edge between the parent node and this one
if parent_node is not None:
self.AddEdge(parent_node, this_node)
if this_node not in self.edges[parent_node]:
self.edges[parent_node].append(this_node)
# Update the parent node for the next loop
parent_node = this_node
if not self.edges.has_key(parent_node):
self.edges[parent_node] = []
# Highlight this node in the disassembly window
self.highlight(ea)
try:
# Track the first, last, and next to last nodes in each path for
# proper colorization in self.OnGetText.
self.start_nodes.append(self.nodes_ea2id[path[0]])
self.end_nodes.append(self.nodes_ea2id[path[-1]])
self.edge_nodes.append(self.nodes_ea2id[path[-2]])
except:
pass
return True
def OnGetText(self, node_id):
color = idc.DEFCOLOR
if node_id in self.edge_nodes:
color = 0x00ffff
elif node_id in self.start_nodes:
color = 0x00ff00
elif node_id in self.end_nodes:
color = 0x0000ff
return (self[node_id], color)
def OnHint(self, node_id):
hint = ""
try:
for edge_node in self.edges[node_id]:
hint += "%s\n" % self[edge_node]
except Exception as e:
pass
return hint
def OnCommand(self, cmd_id):
if self.cmd_undo == cmd_id:
if self.include_on_click or self.exclude_on_click:
self.include_on_click = False
self.exclude_on_click = False
else:
self.history.undo()
self.Refresh()
elif self.cmd_redo == cmd_id:
self.history.redo()
self.Refresh()
elif self.cmd_include == cmd_id:
self.include_on_click = True
elif self.cmd_exclude == cmd_id:
self.exclude_on_click = True
elif self.cmd_reset == cmd_id:
self.include_on_click = False
self.exclude_on_click = False
self.history.reset()
self.Refresh()
def OnClick(self, node_id):
if self.include_on_click:
self.history.add_include(self.nodes_id2ea[node_id])
self.include_on_click = False
elif self.exclude_on_click:
self.history.add_exclude(self.nodes_id2ea[node_id])
self.exclude_on_click = False
self.Refresh()
def OnDblClick(self, node_id):
xref_locations = []
node_ea = self.get_ea_by_name(self[node_id])
if self.edges.has_key(node_id):
for edge_node_id in self.edges[node_id]:
edge_node_name = self[edge_node_id]
edge_node_ea = self.get_ea_by_name(edge_node_name)
if edge_node_ea != idc.BADADDR:
for xref in idautils.XrefsTo(edge_node_ea):
# Is the specified node_id the source of this xref?
if self.match_xref_source(xref, node_ea):
xref_locations.append((xref.frm, edge_node_ea))
if xref_locations:
xref_locations.sort()
print ""
print "Path Xrefs from %s:" % self[node_id]
print "-" * 100
for (xref_ea, dst_ea) in xref_locations:
print "%-50s => %s" % (self.get_name_by_ea(xref_ea), self.get_name_by_ea(dst_ea))
print "-" * 100
print ""
idc.Jump(xref_locations[0][0])
else:
idc.Jump(node_ea)
def OnClose(self):
# TODO: Add a 'do not ask again' feature?
if idc.AskYN(1, "Path nodes have been highlighted in the disassembly window. Undo highlighting?") == 1:
self.unhighlight_all()
def match_xref_source(self, xref, source):
# TODO: This must be modified if support for graphing function blocks is added.
return ((xref.type != idc.fl_F) and (idc.GetFunctionAttr(xref.frm, idc.FUNCATTR_START) == source))
def get_ea_by_name(self, name):
'''
Get the address of a location by name.
@name - Location name
Returns the address of the named location, or idc.BADADDR on failure.
'''
# This allows support of the function offset style names (e.g., main+0C)
# TODO: Is there something in the IDA API that does this already??
if '+' in name:
(func_name, offset) = name.split('+')
base_ea = idc.LocByName(func_name)
if base_ea != idc.BADADDR:
try:
ea = base_ea + int(offset, 16)
except:
ea = idc.BADADDR
else:
ea = idc.LocByName(name)
if ea == idc.BADADDR:
try:
ea = int(name, 0)
except:
ea = idc.BADADDR
return ea
def clear(self):
# Clears the graph and unhighlights the disassembly
self.Clear()
self.unhighlight_all()
def get_name_by_ea(self, ea):
'''
Get the name of the specified address.
@ea - Address.
Returns a name for the address, one of idc.Name, idc.GetFuncOffset or 0xXXXXXXXX.
'''
name = idc.Name(ea)
if not name:
name = idc.GetFuncOffset(ea)
if not name:
name = "0x%X" % ea
return name
def colorize_node(self, ea, color):
# Colorizes an entire code block
func = idaapi.get_func(ea)
if func:
for block in idaapi.FlowChart(func):
if block.startEA <= ea and block.endEA > ea:
ea = block.startEA
while ea < block.endEA:
idaapi.set_item_color(ea, color)
ea = idc.NextHead(ea)
break
def highlight(self, ea):
# Highlights an entire code block
self.colorize_node(ea, 0x00FF00)
def unhighlight(self, ea):
# Unhighlights an entire code block
self.colorize_node(ea, idc.DEFCOLOR)
def unhighlight_all(self):
# Unhighlights all code blocks
for path in self.results:
for ea in path:
self.unhighlight(ea)
class idapathfinder_t(idaapi.plugin_t):
flags = 0
comment = ''
help = ''
wanted_name = 'AlleyCat'
wanted_hotkey = ''
def init(self):
ui_path = "View/Graphs/"
self.menu_contexts = []
self.graph = None
self.menu_contexts.append(idaapi.add_menu_item(ui_path,
"Find paths to the current function from...",
"",
0,
self.FindPathsFromMany,
(None,)))
self.menu_contexts.append(idaapi.add_menu_item(ui_path,
"Find paths from the current function to...",
"",
0,
self.FindPathsToMany,
(None,)))
self.menu_contexts.append(idaapi.add_menu_item(ui_path,
"Find paths in the current function to the current code block",
"",
0,
self.FindPathsToCodeBlock,
(None,)))
return idaapi.PLUGIN_KEEP
def term(self):
for context in self.menu_contexts:
idaapi.del_menu_item(context)
return None
def run(self, arg):
pass
def _current_function(self):
        return idaapi.get_func(idc.ScreenEA()).startEA
def _find_and_plot_paths(self, sources, targets, klass=AlleyCatFunctionPaths):
results = []
for target in targets:
for source in sources:
s = time.time()
r = klass(source, target).paths
e = time.time()
print "Found %d paths in %f seconds." % (len(r), (e-s))
if r:
results += r
else:
name = idc.Name(target)
if not name:
name = "0x%X" % target
print "No paths found to", name
if results:
# Be sure to close any previous graph before creating a new one.
# Failure to do so may crash IDA.
try:
self.graph.Close()
except:
pass
self.graph = AlleyCatGraph(results, 'Path Graph')
self.graph.Show()
def _get_user_selected_functions(self, many=False):
functions = []
ea = idc.ScreenEA()
try:
current_function = idc.GetFunctionAttr(ea, idc.FUNCATTR_START)
except:
current_function = None
while True:
function = idc.ChooseFunction("Select a function and click 'OK' until all functions have been selected. When finished, click 'Cancel' to display the graph.")
# ChooseFunction automatically jumps to the selected function
# if the enter key is pressed instead of clicking 'OK'. Annoying.
if idc.ScreenEA() != ea:
idc.Jump(ea)
if not function or function == idc.BADADDR or function == current_function:
break
elif function not in functions:
functions.append(function)
if not many:
break
return functions
def FindPathsToCodeBlock(self, arg):
target = idc.ScreenEA()
source = self._current_function()
if source:
self._find_and_plot_paths([source], [target], klass=AlleyCatCodePaths)
def FindPathsToMany(self, arg):
source = self._current_function()
if source:
targets = self._get_user_selected_functions(many=True)
if targets:
self._find_and_plot_paths([source], targets)
def FindPathsFromMany(self, arg):
target = self._current_function()
if target:
sources = self._get_user_selected_functions(many=True)
if sources:
self._find_and_plot_paths(sources, [target])
def PLUGIN_ENTRY():
return idapathfinder_t()
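# Illustrative sketch (not part of the original plugin): driving the path classes
# directly from the IDAPython console instead of the View/Graphs menu entries.
# The function names passed to idc.LocByName() are hypothetical placeholders.
def _demo_find_paths():
    source = idc.LocByName("main")      # hypothetical source function
    target = idc.LocByName("strcpy")    # hypothetical target function
    if idc.BADADDR in (source, target):
        print "Could not resolve the example function names."
        return
    paths = AlleyCatFunctionPaths(source, target).paths
    print "Found %d function-level paths." % len(paths)
    if paths:
        AlleyCatGraph(paths, "Demo Path Graph").Show()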
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations:
"""PacketCapturesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> "_models.PacketCaptureResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PacketCapture')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
parameters: "_models.PacketCapture",
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureResult"]:
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.PacketCapture
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.PacketCaptureResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureResult":
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PacketCaptureResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.PacketCaptureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'} # type: ignore
async def _get_status_initial(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> "_models.PacketCaptureQueryStatusResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._get_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
async def begin_get_status(
self,
resource_group_name: str,
network_watcher_name: str,
packet_capture_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.PacketCaptureQueryStatusResult"]:
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture session.
:type packet_capture_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.PacketCaptureQueryStatusResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureQueryStatusResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'} # type: ignore
def list(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PacketCaptureListResult"]:
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.PacketCaptureListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PacketCaptureListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'} # type: ignore
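# Illustrative usage sketch (not part of the generated client): creating, polling,
# and listing packet captures through the async NetworkManagementClient. The
# subscription id, resource group, watcher name, VM resource id, and storage path
# below are placeholders, and the azure-identity package is assumed to be installed.
import asyncio

async def _example_packet_capture_usage():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.network.aio import NetworkManagementClient

    async with DefaultAzureCredential() as credential:
        async with NetworkManagementClient(credential, "<subscription-id>") as client:
            parameters = _models.PacketCapture(
                target="<vm-resource-id>",
                storage_location=_models.PacketCaptureStorageLocation(
                    storage_path="https://<account>.blob.core.windows.net/captures/cap1.cap"),
            )
            # begin_create returns an AsyncLROPoller; result() waits for completion.
            poller = await client.packet_captures.begin_create(
                "<resource-group>", "<network-watcher>", "cap1", parameters)
            capture = await poller.result()
            print(capture.provisioning_state)
            # list() returns an AsyncItemPaged that can be iterated with async for.
            async for item in client.packet_captures.list("<resource-group>", "<network-watcher>"):
                print(item.name)

if __name__ == "__main__":
    asyncio.run(_example_packet_capture_usage())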
|
|
#!/usr/bin/env python
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import storage
import collections
from itertools import izip
import scrypt
import base64
from Crypto.Hash import SHA256 as HashAlg
from Crypto.Hash import HMAC
from Crypto.PublicKey import RSA as CryptoKey
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Signature import PKCS1_PSS as CryptoSigner
from Crypto import Random
import syndicate.client.common.log as Log
import singleton
log = Log.get_logger()
PRIVATE_STORAGE_DIR = "/keys"
STORAGE_DIR = "/keys"
VOLUME_STORAGE_DIRS = [
STORAGE_DIR
]
LOCAL_STORAGE_DIRS = [
PRIVATE_STORAGE_DIR
]
#-------------------------
EncryptedPrivateKey = collections.namedtuple( "EncryptedPrivateKey", ["salt", "data"] )
SignedPublicKey = collections.namedtuple( "SignedPublicKey", ["pubkey_str", "signature"] )
#-------------------------
KEY_SIZE = 4096
def generate_key_pair( key_size=KEY_SIZE ):
rng = Random.new().read
key = CryptoKey.generate(key_size, rng)
private_key_pem = key.exportKey()
public_key_pem = key.publickey().exportKey()
return (public_key_pem, private_key_pem)
#-------------------------
def make_key_local_path( name ):
global PRIVATE_STORAGE_DIR
# NOTE: do NOT store this on the Volume
return storage.local_path( PRIVATE_STORAGE_DIR, name )
#-------------------------
def make_key_volume_path( name ):
global STORAGE_DIR
# used for loading the private key from the Volume
return storage.volume_path( STORAGE_DIR, name )
#-------------------------
def encrypt_with_password( data, password ):
# first, make a PBKDF2 key from the password
salt = os.urandom(64) # 512 bits
key = PBKDF2( unicode(password), salt, dkLen=64 ) # 512 bit key
# second, feed this key and the private key into scrypt.
# NOTE: scrypt uses AES256-CTR with encrypt-then-MAC
enc_data = scrypt.encrypt( str(data), key )
return salt, enc_data
#-------------------------
def decrypt_with_password( encrypted_data, password, salt ):
   # re-derive the key from the password for decryption...
key = PBKDF2( unicode(password), salt, dkLen=64 )
try:
data = scrypt.decrypt( encrypted_data, key )
except:
log.error( "Failed to decrypt data. Wrong password?")
return None
return data
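#-------------------------
# Illustrative sketch (not part of the original module): a password round trip
# through the two helpers above. The password "sniff" is a placeholder.
def _demo_password_roundtrip():
   salt, ciphertext = encrypt_with_password( "hello world", "sniff" )
   assert decrypt_with_password( ciphertext, "sniff", salt ) == "hello world"
   assert decrypt_with_password( ciphertext, "wrong password", salt ) is None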
#-------------------------
def encrypt_private_key( privkey_str, password ):
salt, encrypted_key = encrypt_with_password( privkey_str, password )
return EncryptedPrivateKey( salt=base64.b64encode(salt), data=base64.b64encode( encrypted_key ) )
#-------------------------
def decrypt_private_key( encrypted_private_key, password ):
pkey_str = decrypt_with_password( base64.b64decode( encrypted_private_key.data ), password, base64.b64decode( encrypted_private_key.salt ) )
if pkey_str is None:
log.error("Failed to decrypt private key")
return pkey_str
#-------------------------
def load_private_key_from_path( key_path, password, local ):
encrypted_privkey_str = None
if local:
encrypted_privkey_str = storage.read_file( key_path, volume=None )
else:
encrypted_privkey_str = storage.read_file( key_path )
if encrypted_privkey_str is None:
log.error("Failed to load key from %s" % key_path )
return None
try:
encrypted_private_key = storage.json_to_tuple( EncryptedPrivateKey, encrypted_privkey_str )
except Exception, e:
log.error("Failed to unserialize private key")
return None
privkey_str = decrypt_private_key( encrypted_private_key, password )
# load this into a usable form
try:
privkey = CryptoKey.importKey( privkey_str )
assert privkey.has_private(), "Not a private key"
except Exception, e:
log.error("Failed to load private key")
log.exception(e)
return None
return privkey
#-------------------------
def load_private_key( key_name, password, check_volume=True ):
key_path = make_key_local_path( key_name )
local = True
if not storage.path_exists( key_path, volume=None ) and check_volume:
# load it from the Volume
key_path = make_key_volume_path( key_name )
local = False
return load_private_key_from_path( key_path, password, local )
#-------------------------
def load_private_key_from_volume( key_name, password ):
key_path = make_key_volume_path( key_name )
   return load_private_key_from_path( key_path, password, False )
#-------------------------
def store_private_key_to_path( key_path, privkey, password, volume ):
privkey_str = privkey.exportKey()
encrypted_private_key = encrypt_private_key( privkey_str, password )
try:
encrypted_privkey_json = storage.tuple_to_json( encrypted_private_key )
except Exception, e:
log.error("Failed to serialize encrypted private key")
return False
rc = storage.write_file( key_path, encrypted_privkey_json, volume=volume )
return rc
#-------------------------
def store_private_key( key_name, privkey, password ):
# ensure the path exists...
global PRIVATE_STORAGE_DIR
key_path = make_key_local_path( key_name )
return store_private_key_to_path( key_path, privkey, password, None )
#-------------------------
def store_private_key_to_volume( key_name, privkey, password, num_downloads, duration, volume ):
# ensure the path exists...
key_path = make_key_volume_path( key_name )
# TODO: use num_downloads, duration to limit key lifetime on the Volume
return store_private_key_to_path( key_path, privkey, password, volume )
#-------------------------
def delete_private_key_from_volume( key_name, volume=None ):
key_path = make_key_volume_path( key_name )
rc = storage.delete_file( key_path, volume=volume )
return rc
#-------------------------
def delete_private_key( key_name ):
key_path = make_key_local_path( key_name )
rc = storage.delete_file( key_path, volume=None )
return rc
#-------------------------
def sign_data( privkey_str, data ):
privkey = CryptoKey.importKey( privkey_str )
h = HashAlg.new( data )
signer = CryptoSigner.new(privkey)
signature = signer.sign( h )
return signature
#-------------------------
def verify_data( pubkey_str, data, sig ):
pubkey = CryptoKey.importKey( pubkey_str )
h = HashAlg.new( data )
verifier = CryptoSigner.new(pubkey)
ret = verifier.verify( h, sig )
return ret
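#-------------------------
# Illustrative sketch (not part of the original module): a sign/verify round trip
# using the helpers above with a freshly generated key pair (1024 bits keeps the
# demo fast; the module default is 4096).
def _demo_sign_verify():
   pubkey_pem, privkey_pem = generate_key_pair( key_size=1024 )
   signature = sign_data( privkey_pem, "attack at dawn" )
   assert verify_data( pubkey_pem, "attack at dawn", signature )
   assert not verify_data( pubkey_pem, "attack at dusk", signature )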
#-------------------------
def sign_public_key( pubkey_str, syndicate_user_privkey ):
h = HashAlg.new( pubkey_str )
signer = CryptoSigner.new(syndicate_user_privkey)
signature = signer.sign( h )
return signature
#-------------------------
def verify_public_key( pubkey, syndicate_user_pubkey ):
h = HashAlg.new( pubkey.pubkey_str )
verifier = CryptoSigner.new(syndicate_user_pubkey)
ret = verifier.verify( h, base64.b64decode(pubkey.signature) )
return ret
#-------------------------
def store_public_key( key_name, pubkey, syndicate_user_privkey ):
pubkey_str = pubkey.publickey().exportKey()
signature = sign_public_key( pubkey_str, syndicate_user_privkey )
pubkey = SignedPublicKey( signature=base64.b64encode(signature), pubkey_str=pubkey_str )
try:
pubkey_json = storage.tuple_to_json( pubkey )
except Exception, e:
log.error("Failed to serialize signed public key")
log.exception(e)
return False
key_path = make_key_local_path( key_name + ".pub" )
return storage.write_file( key_path, pubkey_json, volume=None )
#-------------------------
def load_public_key( key_name, syndicate_user_pubkey ):
key_path = make_key_local_path( key_name + ".pub" )
pubkey_json = storage.read_file( key_path, volume=None )
if pubkey_json is None:
log.error("Failed to load public key")
return False
try:
pubkey = storage.json_to_tuple( SignedPublicKey, pubkey_json )
except Exception, e:
log.error("Failed to unserialize signed public key")
log.exception(e)
return False
rc = verify_public_key( pubkey, syndicate_user_pubkey )
if not rc:
log.error("Failed to verify signed public key")
return rc
return CryptoKey.importKey( pubkey.pubkey_str )
#-------------------------
def delete_public_key( key_name ):
key_path = make_key_local_path( key_name + ".pub" )
return storage.delete_file( key_path, volume=None )
#-------------------------
def secure_hash_compare(s1, s2):
# constant-time compare
# see http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(s1, s2):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
if __name__ == "__main__":
pubkey_str = """
-----BEGIN PUBLIC KEY-----
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxwhi2mh+f/Uxcx6RuO42
EuVpxDHuciTMguJygvAHEuGTM/0hEW04Im1LfXldfpKv772XrCq+M6oKfUiee3tl
sVhTf+8SZfbTdR7Zz132kdP1grNafGrp57mkOwxjFRE3FA23T1bHXpIaEcdhBo0R
rXyEnxpJmnLyNYHaLN8rTOig5WFbnmhIZD+xCNtG7hFy39hKt+vNTWK98kMCOMsY
QPywYw8nJaax/kY5SEiUup32BeZWV9HRljjJYlB5kMdzeAXcjQKvn5y47qmluVmx
L1LRX5T2v11KLSpArSDO4At5qPPnrXhbsH3C2Z5L4jqStdLYB5ZYZdaAsaRKcc8V
WpsmzZaFExJ9Nj05sDS1YMFMvoINqaPEftS6Be+wgF8/klZoHFkuslUNLK9k2f65
A7d9Fn/B42n+dCDYx0SR6obABd89cR8/AASkZl3QKeCzW/wl9zrt5dL1iydOq2kw
JtgiKSCt6m7Hwx2kwHBGI8zUfNMBlfIlFu5CP+4xLTOlRdnXqYPylT56JQcjA2CB
hGBRJQFWVutrVtTXlbvT2OmUkRQT9+P5wr0c7fl+iOVXh2TwfaFeug9Fm8QWoGyP
GuKX1KO5JLQjcNTnZ3h3y9LIWHsCTCf2ltycUBguq8Mwzb5df2EkOVgFeLTfWyR2
lPCia/UWfs9eeGgdGe+Wr4sCAwEAAQ==
-----END PUBLIC KEY-----
""".strip()
privkey_str = """
-----BEGIN RSA PRIVATE KEY-----
MIIJKQIBAAKCAgEAxwhi2mh+f/Uxcx6RuO42EuVpxDHuciTMguJygvAHEuGTM/0h
EW04Im1LfXldfpKv772XrCq+M6oKfUiee3tlsVhTf+8SZfbTdR7Zz132kdP1grNa
fGrp57mkOwxjFRE3FA23T1bHXpIaEcdhBo0RrXyEnxpJmnLyNYHaLN8rTOig5WFb
nmhIZD+xCNtG7hFy39hKt+vNTWK98kMCOMsYQPywYw8nJaax/kY5SEiUup32BeZW
V9HRljjJYlB5kMdzeAXcjQKvn5y47qmluVmxL1LRX5T2v11KLSpArSDO4At5qPPn
rXhbsH3C2Z5L4jqStdLYB5ZYZdaAsaRKcc8VWpsmzZaFExJ9Nj05sDS1YMFMvoIN
qaPEftS6Be+wgF8/klZoHFkuslUNLK9k2f65A7d9Fn/B42n+dCDYx0SR6obABd89
cR8/AASkZl3QKeCzW/wl9zrt5dL1iydOq2kwJtgiKSCt6m7Hwx2kwHBGI8zUfNMB
lfIlFu5CP+4xLTOlRdnXqYPylT56JQcjA2CBhGBRJQFWVutrVtTXlbvT2OmUkRQT
9+P5wr0c7fl+iOVXh2TwfaFeug9Fm8QWoGyPGuKX1KO5JLQjcNTnZ3h3y9LIWHsC
TCf2ltycUBguq8Mwzb5df2EkOVgFeLTfWyR2lPCia/UWfs9eeGgdGe+Wr4sCAwEA
AQKCAgEAl1fvIzkWB+LAaVMzZ7XrdE7yL/fv4ufMgzIB9ULjfh39Oykd/gxZBQSq
xIyG5XpRQjGepZIS82I3e7C+ohLg7wvE4qE+Ej6v6H0/DonatmTAaVRMWBNMLaJi
GWx/40Ml6J/NZg0MqQLbw+0iAENAz/TBO+JXWZRSTRGif0Brwp2ZyxJPApM1iNVN
nvhuZRTrjv7/Qf+SK2gMG62MgPceSDxdO9YH5H9vFXT8ldRrE8SNkUrnGPw5LMud
hp6+8bJYQUnjvW3vcaVQklp55AkpzFxjTRUO09DyWImqiHtME91l820UHDpLLldS
1PujpDD54jyjfJF8QmPrlCjjWssm5ll8AYpZFn1mp3SDY6CQhKGdLXjmPlBvEaoR
7yfNa7JRuJAM8ntrfxj3fk0B8t2e5NMylZsBICtposCkVTXpBVJt50gs7hHjiR3/
Q/P7t19ywEMlHx5edy+E394q8UL94YRf7gYEF4VFCxT1k3BhYGw8m3Ov22HS7EZy
2vFqro+RMOR7VkQZXvGecsaZ/5xhL8YIOS+9S90P0tmMVYmuMgp7L+Lm6DZi0Od6
cwKxB7LYabzrpfHXSIfqE5JUgpkV5iTVo4kbmHsrBQB1ysNFR74E1PJFy5JuFfHZ
Tpw0KDBCIXVRFFanQ19pCcbP85MucKWif/DhjOr6nE/js/8O6XECggEBAN0lhYmq
cPH9TucoGnpoRv2o+GkA0aA4HMIXQq4u89LNxOH+zBiom47AAj2onWl+Zo3Dliyy
jBSzKkKSVvBwsuxgz9xq7VNBDiaK+wj1rS6MPqa/0Iyz5Fhi0STp2Fm/elDonYJ8
Jp8MRIWDk0luMgaAh7DuKpIm9dsg45wQmm/4LAGJw6WbbbZ4TUGrT684qIRXk8Q5
1Z08hgSOKUIyDwmv4LqenV6n4XemTq3zs8R0abQiJm81YqSOXwsJppXXgZoUM8sg
L/gxX5pXxCzAfC2QpLI94VJcVtRUNGBK5rMmrANd2uITg6h/wDCy9FxRKWG8f+p4
qAcxr/oXXXebI98CggEBAOZmppx+PoRWaZM547VebUrEDKuZ/lp10hXnr3gkDAKz
2av8jy3YdtCKq547LygpBbjd1i/zFNDZ/r4XT+w/PfnNRMuJR5td29T+lWMi3Hm3
ant/o8qAyVISgkRW1YQjTAhPwYbHc2Y24n/roCutrtIBG9WMLQNEbJUXjU5uNF/0
+ezKKNFIruCX/JafupBfXl1zAEVuT0IkqlHbmSL4oxYafhPorLzjIPLiJgjAB6Wb
iIOVIUJt61O6vkmeBWOP+bj5x1be6h35MlhKT+p4rMimaUALvbGlGQBX+Bm54/cN
Ih0Kqx/gsDoD5rribQhuY0RANo1wfXdkW/ajHZihCdUCggEABO01EGAPrBRskZG/
JUL1cek1v4EZKmyVl21VOvQo0mVrIW2/tjzrWj7EzgLXnuYF+tqEmfJQVJW5N0pz
TV/1XHa7qrlnGBe27Pzjost2VDcjnitfxgKr75wj9KKRA07UtsC34ZRKd/iZ/i90
NIqT6rkqTLLBmAfuKjeNWoi0KBJrSI19Ik9YHlyHvBLI76pfdrNMw25WZ+5VPfy8
xpC+7QRSCVZHQziSOUwnLJDlTFcbk7u/B3M1A114mJJad7QZWwlgLgJFj03qR1H1
ONoA6jLyuFXQkzkjZg+KKysAALW310tb+PVeVX6jFXKnJvdX6Kl+YAbYF3Dv7q5e
kq+OGQKCAQEAngEnoYqyNO9N17mLf4YSTYPFbKle1YqXWI5at3mBAxlz3Y6GYlpg
oQN4TjsoS9JWKkF38coyLEhTeulh1hJI3lb3Jt4uTU5AxAETUblGmfI/BBK0sNtB
NRecXmFubAAI1GpdvaBqc16QVkmwvkON8FbyT7Ch7euuy1Arh+3r3SKTgt/gviWq
SDvy7Rj9SKUegdesB/FuSV37r8d5bZI1xaLFc8HNNHxOzEJq8vU+SUQwioxrErNu
/yzB8pp795t1FnW1Ts3woD2VWRcdVx8K30/APjvPC1S9oI6zhnEE9Rf8nQ4D7QiZ
0i96vA8r1uxdByFCSB0s7gPVTX7vfQxzQQKCAQAnNWvIwXR1W40wS5kgKwNd9zyO
+G9mWRvQgM3PptUXM6XV1kSPd+VofGvQ3ApYJ3I7f7VPPNTPVLI57vUkhOrKbBvh
Td3OGzhV48behsSmOEsXkNcOiogtqQsACZzgzI+46akS87m+OHhP8H3KcdsvGUNM
xwHi4nnnVSMQ+SWtSuCHgA+1gX5YlNKDjq3RLCRG//9XHIApfc9c52TJKZukLpfx
chit4EZW1ws/JPkQ+Yer91mCQaSkPnIBn2crzce4yqm2dOeHlhsfo25Wr37uJtWY
X8H/SaEdrJv+LaA61Fy4rJS/56Qg+LSy05lISwIHBu9SmhTuY1lBrr9jMa3Q
-----END RSA PRIVATE KEY-----
""".strip()
import session
fake_module = collections.namedtuple( "FakeModule", ["VOLUME_STORAGE_DIRS", "LOCAL_STORAGE_DIRS"] )
fake_vol = session.do_test_volume( "/tmp/storage-test/volume" )
singleton.set_volume( fake_vol )
fake_mod = fake_module( LOCAL_STORAGE_DIRS=LOCAL_STORAGE_DIRS, VOLUME_STORAGE_DIRS=VOLUME_STORAGE_DIRS )
assert storage.setup_storage( privkey_str, "/apps/syndicatemail/data", "/tmp/storage-test/local", [fake_mod] ), "setup_storage failed"
print "----- secure hash compare -----"
assert secure_hash_compare("sniff", "sniff"), "secure compare failed for equality"
assert not secure_hash_compare("sniff", "yoink"), "secure compare failed for inequality"
print "----- encrypt key -----"
encrypted_key = encrypt_private_key( privkey_str, "sniff" )
print "----- decrypt key -----"
decrypted_key = decrypt_private_key( encrypted_key, "sniff" )
assert privkey_str == decrypted_key, "Decrypt(Encrypt( key )) != key\n\nexpected\n%s\n\n\ngot\n%s" % (str(privkey_str), str(decrypted_key))
privkey = CryptoKey.importKey( privkey_str )
pubkey = CryptoKey.importKey( pubkey_str )
rc = store_private_key( "test.key", privkey, "sniff" )
if not rc:
raise Exception("store_private_key failed")
privkey2 = load_private_key( "test.key", "sniff" )
privkey2_str = privkey2.exportKey()
assert privkey_str == privkey2_str, "load(store(key)) != key"
rc = store_public_key( "test2.key", pubkey, privkey )
if not rc:
raise Exception("store_public_key failed")
pubkey2_str = load_public_key( "test2.key", pubkey ).exportKey()
assert pubkey2_str == pubkey_str, "load(store(pubkey)) != pubkey"
#delete_private_key( "test.key" )
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import time
import subprocess
# Import parameters from the setup file.
sys.path.append('.')
from setup import (
setup_dict, get_project_files, print_success_message,
print_failure_message, _lint, _test, _test_all,
CODE_DIRECTORY, DOCS_DIRECTORY, TESTS_DIRECTORY, PYTEST_FLAGS)
from paver.easy import options, task, needs, consume_args
from paver.setuputils import install_distutils_tasks
options(setup=setup_dict)
install_distutils_tasks()
## Miscellaneous helper functions
def print_passed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=PASSED
print_success_message(r''' ___ _ ___ ___ ___ ___
| _ \/_\ / __/ __| __| \
| _/ _ \\__ \__ \ _|| |) |
|_|/_/ \_\___/___/___|___/
''')
def print_failed():
# generated on http://patorjk.com/software/taag/#p=display&f=Small&t=FAILED
print_failure_message(r''' ___ _ ___ _ ___ ___
| __/_\ |_ _| | | __| \
| _/ _ \ | || |__| _|| |) |
|_/_/ \_\___|____|___|___/
''')
class cwd(object):
"""Class used for temporarily changing directories. Can be though of
as a `pushd /my/dir' then a `popd' at the end.
"""
def __init__(self, newcwd):
""":param newcwd: directory to make the cwd
:type newcwd: :class:`str`
"""
self.newcwd = newcwd
def __enter__(self):
self.oldcwd = os.getcwd()
os.chdir(self.newcwd)
return os.getcwd()
def __exit__(self, type_, value, traceback):
# This acts like a `finally' clause: it will always be executed.
os.chdir(self.oldcwd)
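# Illustrative sketch (not part of the original tasks): cwd() behaves like a
# pushd/popd pair, restoring the original working directory even on error.
def _demo_cwd():
    before = os.getcwd()
    with cwd(DOCS_DIRECTORY):
        print('Now in:', os.getcwd())
    assert os.getcwd() == before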
## Task-related functions
def _doc_make(*make_args):
"""Run make in sphinx' docs directory.
:return: exit code
"""
if sys.platform == 'win32':
# Windows
make_cmd = ['make.bat']
else:
# Linux, Mac OS X, and others
make_cmd = ['make']
make_cmd.extend(make_args)
# Account for a stupid Python "bug" on Windows:
# <http://bugs.python.org/issue15533>
with cwd(DOCS_DIRECTORY):
retcode = subprocess.call(make_cmd)
return retcode
## Tasks
@task
@needs('doc_html', 'setuptools.command.sdist')
def sdist():
"""Build the HTML docs and the tarball."""
pass
@task
def test():
"""Run the unit tests."""
raise SystemExit(_test())
@task
def lint():
# This refuses to format properly when running `paver help' unless
# this ugliness is used.
('Perform PEP8 style check, run PyFlakes, and run McCabe complexity '
'metrics on the code.')
raise SystemExit(_lint())
@task
def test_all():
"""Perform a style check and run all unit tests."""
retcode = _test_all()
if retcode == 0:
print_passed()
else:
print_failed()
raise SystemExit(retcode)
@task
@consume_args
def run(args):
"""Run the package's main script. All arguments are passed to it."""
# The main script expects to get the called executable's name as
# argv[0]. However, paver doesn't provide that in args. Even if it did (or
# we dove into sys.argv), it wouldn't be useful because it would be paver's
# executable. So we just pass the package name in as the executable name,
# since it's close enough. This should never be seen by an end user
# installing through Setuptools anyway.
from easy_tensorflow.main import main
raise SystemExit(main([CODE_DIRECTORY] + args))
@task
def commit():
"""Commit only if all the tests pass."""
if _test_all() == 0:
subprocess.check_call(['git', 'commit'])
else:
print_failure_message('\nTests failed, not committing.')
@task
def coverage():
"""Run tests and show test coverage report."""
try:
import pytest_cov # NOQA
except ImportError:
print_failure_message(
'Install the pytest coverage plugin to use this task, '
"i.e., `pip install pytest-cov'.")
raise SystemExit(1)
import pytest
pytest.main(PYTEST_FLAGS + [
'--cov', CODE_DIRECTORY,
'--cov-report', 'term-missing',
TESTS_DIRECTORY])
@task # NOQA
def doc_watch():
"""Watch for changes in the docs and rebuild HTML docs when changed."""
try:
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
except ImportError:
print_failure_message('Install the watchdog package to use this task, '
"i.e., `pip install watchdog'.")
raise SystemExit(1)
class RebuildDocsEventHandler(FileSystemEventHandler):
def __init__(self, base_paths):
self.base_paths = base_paths
def dispatch(self, event):
"""Dispatches events to the appropriate methods.
:param event: The event object representing the file system event.
:type event: :class:`watchdog.events.FileSystemEvent`
"""
for base_path in self.base_paths:
if event.src_path.endswith(base_path):
super(RebuildDocsEventHandler, self).dispatch(event)
# We found one that matches. We're done.
return
def on_modified(self, event):
print_failure_message('Modification detected. Rebuilding docs.')
# # Strip off the path prefix.
# import os
# if event.src_path[len(os.getcwd()) + 1:].startswith(
# CODE_DIRECTORY):
# # sphinx-build doesn't always pick up changes on code files,
# # even though they are used to generate the documentation. As
# # a workaround, just clean before building.
doc_html()
print_success_message('Docs have been rebuilt.')
print_success_message(
'Watching for changes in project files, press Ctrl-C to cancel...')
handler = RebuildDocsEventHandler(get_project_files())
observer = Observer()
observer.schedule(handler, path='.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@task
@needs('doc_html')
def doc_open():
"""Build the HTML docs and open them in a web browser."""
doc_index = os.path.join(DOCS_DIRECTORY, 'build', 'html', 'index.html')
if sys.platform == 'darwin':
# Mac OS X
subprocess.check_call(['open', doc_index])
elif sys.platform == 'win32':
# Windows
subprocess.check_call(['start', doc_index], shell=True)
    elif sys.platform.startswith('linux'):
# All freedesktop-compatible desktops
subprocess.check_call(['xdg-open', doc_index])
else:
print_failure_message(
"Unsupported platform. Please open `{0}' manually.".format(
doc_index))
@task
def get_tasks():
"""Get all paver-defined tasks."""
from paver.tasks import environment
for task in environment.get_tasks():
print(task.shortname)
@task
def doc_html():
"""Build the HTML docs."""
retcode = _doc_make('html')
if retcode:
raise SystemExit(retcode)
@task
def doc_clean():
"""Clean (delete) the built docs."""
retcode = _doc_make('clean')
if retcode:
raise SystemExit(retcode)
|
|
""" Create splits of the data into train and test data used for cross-validation """
import random
from pySPACE.missions.nodes.base_node import BaseNode
from pySPACE.missions.nodes.decorators import NoOptimizationParameter
from pySPACE.tools.memoize_generator import MemoizeGenerator
import logging
@NoOptimizationParameter("stratified")
@NoOptimizationParameter("random")
@NoOptimizationParameter("time_dependent")
class CrossValidationSplitterNode(BaseNode):
""" Perform (stratified) cross-validation
During benchmarking, n pairs of training and test data are generated, where
n is configurable via the parameter splits. The n test datasets are pairwise
    disjoint. Internally, the available data is partitioned into n pairwise
    disjoint sets s_1, ..., s_n of equal size (the "splits"). The i-th pair of
training and test data is generated by using s_i as test data and the
union of the remaining datasets as training data.
    The partitioning is stratified by default, i.e. the splits have the same
    class ratio as the overall dataset. By default, the partitioning is based
    on shuffling the data randomly. In this case, the partitioning of the data
into s_1, ..., s_n is determined solely based on the run number (used as
random seed), yielding the same split for the same run_number and different
ones for two different run_numbers.
**Parameters**
:splits:
The number of splits created internally. If n data points exist and
        m splits are created, each of these splits consists of approx. n/m
data points.
(*optional, default: 10*)
:stratified:
If true, the cross-validation is stratified, i.e. the overall
        class-ratio is retained in each split (as closely as possible).
(*optional, default: True*)
:random:
If true, the order of the data is randomly shuffled.
(*optional, default: True*)
:time_dependent:
        If True, splitting is done separately for different (i.e.,
        non-overlapping) time windows to ensure that instances corresponding
        to the same marker end up in the same split.
.. note:: Stratification is only allowed here if there is only one
class label for one marker.
(*optional, default: False*)
:stratified_class:
If *time_dependent* is True and *stratified_class* is specified
        stratification is only done for the specified class label (string).
        The other class fills up the splits while preserving the time order
        of the data. This also means that *random* has no effect here.
(*optional, default: None*)
**Exemplary Call**
.. code-block:: yaml
-
node : CV_Splitter
parameters :
splits : 10
stratified : True
:Author: Jan Hendrik Metzen (jhm@informatik.uni-bremen.de)
:Created: 2008/12/16
"""
def __init__(self, splits=10, stratified=True, random=True,
time_dependent=False, stratified_class = None, *args, **kwargs):
super(CrossValidationSplitterNode, self).__init__(*args, **kwargs)
self.set_permanent_attributes(splits = int(splits), #how many splits
current_split = 0, # current split for testing
split_indices = None,
run_number = -1,
random = random,
stratified = stratified,
stratified_class = stratified_class,
time_dependent = time_dependent)
def is_split_node(self):
""" Return whether this is a split node """
return True
def use_next_split(self):
""" Use the next split of the data into training and test data.
Returns True if more splits are available, otherwise False.
        This method is useful for benchmarking.
"""
if self.current_split + 1 < self.splits:
self.current_split = self.current_split + 1
self._log("Benchmarking with split %s/%s" % (self.current_split + 1,
self.splits))
return True
else:
return False
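    # Sketch of how a benchmarking driver might iterate over the folds
    # (hypothetical code, not part of this module):
    #
    #     while True:
    #         train = splitter.request_data_for_training(use_test_data=False)
    #         test = splitter.request_data_for_testing()
    #         ...  # train and evaluate the subsequent nodes
    #         if not splitter.use_next_split():
    #             break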
def train_sweep(self, use_test_data):
""" Performs the actual training of the node.
.. note:: Split nodes cannot be trained
"""
raise Exception("Split nodes cannot be trained")
def request_data_for_training(self, use_test_data):
""" Returns the data for training of subsequent nodes
.. todo:: to document
"""
# Create cv-splits lazily when required
        if self.split_indices is None:
self._create_splits()
# All data can be used for training which is not explicitly
# specified for testing by the current cv-split
self.data_for_training = MemoizeGenerator(
self.data[i] for i in range(len(self.data))
            if i not in self.split_indices[self.current_split])
return self.data_for_training.fresh()
def request_data_for_testing(self):
""" Returns the data for testing of subsequent nodes
.. todo:: to document
"""
# Create cv-splits lazily when required
        if self.split_indices is None:
self._create_splits()
# Only that data can be used for testing which is explicitly
# specified for this purpose by the current cv-split
self.data_for_testing = MemoizeGenerator(
self.data[i] for i in self.split_indices[self.current_split])
return self.data_for_testing.fresh()
def _create_splits(self):
""" Create the split of the data for n-fold cross-validation """
self._log("Creating %s splits for cross validation" % self.splits)
# Get training and test data (with labels)
train_data = \
list(self.input_node.request_data_for_training(use_test_data=False))
test_data = list(self.input_node.request_data_for_testing())
# If there is already a non-empty training set,
# it means that we are not the first split node in the node chain
if len(train_data) > 0:
raise Exception("No iterated splitting of data sets allowed\n "
"(Calling a splitter on a data set that is "
"already split)")
# Remember all the data and store it in memory
        # TODO: This might cause problems for large datasets
self.data = train_data + test_data
# initialize result structure: Determine which data points are
# reserved for testing in which cross validation run
split_indices = []
if self.time_dependent:
# sort the data according to start_time
self.data.sort(key=lambda swindow: swindow[0].start_time)
# divide the data with respect to the time_point
data_time = dict()
last_window_end_time = 0.0
marker = -1
label_marker = dict()
for (index, (window, label)) in enumerate(self.data):
if window.start_time > last_window_end_time:
marker += 1
data_time[marker] = [index]
if self.stratified or self.stratified_class:
if label not in label_marker:
label_marker[label] = [marker]
else:
label_marker[label].append(marker)
else:
data_time[marker].append(index)
# check label consistency for later stratification
if (self.stratified or self.stratified_class) and \
self.data[data_time[marker][0]][1] != label:
import warnings
warnings.warn(
"Since there are several class labels"
" for one marker stratification is set to False.",
UserWarning)
self.stratified = False
self.stratified_class = None
last_window_end_time = window.end_time
#print "data_time: \n", data_time
if self.stratified: # each marker has only one label
                # not more splits than markers of every class!
assert(min([len(markers) for markers in
label_marker.values()]) >= self.splits)
# extend result structure since we need it in the next block
split_indices = [[] for i in range(self.splits)]
# determine the splits of the data
for label, markers in label_marker.iteritems():
data_size = len(markers)
# Set random seed and randomize the order of the data
if self.random:
r = random.Random(self.run_number)
r.shuffle(markers)
for j in range(self.splits):
split_start = int(round(float(j) * data_size/self.splits))
split_end = int(round(float(j+1) * data_size/self.splits))
# means half-open interval [split_start, split_end)
for i in range(split_start, split_end):
split_indices[j].extend(data_time[markers[i]])
# avoid sorted labels by sorting time dependent
split_indices = [sorted(split_list)
for split_list in split_indices]
#print "run_number:", self.run_number
#print "time_dependent && stratified:\n", split_indices
elif self.stratified_class:
# extend result structure since we need it in the next block
split_indices = [[] for i in range(self.splits)]
# determine the splits of the data
data_size = len(label_marker[self.stratified_class])
for j in range(self.splits):
split_start = int(round(float(j) * data_size/self.splits))
split_end = int(round(float(j+1) * data_size/self.splits))
# means half-open interval [split_start, split_end)
for i in range(split_start, split_end):
split_indices[j].extend(data_time[label_marker[self.stratified_class][i]])
#print "time_dependent && stratified_class:\n before filling up\n", split_indices
# fill up with other classes
last_max_index = 0
for split_list in split_indices:
max_index = max(split_list)
for i in range(last_max_index, max_index):
if self.data[i][1] != self.stratified_class:
split_list.append(i)
last_max_index = max_index+1
for i in range(last_max_index, len(self.data)):
if self.data[i][1] != self.stratified_class:
split_indices[-1].append(i)
# avoid sorted labels by sorting time dependent
split_indices = [sorted(split_list)
for split_list in split_indices]
print "time_dependent && stratified_class:\n", split_indices
else:
                # we should not have more splits than (marker) time points
data_size = len(data_time.keys())
assert(data_size >= self.splits)
# Set random seed and randomize the order of the data
indices = data_time.keys()
if self.random:
r = random.Random(self.run_number)
r.shuffle(indices)
# determine the splits of the data
for i in range(self.splits):
split_indices.append([])
split_start = int(round(float(i) * data_size / self.splits))
split_end = int(round(float(i + 1) * data_size / self.splits))
# means half-open interval [split_start, split_end)
for j in range(split_start,split_end):
split_indices[i].extend(data_time[indices[j]])
# avoid sorted labels by sorting time dependent
split_indices = [sorted(split_list)
for split_list in split_indices]
#for index, splitlist in enumerate(split_indices):
                #    print index, "first: ", self.data[splitlist[0]][0].start_time, ", last: ", self.data[splitlist[-1]][0].start_time, ", length: ", len(data_time.keys())
#print "time_dependent:\n", split_indices
elif self.stratified: # Stratified cross-validation
# divide the data with respect to the class_label
data_labeled = dict()
for (index, (window, label)) in enumerate(self.data):
                if label not in data_labeled:
data_labeled[label] = [index]
else:
data_labeled[label].append(index)
            # we should not have more splits than instances of every class!
min_nr_per_class = min([len(data) for data in data_labeled.values()])
if self.splits > min_nr_per_class:
self.splits = min_nr_per_class
self._log("Reducing number of splits to %s since no more "
"instances of one of the classes are available."
% self.splits, level=logging.CRITICAL)
# extend result structure since we need it in the next block
split_indices = [[] for i in range(self.splits)]
# determine the splits of the data
for label, indices in data_labeled.iteritems():
data_size = len(indices)
# Set random seed and randomize the order of the data
if self.random:
r = random.Random(self.run_number)
r.shuffle(indices)
for j in range(self.splits):
split_start = int(round(float(j) * data_size/self.splits))
split_end = int(round(float(j+1) * data_size/self.splits))
# means half-open interval [split_start, split_end)
split_indices[j].extend(indices[split_start: split_end])
# avoid sorted labels
for j in range(self.splits):
r = random.Random(self.run_number)
r.shuffle(split_indices[j])
# print "stratified:\n", split_indices
# old trunk version
# =================
# data_size = len(self.data)
# # Determine ratio of class1
# instance_labels = map(lambda x: x[1], self.data)
# classes = list(set(instance_labels))
# assert (len(classes) == 2),\
# "Stratified cross-validation works currently only for "\
# "binary classification tasks."
# class1_instances = instance_labels.count(classes[0])
# class2_instances = instance_labels.count(classes[1])
# if self.splits > min(class1_instances, class2_instances):
# self.set_permanent_attributes(splits = min(class1_instances,
# class2_instances))
# self._log("Reducing number of splits to %s since no more " \
# "instances of one of the classes are available."
# % self.splits)
# class1_ratio = float(class1_instances) / data_size
# # Determine which instances belong to which class
# class1_indices = []
# class2_indices = []
# for index, instance_label in enumerate(instance_labels):
# if instance_label == classes[0]:
# class1_indices.append(index)
# else:
# class2_indices.append(index)
#
# # Randomize order
# if self.random:
# r = random.Random(self.run_number)
# r.shuffle(class1_indices)
# r.shuffle(class2_indices)
#
# # Merge the two classes (such that they alternate in the appropriate
# # frequency)
# indices = []
# n = 0 # class1 counter
# for i in range(data_size):
# if i == round((n + 0.5) / class1_ratio):
# indices.append(class1_indices.pop())
# n += 1
# else:
# indices.append(class2_indices.pop())
else: # Non-stratified cross-validation
data_size = len(self.data)
# We cannot have more splits than data points
assert(data_size >= self.splits)
# Set random seed and randomize the order of the data
indices = range(data_size)
if self.random:
r = random.Random(self.run_number)
r.shuffle(indices)
# Determine the splits of the data
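            # Worked example with illustrative numbers: for data_size = 10 and
            # splits = 3 the rounded boundaries are 0, 3, 7 and 10, i.e. the
            # half-open index ranges [0, 3), [3, 7) and [7, 10) with sizes
            # 3, 4 and 3.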
for i in range(self.splits):
split_start = int(round(float(i) * data_size / self.splits))
split_end = int(round(float(i + 1) * data_size / self.splits))
# means half-open interval [split_start, split_end)
split_indices.append(indices[split_start: split_end])
self.split_indices = split_indices
self._log("Benchmarking with split %s/%s" % (self.current_split + 1,
self.splits))
_NODE_MAPPING = {"CV_Splitter": CrossValidationSplitterNode}
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2015 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import base64
from jsonpath_rw import parse
from girder.utility.model_importer import ModelImporter
from girder.models.model_base import ValidationException
from girder.api.rest import RestException, getCurrentUser
from cumulus.constants import ClusterType, ClusterStatus
from cumulus.common.girder import get_task_token, _get_profile
import cumulus.tasks.cluster
import cumulus.tasks.job
import cumulus.ssh.tasks.key
import cumulus.ansible.tasks.cluster
from cumulus.common.jsonpath import get_property
class AbstractClusterAdapter(ModelImporter):
"""
This defines the interface to be used by all cluster adapters.
"""
def __init__(self, cluster):
self.cluster = cluster
self._state_machine = ClusterStatus(self)
self._model = self.model('cluster', 'cumulus')
@property
def status(self):
return self._state_machine.status
@status.setter
def status(self, status):
self._state_machine.to(
status, RestException(
'Cluster is in state %s and cannot transition to state %s' %
(self._state_machine.status, status), code=400))
self._model.update_status(self.cluster['_id'], status)
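    # Illustrative use of the setter above (sketch): assigning
    # ``adapter.status = ClusterStatus.LAUNCHING`` first asks the state
    # machine to transition; an invalid transition raises the RestException
    # with code 400, otherwise the new status is persisted on the cluster.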
def validate(self):
"""
Adapters may implement this if they need to perform any validation
steps whenever the cluster info is saved to the database. It should
return the document with any necessary alterations in the success case,
or throw an exception if validation fails.
"""
return self.cluster
def start(self, request_body):
"""
Adapters may implement this if they support a start operation.
"""
raise ValidationException(
'This cluster type does not support a start operation')
def terminate(self):
"""
Adapters may implement this if they support a terminate operation.
"""
raise ValidationException(
'This cluster type does not support a terminate operation')
    def update(self, request_body):
        """
        Adapters may implement this if they support an update operation.
        """
        raise ValidationException(
            'This cluster type does not support an update operation')
def delete(self):
"""
Adapters may implement this if they support a delete operation.
"""
# If an assetstore was created for this cluster then try to remove it
if 'assetstoreId' in self.cluster:
try:
assetstore = self.model('assetstore').load(
self.cluster['assetstoreId'])
self.model('assetstore').remove(assetstore)
except ValidationException:
# If we still have files associated with the assetstore then
# leave it.
pass
def submit_job(self, job):
log_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl,
job['_id'])
girder_token = get_task_token()['_id']
cumulus.tasks.job.submit(
girder_token,
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
job, log_url)
class AnsibleClusterAdapter(AbstractClusterAdapter):
"""
This defines the interface to be used by all cluster adapters.
"""
DEFAULT_PLAYBOOK = 'ec2'
def validate(self):
"""
Adapters may implement this if they need to perform any validation
steps whenever the cluster info is saved to the database. It should
return the document with any necessary alterations in the success case,
or throw an exception if validation fails.
"""
return self.cluster
def launch(self):
self.status = ClusterStatus.LAUNCHING
base_url = cumulus.config.girder.baseUrl
log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
girder_token = get_task_token()['_id']
profile, secret_key = _get_profile(self.cluster['profileId'])
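        # get_property (cumulus.common.jsonpath) resolves the dotted JSONPath
        # in the cluster document and returns ``default`` when nothing matches.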
playbook = get_property(
'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
playbook_params = get_property(
'config.launch.params', self.cluster, default={})
playbook_params['cluster_state'] = ClusterStatus.RUNNING
cumulus.ansible.tasks.cluster.launch_cluster \
.delay(playbook,
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
profile, secret_key, playbook_params, girder_token,
log_write_url, ClusterStatus.RUNNING)
return self.cluster
def terminate(self):
self.status = ClusterStatus.TERMINATING
base_url = cumulus.config.girder.baseUrl
log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
girder_token = get_task_token()['_id']
profile, secret_key = _get_profile(self.cluster['profileId'])
playbook = get_property(
'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
playbook_params = get_property(
'config.launch.params', self.cluster, default={})
playbook_params['cluster_state'] = 'absent'
cumulus.ansible.tasks.cluster.terminate_cluster \
.delay(playbook,
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
profile, secret_key, playbook_params, girder_token,
log_write_url, ClusterStatus.TERMINATED)
def provision(self):
self.status = ClusterStatus.PROVISIONING
base_url = cumulus.config.girder.baseUrl
log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
girder_token = get_task_token()['_id']
profile, secret_key = _get_profile(self.cluster['profileId'])
playbook = get_property(
'config.provision.spec', self.cluster,
default=self.DEFAULT_PLAYBOOK)
playbook_params = get_property(
'config.provision.params', self.cluster, default={})
provision_ssh_user = get_property(
'config.provision.ssh.user', self.cluster, default='ubuntu')
playbook_params['cluster_state'] = ClusterStatus.RUNNING
playbook_params['ansible_ssh_user'] = provision_ssh_user
cumulus.ansible.tasks.cluster.provision_cluster \
.delay(playbook,
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
profile, secret_key, playbook_params,
girder_token, log_write_url, ClusterStatus.RUNNING)
return self.cluster
def start(self, request_body):
"""
Adapters may implement this if they support a start operation.
"""
self.status = ClusterStatus.LAUNCHING
self.cluster['config'].setdefault('provision', {})\
.setdefault('params', {}).update(request_body)
self.cluster = self.model('cluster', 'cumulus').save(self.cluster)
base_url = cumulus.config.girder.baseUrl
log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
girder_token = get_task_token()['_id']
profile, secret_key = _get_profile(self.cluster['profileId'])
# Launch
launch_playbook = get_property(
'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
launch_playbook_params = get_property(
'config.launch.params', self.cluster, default={})
launch_playbook_params['cluster_state'] = ClusterStatus.RUNNING
# Provision
provision_playbook = get_property(
'config.provision.spec', self.cluster, default='gridengine/site')
provision_playbook_params = get_property(
'config.provision.params', self.cluster, default={})
provision_ssh_user = get_property(
'config.provision.ssh.user', self.cluster, default='ubuntu')
provision_playbook_params['ansible_ssh_user'] = provision_ssh_user
provision_playbook_params['cluster_state'] = ClusterStatus.RUNNING
cumulus.ansible.tasks.cluster.start_cluster \
.delay(launch_playbook,
# provision playbook
provision_playbook,
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
profile, secret_key,
launch_playbook_params, provision_playbook_params,
girder_token, log_write_url)
def delete(self):
"""
Adapters may implement this if they support a delete operation.
"""
        if self.status not in [ClusterStatus.CREATED,
                               ClusterStatus.ERROR,
                               ClusterStatus.TERMINATED]:
raise RestException(
'Cluster is in state %s and cannot be deleted' %
self.status, code=400)
def _validate_key(key):
try:
parts = key.split()
key_type, key_string = parts[:2]
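            # The base64 payload of an OpenSSH public key starts with a 4-byte
            # length prefix followed by the key-type name, so bytes 4:11 spell
            # out e.g. 'ssh-rsa' for the common 7-character type names.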
data = base64.b64decode(key_string.encode('utf8'))
return data[4:11].decode('utf8') == key_type
except Exception:
return False
class TraditionClusterAdapter(AbstractClusterAdapter):
def validate(self):
query = {
'name': self.cluster['name'],
'userId': getCurrentUser()['_id'],
'type': 'trad'
}
if '_id' in self.cluster:
query['_id'] = {'$ne': self.cluster['_id']}
duplicate = self.model('cluster', 'cumulus').findOne(query,
fields=['_id'])
if duplicate:
raise ValidationException(
'A cluster with that name already exists.', 'name')
return self.cluster
def update(self, body):
# Use JSONPath to extract out what we need
passphrase = parse('config.ssh.passphrase').find(body)
public_key = parse('config.ssh.publicKey').find(body)
if passphrase:
ssh = self.cluster['config'].setdefault('ssh', {})
ssh['passphrase'] = passphrase[0].value
if public_key:
public_key = public_key[0].value
if not _validate_key(public_key):
raise RestException('Invalid key format', 400)
ssh = self.cluster['config'].setdefault('ssh', {})
ssh['publicKey'] = public_key
self.cluster = self.model('cluster', 'cumulus').save(self.cluster)
# Don't return the access object
del self.cluster['access']
# Don't return the log
del self.cluster['log']
# Don't return the passphrase
if parse('config.ssh.passphrase').find(self.cluster):
del self.cluster['config']['ssh']['passphrase']
return self.cluster
def start(self, request_body):
if self.cluster['status'] == ClusterStatus.CREATING:
raise RestException('Cluster is not ready to start.', code=400)
log_write_url = '%s/clusters/%s/log' % (cumulus.config.girder.baseUrl,
self.cluster['_id'])
girder_token = get_task_token()['_id']
cumulus.tasks.cluster.test_connection \
.delay(self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
log_write_url=log_write_url,
girder_token=girder_token)
def delete(self):
super(TraditionClusterAdapter, self).delete()
# Clean up key associate with cluster
cumulus.ssh.tasks.key.delete_key_pair.delay(
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
get_task_token()['_id'])
class NewtClusterAdapter(AbstractClusterAdapter):
def validate(self):
query = {
'name': self.cluster['name'],
'userId': getCurrentUser()['_id'],
'type': 'trad'
}
if '_id' in self.cluster:
query['_id'] = {'$ne': self.cluster['_id']}
duplicate = self.model('cluster', 'cumulus').findOne(query,
fields=['_id'])
if duplicate:
raise ValidationException(
'A cluster with that name already exists.', 'name')
return self.cluster
def update(self, body):
# Don't return the access object
del self.cluster['access']
# Don't return the log
del self.cluster['log']
return self.cluster
def _generate_girder_token(self):
user = self.model('user').load(self.cluster['userId'], force=True)
girder_token = self.model('token').createToken(user=user, days=7)
return girder_token['_id']
def start(self, request_body):
log_write_url = '%s/clusters/%s/log' % (cumulus.config.girder.baseUrl,
self.cluster['_id'])
girder_token = get_task_token(self.cluster)['_id']
cumulus.tasks.cluster.test_connection \
.delay(
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
log_write_url=log_write_url, girder_token=girder_token)
def submit_job(self, job):
log_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl, job['_id'])
girder_token = get_task_token(self.cluster)['_id']
cumulus.tasks.job.submit(
girder_token,
self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
job, log_url)
type_to_adapter = {
ClusterType.EC2: AnsibleClusterAdapter,
ClusterType.ANSIBLE: AnsibleClusterAdapter,
ClusterType.TRADITIONAL: TraditionClusterAdapter,
ClusterType.NEWT: NewtClusterAdapter
}
def get_cluster_adapter(cluster):
global type_to_adapter
return type_to_adapter[cluster['type']](cluster)
|
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(905, 649)
Dialog.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.wordlist = QtWidgets.QTreeView(Dialog)
self.wordlist.setGeometry(QtCore.QRect(20, 90, 181, 511))
font = QtGui.QFont()
font.setFamily("Menlo")
self.wordlist.setFont(font)
self.wordlist.setAutoFillBackground(False)
self.wordlist.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.wordlist.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.wordlist.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
self.wordlist.setAlternatingRowColors(True)
self.wordlist.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.wordlist.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.wordlist.setVerticalScrollMode(QtWidgets.QAbstractItemView.ScrollPerItem)
self.wordlist.setRootIsDecorated(False)
self.wordlist.setObjectName("wordlist")
self.label_wordlist = QtWidgets.QLabel(Dialog)
self.label_wordlist.setGeometry(QtCore.QRect(20, 50, 181, 20))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_wordlist.setFont(font)
self.label_wordlist.setAlignment(QtCore.Qt.AlignCenter)
self.label_wordlist.setObjectName("label_wordlist")
self.label_word = QtWidgets.QLabel(Dialog)
self.label_word.setGeometry(QtCore.QRect(230, 70, 441, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_word.setFont(font)
self.label_word.setTextFormat(QtCore.Qt.AutoText)
self.label_word.setScaledContents(False)
self.label_word.setAlignment(QtCore.Qt.AlignCenter)
self.label_word.setObjectName("label_word")
self.label_word_show = QtWidgets.QLabel(Dialog)
self.label_word_show.setGeometry(QtCore.QRect(230, 100, 441, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_word_show.setFont(font)
self.label_word_show.setText("")
self.label_word_show.setAlignment(QtCore.Qt.AlignCenter)
self.label_word_show.setObjectName("label_word_show")
self.label_def = QtWidgets.QLabel(Dialog)
self.label_def.setGeometry(QtCore.QRect(230, 130, 441, 20))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_def.setFont(font)
self.label_def.setAlignment(QtCore.Qt.AlignCenter)
self.label_def.setObjectName("label_def")
self.label_def_show = QtWidgets.QLabel(Dialog)
self.label_def_show.setGeometry(QtCore.QRect(230, 150, 441, 51))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_def_show.setFont(font)
self.label_def_show.setText("")
self.label_def_show.setAlignment(QtCore.Qt.AlignCenter)
self.label_def_show.setWordWrap(True)
self.label_def_show.setObjectName("label_def_show")
self.label_samp = QtWidgets.QLabel(Dialog)
self.label_samp.setGeometry(QtCore.QRect(230, 210, 441, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_samp.setFont(font)
self.label_samp.setAlignment(QtCore.Qt.AlignCenter)
self.label_samp.setObjectName("label_samp")
self.label_samp_show = QtWidgets.QLabel(Dialog)
self.label_samp_show.setGeometry(QtCore.QRect(230, 240, 441, 61))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_samp_show.setFont(font)
self.label_samp_show.setAlignment(QtCore.Qt.AlignCenter)
self.label_samp_show.setWordWrap(True)
self.label_samp_show.setObjectName("label_samp_show")
self.console_show_history = QtWidgets.QTextBrowser(Dialog)
self.console_show_history.setGeometry(QtCore.QRect(230, 330, 631, 251))
font = QtGui.QFont()
font.setFamily("Menlo")
self.console_show_history.setFont(font)
self.console_show_history.setObjectName("console_show_history")
self.label_console = QtWidgets.QLabel(Dialog)
self.label_console.setGeometry(QtCore.QRect(230, 310, 631, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_console.setFont(font)
self.label_console.setAlignment(QtCore.Qt.AlignCenter)
self.label_console.setObjectName("label_console")
self.button_show_wordlist = QtWidgets.QPushButton(Dialog)
self.button_show_wordlist.setGeometry(QtCore.QRect(15, 600, 191, 32))
font = QtGui.QFont()
font.setFamily("Menlo")
self.button_show_wordlist.setFont(font)
self.button_show_wordlist.setObjectName("button_show_wordlist")
self.button_quit = QtWidgets.QPushButton(Dialog)
self.button_quit.setGeometry(QtCore.QRect(756, 600, 111, 32))
font = QtGui.QFont()
font.setFamily("Menlo")
self.button_quit.setFont(font)
self.button_quit.setObjectName("button_quit")
self.console = QtWidgets.QLineEdit(Dialog)
self.console.setGeometry(QtCore.QRect(230, 580, 574, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.console.setFont(font)
self.console.setObjectName("console")
self.button_help = QtWidgets.QPushButton(Dialog)
self.button_help.setGeometry(QtCore.QRect(224, 600, 111, 32))
font = QtGui.QFont()
font.setFamily("Menlo")
self.button_help.setFont(font)
self.button_help.setObjectName("button_help")
self.check_code = QtWidgets.QCheckBox(Dialog)
self.check_code.setGeometry(QtCore.QRect(800, 310, 61, 20))
font = QtGui.QFont()
font.setFamily("Menlo")
self.check_code.setFont(font)
self.check_code.setChecked(False)
self.check_code.setObjectName("check_code")
self.button_clear = QtWidgets.QPushButton(Dialog)
self.button_clear.setGeometry(QtCore.QRect(796, 576, 71, 32))
font = QtGui.QFont()
font.setFamily("Menlo")
self.button_clear.setFont(font)
self.button_clear.setObjectName("button_clear")
self.label_info = QtWidgets.QLabel(Dialog)
self.label_info.setGeometry(QtCore.QRect(339, 605, 411, 20))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_info.setFont(font)
self.label_info.setText("")
self.label_info.setAlignment(QtCore.Qt.AlignCenter)
self.label_info.setObjectName("label_info")
self.combo_box_mode = QtWidgets.QComboBox(Dialog)
self.combo_box_mode.setGeometry(QtCore.QRect(227, 306, 101, 30))
font = QtGui.QFont()
font.setFamily("Menlo")
self.combo_box_mode.setFont(font)
self.combo_box_mode.setObjectName("combo_box_mode")
self.combo_box_mode.addItem("")
self.combo_box_mode.addItem("")
self.combo_box_mode.addItem("")
self.search_box = QtWidgets.QLineEdit(Dialog)
self.search_box.setGeometry(QtCore.QRect(20, 70, 181, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.search_box.setFont(font)
self.search_box.setObjectName("search_box")
self.check_more = QtWidgets.QCheckBox(Dialog)
self.check_more.setGeometry(QtCore.QRect(800, 40, 71, 20))
font = QtGui.QFont()
font.setFamily("Menlo")
self.check_more.setFont(font)
self.check_more.setObjectName("check_more")
self.reload_selection_box = QtWidgets.QLineEdit(Dialog)
self.reload_selection_box.setGeometry(QtCore.QRect(20, 24, 81, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.reload_selection_box.setFont(font)
self.reload_selection_box.setObjectName("reload_selection_box")
self.button_reload = QtWidgets.QPushButton(Dialog)
self.button_reload.setGeometry(QtCore.QRect(92, 20, 111, 32))
font = QtGui.QFont()
font.setFamily("Menlo")
self.button_reload.setFont(font)
self.button_reload.setObjectName("button_reload")
self.label_thesaurus = QtWidgets.QLabel(Dialog)
self.label_thesaurus.setGeometry(QtCore.QRect(670, 70, 181, 21))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_thesaurus.setFont(font)
self.label_thesaurus.setTextFormat(QtCore.Qt.AutoText)
self.label_thesaurus.setScaledContents(False)
self.label_thesaurus.setAlignment(QtCore.Qt.AlignCenter)
self.label_thesaurus.setObjectName("label_thesaurus")
self.label_thesaurus_show = QtWidgets.QLabel(Dialog)
self.label_thesaurus_show.setGeometry(QtCore.QRect(670, 100, 181, 191))
font = QtGui.QFont()
font.setFamily("Menlo")
self.label_thesaurus_show.setFont(font)
self.label_thesaurus_show.setText("")
self.label_thesaurus_show.setAlignment(QtCore.Qt.AlignCenter)
self.label_thesaurus_show.setObjectName("label_thesaurus_show")
self.combo_box_asking = QtWidgets.QComboBox(Dialog)
self.combo_box_asking.setGeometry(QtCore.QRect(330, 306, 121, 30))
font = QtGui.QFont()
font.setFamily("Menlo")
self.combo_box_asking.setFont(font)
self.combo_box_asking.setObjectName("combo_box_asking")
self.combo_box_asking.addItem("")
self.combo_box_asking.addItem("")
self.combo_box_asking.addItem("")
self.combo_box_asking.addItem("")
self.actionread_in = QtWidgets.QAction(Dialog)
self.actionread_in.setCheckable(False)
self.actionread_in.setObjectName("actionread_in")
self.retranslateUi(Dialog)
self.combo_box_mode.setCurrentIndex(0)
self.combo_box_asking.setCurrentIndex(0)
self.button_quit.clicked.connect(Dialog.close)
QtCore.QMetaObject.connectSlotsByName(Dialog)
Dialog.setTabOrder(self.console, self.button_show_wordlist)
Dialog.setTabOrder(self.button_show_wordlist, self.button_quit)
Dialog.setTabOrder(self.button_quit, self.console_show_history)
Dialog.setTabOrder(self.console_show_history, self.wordlist)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "AlphabetQs"))
self.label_wordlist.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt; font-weight:600;\">WORD LIST</span></p></body></html>"))
self.label_word.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">WORD</span></p></body></html>"))
self.label_def.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">DEFINITION</span></p></body></html>"))
self.label_samp.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">SAMPLE SENTENCE</span></p></body></html>"))
self.label_samp_show.setText(_translate("Dialog", "<html><head/><body><p><br/></p></body></html>"))
self.label_console.setText(_translate("Dialog", "<html><head/><body><p align=\"center\"><span style=\" font-size:14pt; font-weight:600;\">CONSOLE</span></p></body></html>"))
self.button_show_wordlist.setText(_translate("Dialog", "SHOW"))
self.button_quit.setText(_translate("Dialog", "QUIT"))
self.button_help.setText(_translate("Dialog", "HELP"))
self.check_code.setText(_translate("Dialog", "CODE"))
self.button_clear.setText(_translate("Dialog", "CLEAR"))
self.combo_box_mode.setCurrentText(_translate("Dialog", "SEARCH"))
self.combo_box_mode.setItemText(0, _translate("Dialog", "SEARCH"))
self.combo_box_mode.setItemText(1, _translate("Dialog", "RANDOM"))
self.combo_box_mode.setItemText(2, _translate("Dialog", "ORDER"))
self.search_box.setWhatsThis(_translate("Dialog", "Search Box"))
self.search_box.setPlaceholderText(_translate("Dialog", "Search # or word"))
self.check_more.setText(_translate("Dialog", "MORE"))
self.button_reload.setText(_translate("Dialog", "RELOAD"))
self.label_thesaurus.setText(_translate("Dialog", "<html><head/><body><p><span style=\" font-weight:600; text-decoration: underline;\">THESAURUS</span></p></body></html>"))
self.combo_box_asking.setCurrentText(_translate("Dialog", "WORD"))
self.combo_box_asking.setItemText(0, _translate("Dialog", "WORD"))
self.combo_box_asking.setItemText(1, _translate("Dialog", "DEFINITION"))
self.combo_box_asking.setItemText(2, _translate("Dialog", "SAMPLE"))
self.combo_box_asking.setItemText(3, _translate("Dialog", "THESAURUS"))
self.actionread_in.setText(_translate("Dialog", "read_in"))
|
|
#!/usr/bin/env python
# VMware vSphere Python SDK
# Copyright (c) 2008-2021 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from tools import cli, service_instance, pchelper
from pyVmomi import vim
# Demonstrates:
# =============
# * How to write python 2.7 and 3.3 compatible code in one script
# * How to parse arguments in a python script
# * How to pretty print format a dictionary
# * How to connect to a vSphere instance
# * How to search for virtual machines efficiently
# * How to interrogate virtual machine hardware info
# * How to determine the data type of a dynamic object instance
# * How to build a summary of a virtual device & virtual disk
# * How to interrogate a datastore and its hosts mounts
#
# Not shown, how to ask a datastore for all the virtual machines it 'owns'
#
# Sample output:
#
# $ virtual_machine_device_info.py -s vcsa -u my_user -i 172.16.254.101
#
# Found Virtual Machine
# =====================
# guest OS name : Ubuntu Linux (64-bit)
# name : box
# last booted timestamp : 2014-10-13 01:45:57.647340+00:00
# bios UUID : 420264ab-848b-1586-b589-b9bd3a71b3aa
# path to VM : [storage0] box/box.vmx
# guest OS id : ubuntu64Guest
# host name : esx_host_01
# instance UUID : 500221fe-3473-60ff-fab2-1811600208a0
# Devices:
# --------
# label: IDE 0
# ------------------
# device type : vim.vm.device.VirtualIDEController
# backing type : NoneType
# key : 200
# summary : IDE 0
# label: IDE 1
# ------------------
# device type : vim.vm.device.VirtualIDEController
# backing type : NoneType
# key : 201
# summary : IDE 1
# label: PS2 controller 0
# ------------------
# device type : vim.vm.device.VirtualPS2Controller
# backing type : NoneType
# key : 300
# summary : PS2 controller 0
# label: PCI controller 0
# ------------------
# device type : vim.vm.device.VirtualPCIController
# backing type : NoneType
# key : 100
# summary : PCI controller 0
# label: SIO controller 0
# ------------------
# device type : vim.vm.device.VirtualSIOController
# backing type : NoneType
# key : 400
# summary : SIO controller 0
# label: Keyboard
# ------------------
# device type : vim.vm.device.VirtualKeyboard
# backing type : NoneType
# key : 600
# summary : Keyboard
# label: Pointing device
# ------------------
# device type : vim.vm.device.VirtualPointingDevice
# backing type : vim.vm.device.VirtualPointingDevice.DeviceBackingInfo
# key : 700
# summary : Pointing device; Device
# ------------------
# label: Video card
# ------------------
# device type : vim.vm.device.VirtualVideoCard
# backing type : NoneType
# key : 500
# summary : Video card
# label: VMCI device
# ------------------
# device type : vim.vm.device.VirtualVMCIDevice
# backing type : NoneType
# key : 12000
# summary : Device on the virtual machine PCI bus that provides supp
# label: SCSI controller 0
# ------------------
# device type : vim.vm.device.VirtualLsiLogicController
# backing type : NoneType
# key : 1000
# summary : LSI Logic
# label: Hard disk 1
# ------------------
# device type : vim.vm.device.VirtualDisk
# backing type : vim.vm.device.VirtualDisk.FlatVer2BackingInfo
# key : 2000
# summary : 16,777,216 KB
# datastore
# name: storage0
# host: esx_host_01
# summary
# url: ds:///vmfs/volumes/501fa6d9-8907f56a-fa19-782bcb74158e/
# freeSpace: 5750390784
# file system: VMFS
# capacity: 494726545408
# fileName: [storage0] box/box.vmdk
# device ID: None
# ------------------
# label: CD/DVD drive 1
# ------------------
# device type : vim.vm.device.VirtualCdrom
# backing type : vim.vm.device.VirtualCdrom.AtapiBackingInfo
# key : 3002
# summary : ATAPI /vmfs/devices/cdrom/mpx.vmhba0:C0:T0:L0
# ------------------
# label: Network adapter 1
# ------------------
# device type : vim.vm.device.VirtualE1000
# backing type : vim.vm.device.VirtualEthernetCard.NetworkBackingInfo
# key : 4000
# summary : VM Network
# ------------------
# label: Floppy drive 1
# ------------------
# device type : vim.vm.device.VirtualFloppy
# backing type : vim.vm.device.VirtualFloppy.RemoteDeviceBackingInfo
# key : 8000
# summary : Remote
# ------------------
# =====================
parser = cli.Parser()
parser.add_optional_arguments(cli.Argument.UUID, cli.Argument.VM_IP, cli.Argument.VM_NAME)
args = parser.get_args()
# form a connection...
si = service_instance.connect(args)
# http://pubs.vmware.com/vsphere-55/topic/com.vmware.wssdk.apiref.doc/vim.SearchIndex.html
search_index = si.content.searchIndex
# Wherever possible, find managed objects using durable identifiers that the
# search index can look up directly. This is much better than caching
# information that is non-durable and potentially stale.
vm = None
if args.uuid:
vm = search_index.FindByUuid(None, args.uuid, True)
elif args.vm_ip:
vm = search_index.FindByIp(None, args.vm_ip, True)
elif args.vm_name:
content = si.RetrieveContent()
vm = pchelper.get_obj(content, [vim.VirtualMachine], args.vm_name)
if not vm:
print(u"Could not find a virtual machine to examine.")
sys.exit(1)
print(u"Found Virtual Machine")
print(u"=====================")
details = {'name': vm.summary.config.name,
'instance UUID': vm.summary.config.instanceUuid,
'bios UUID': vm.summary.config.uuid,
'path to VM': vm.summary.config.vmPathName,
'guest OS id': vm.summary.config.guestId,
'guest OS name': vm.summary.config.guestFullName,
'host name': vm.runtime.host.name,
'last booted timestamp': vm.runtime.bootTime}
for name, value in details.items():
print(u" {0:{width}{base}}: {1}".format(name, value, width=25, base='s'))
print(u" Devices:")
print(u" --------")
for device in vm.config.hardware.device:
# diving into each device, we pull out a few interesting bits
dev_details = {'key': device.key,
'summary': device.deviceInfo.summary,
'device type': type(device).__name__,
'backing type': type(device.backing).__name__}
print(u" label: {0}".format(device.deviceInfo.label))
print(u" ------------------")
for name, value in dev_details.items():
print(u" {0:{width}{base}}: {1}".format(name, value,
width=15, base='s'))
if device.backing is None:
continue
# the following is a bit of a hack, but it lets us build a summary
# without making many assumptions about the backing type, if the
# backing type has a file name we *know* it's sitting on a datastore
# and will have to have all of the following attributes.
if hasattr(device.backing, 'fileName'):
datastore = device.backing.datastore
if datastore:
print(u" datastore")
print(u" name: {0}".format(datastore.name))
# there may be multiple hosts, the host property
# is a host mount info type not a host system type
# but we can navigate to the host system from there
for host_mount in datastore.host:
host_system = host_mount.key
print(u" host: {0}".format(host_system.name))
print(u" summary")
summary = {'capacity': datastore.summary.capacity,
'freeSpace': datastore.summary.freeSpace,
'file system': datastore.summary.type,
'url': datastore.summary.url}
for key, val in summary.items():
print(u" {0}: {1}".format(key, val))
print(u" fileName: {0}".format(device.backing.fileName))
print(u" device ID: {0}".format(device.backing.backingObjectId))
print(u" ------------------")
print(u"=====================")
sys.exit()
|
|
#!/usr/bin/env python3
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import json
import os
import subprocess
import sys
import tempfile
from shutil import which
cfg_cflags = {}
cfg_ldflags = {}
cfg_kconfig = {}
makefile_vars = {}
def run_command(cmd):
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT,
shell=True, universal_newlines=True)
return output.replace("\n", "").strip(), True
except subprocess.CalledProcessError as e:
return e.output, False
def handle_pkgconfig_check(args, conf):
dep = conf["dependency"].upper()
pkg = conf["pkgname"]
atleast_ver = conf.get("atleast-version")
max_ver = conf.get("max-version")
exact_ver = conf.get("exact-version")
ver_match = True
if exact_ver:
cmd = "pkg-config --exact-version=%s %s" % (exact_ver, pkg)
result, status = run_command(cmd)
if not status:
ver_match = False
elif atleast_ver:
cmd = "pkg-config --atleast-version=%s %s" % (atleast_ver, pkg)
result, status = run_command(cmd)
if not status:
ver_match = False
elif max_ver:
cmd = "pkg-config --max-version=%s %s" % (max_ver, pkg)
result, status = run_command(cmd)
if not status:
ver_match = False
cflags_stat = None
ldflags_stat = None
if ver_match:
cflags_cmd = "pkg-config --cflags %s" % pkg
ldflags_cmd = "pkg-config --libs %s" % pkg
cflags, cflags_stat = run_command(cflags_cmd)
ldflags, ldflags_stat = run_command(ldflags_cmd)
if cflags_stat:
cfg_cflags["%s_CFLAGS" % dep] = cflags
if ldflags_stat:
cfg_ldflags["%s_LDFLAGS" % dep] = ldflags
have_var = "y" if ((cflags_stat or ldflags_stat) and ver_match) else "n"
cfg_kconfig["HAVE_%s" % dep] = have_var
def compile_test(source, compiler, cflags, ldflags):
f = tempfile.NamedTemporaryFile(suffix=".c",delete=False)
f.write(bytes(source, 'UTF-8'))
f.close()
output = "%s-bin" % f.name
cmd = "{compiler} {cflags} {ldflags} {src} -o {out}".format(compiler=compiler,
cflags=cflags, ldflags=ldflags, src=f.name, out=output)
out, status = run_command(cmd)
if os.path.exists(output):
os.unlink(output)
os.unlink(f.name)
return status
def handle_ccode_check(args, conf):
dep = conf["dependency"].upper()
source = ""
for i in conf["headers"]:
source += "#include %s\n" % i
fragment = conf.get("fragment") or ""
cstub = "{headers}\nint main(int argc, char **argv){{\n {fragment} return 0;\n}}"
source = cstub.format(headers=source, fragment=fragment)
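    # Illustrative stub produced for headers ["<stdio.h>"] and an empty
    # fragment (roughly):
    #
    #   #include <stdio.h>
    #
    #   int main(int argc, char **argv){
    #     return 0;
    #   }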
success = compile_test(source, args.compiler, args.cflags, conf.get("ldflags") or "")
if success:
cfg_cflags["%s_CFLAGS" % dep] = conf.get("cflags") or ""
cfg_ldflags["%s_LDFLAGS" % dep] = conf.get("ldflags") or ""
cfg_kconfig["HAVE_%s" % dep] = "y"
else:
cfg_kconfig["HAVE_%s" % dep] = "n"
def handle_exec_check(args, conf):
dep = conf["dependency"].upper()
path = which(conf["exec"]) or None
makefile_vars[dep] = path
def handle_python_check(args, conf):
dep = conf["dependency"].upper()
if conf.get("pkgname"):
source = "import %s" % conf.get("pkgname")
f = tempfile.NamedTemporaryFile(suffix=".py",delete=False)
f.write(bytes(source, 'UTF-8'))
f.close()
cmd = "%s %s" % (sys.executable, f.name)
output, status = run_command(cmd)
makefile_vars["HAVE_PYTHON_%s" % dep] = "y" if status else "n"
type_handlers = {
"pkg-config": handle_pkgconfig_check,
"ccode": handle_ccode_check,
"exec": handle_exec_check,
"python": handle_python_check,
}
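# Illustrative (hypothetical) shape of the dependencies JSON consumed below;
# the keys mirror the ones read by the handlers above:
#
#   {"dependencies": [
#       {"dependency": "glib", "type": "pkg-config",
#        "pkgname": "glib-2.0", "atleast-version": "2.40"},
#       {"dependency": "valgrind", "type": "exec", "exec": "valgrind"}
#   ]}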
def var_str(items):
output = ""
for k,v in items:
if not v: continue
output += "%s ?= %s\n" % (k, v)
return output
def makefile_gen(args):
output = ""
output += var_str(makefile_vars.items())
output += var_str(cfg_cflags.items())
output += var_str(cfg_ldflags.items())
f = open(args.makefile_output, "w+")
f.write(output)
f.close()
def kconfig_gen(args):
output = ""
for k,v in cfg_kconfig.items():
output += "config {config}\n{indent}bool\n{indent}default {enabled}\n". \
format(config=k, indent=" ", enabled=v)
f = open(args.kconfig_output, "w+")
f.write(output)
f.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--compiler", help="The gcc compiler[for headers based tests]",
type=str, default="gcc")
parser.add_argument("--cflags", help="Additional cflags[for headers based tests]",
type=str, default="")
parser.add_argument("--kconfig-output", help="The kconfig fragment output file",
type=str, default="Kconfig.gen")
parser.add_argument("--makefile-output", help="The makefile fragment output file",
type=str, default="Makefile.gen")
parser.add_argument("--dep-config", help="The dependencies config file",
type=argparse.FileType("r"), default="data/jsons/dependencies.json")
args = parser.parse_args()
conf = json.loads(args.dep_config.read())
dep_checks = conf["dependencies"]
for i in dep_checks:
handler = type_handlers[i["type"]]
if not handler:
print("Could not handle type: %s" % i["type"])
continue
handler(args, i)
makefile_gen(args)
kconfig_gen(args)
|
|
# -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
import warnings
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
import chainladder as cl
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'numpydoc', 'sphinx_gallery.gen_gallery',
'sphinx.ext.githubpages', 'nbsphinx', 'sphinx.ext.mathjax',
'sphinx.ext.autosummary', 'sphinx_gallery.load_style',
'IPython.sphinxext.ipython_console_highlighting']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get('NO_MATHJAX'):
extensions.append('sphinx.ext.imgmath')
imgmath_image_format = 'svg'
else:
extensions.append('sphinx.ext.mathjax')
mathjax_path = ('https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.0/'
'MathJax.js?config=TeX-AMS_SVG')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ChainLadder'
copyright = '2017, John Bogaardt'
author = 'John Bogaardt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = cl.__version__
# The full version, including alpha/beta/rc tags.
release = cl.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'templates', 'includes', 'themes', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': False, 'surveybanner': False,
'sprintbanner': False}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'chainladder'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static/images']
# -- Options for HTMLHelp output ------------------------------------------
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# Output file base name for HTML help builder.
htmlhelp_basename = 'chainladderdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', 'ChainLadder user guide',
                    'John Bogaardt', 'manual'), ]
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctest_flags = True
# intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/{.major}'.format(
sys.version_info), None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('https://matplotlib.org/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'joblib': ('https://joblib.readthedocs.io/en/latest/', None),
}
sphinx_gallery_conf = {
'doc_module': 'chainladder',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {'chainladder': None},
'capture_repr': ()
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the documentation.
# key: first image in set
# value: maximum width of the resized carousel thumbnail
carousel_thumbs = {'sphx_glr_plot_asvanced_triangle_001.png': 600,
'sphx_glr_plot_ave_analysis_001.png': 372,
}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/casact/chainladder-python/issues/{issue}'
issues_github_path = 'chainladder-python/chainladder'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_js_file('js/copybutton.js')
app.add_js_file('js/extra.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('chainladder',
u'https://github.com/casact/'
'chainladder-python/blob/{revision}/'
'{package}/{path}#L{lineno}')
warnings.filterwarnings("ignore", category=UserWarning,
module="matplotlib",
message='Matplotlib is currently using agg, which is a'
' non-GUI backend, so cannot show the figure.')
|
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404, render, redirect
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotAllowed
from django.conf import settings
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.forms.util import ValidationError
from operator import itemgetter
from collections import OrderedDict
from .forms import *
from .xen import Xen
import json
from . import tasks
import string
import random
def gen_password(size=24, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
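# Illustrative usage (not from the original source): gen_password() returns a
# random string drawn from lowercase letters and digits, e.g.
#   gen_password()   -> a 24-character string such as 'q3x0...'
#   gen_password(8)  -> an 8-character string
# Only the length and alphabet are guaranteed; the value differs on every call.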
# Check if big list contains all items in small list
def contains(small, big):
for item in small:
if not item in big:
return False
return True
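# Example (illustrative): contains() answers "is every requested tag present?"
#   contains(['web'], ['web', 'db'])  -> True
#   contains(['web', 'db'], ['web'])  -> False
#   contains([], anything)            -> True (no filter selected)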
# check if network supports ipv6
def network_has_ipv6(poolname, network_uuid, ipv6_addr = None):
if not ipv6_addr:
return True
else:
network_details = Xen(poolname).network_details_uuid(network_uuid)
if not 'XenCenter.CustomFields.network.ipv6' in network_details['other_config']:
return False
return True
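# Sketch of the data this check relies on (inferred from how the key is parsed
# in network_details() below): an IPv6-capable network stores
#   other_config['XenCenter.CustomFields.network.ipv6'] = '<gateway>|<netmask>'
# When no IPv6 address is requested, the network is accepted unconditionally.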
# mainpage
def index(request):
pools = settings.XENPOOLS
host_list = []
for poolname in pools:
hosts = Xen(poolname).get_host_list()
host_list += [{
'pool': poolname,
}]
for host in hosts:
host_list += [{
'host': host[0],
}]
return render(request, 'gridomatic/index.html', {'hosts': host_list})
# vm views
@login_required
def vm_list_combined(request):
data = []
listtags = []
pools = settings.XENPOOLS
filtertags = request.POST.getlist("tags")
form = TagsForm(request.POST or None)
for poolname in pools:
tags = Xen(poolname).get_tags()
for tag in tags:
listtags += [( tag, tag )]
vms = Xen(poolname).vm_list()
for ref,vm in vms.items():
if vm["is_a_template"] or vm['is_a_snapshot'] or vm["is_control_domain"]: continue
if contains(filtertags, vm['tags']):
data += [{
'name': vm['name_label'],
'power_state': vm['power_state'],
'uuid': vm['uuid'],
'poolname': poolname,
}]
sorted_data = sorted(data, key=itemgetter('name'))
listtags = list(set(listtags))
form.fields['tags'].choices = sorted(listtags)
if 'json' in request.REQUEST:
return HttpResponse(json.dumps({'vmlist': sorted_data}), content_type = "application/json")
else:
return render(request, 'gridomatic/vm_list.html', {'vmlist': sorted_data, 'form': form})
@login_required
def vm_details(request, poolname, uuid):
details = Xen(poolname).vm_details(uuid)
if details['power_state'] == 'Running':
host = Xen(poolname).host_details(details['resident_on'])
else:
host = {'name_label': 'Not implemented for stopped VMs yet'}
customfield = {}
for item in details['other_config']:
if 'XenCenter.CustomFields' not in item: continue
field = str(item.split('.')[2])
value = str(details['other_config'][item])
customfield[field] = value
data = []
data += [{
'name': details['name_label'],
'description': details['name_description'],
'poolname': poolname,
'host': host['name_label'],
'uuid': details['uuid'],
'powerstate': details['power_state'],
'vcpus': details['VCPUs_at_startup'],
'memory': details['memory_static_max'],
'tags': details['tags'],
'disks': Xen(poolname).disks_by_vdb(details['VBDs']),
'networks': Xen(poolname).network_details_ref(details['VIFs']),
'customfield': customfield,
}]
if 'json' in request.REQUEST:
return HttpResponse(json.dumps({'vmdetails': data}), content_type = "application/json")
else:
return render(request, 'gridomatic/vm_details.html', {'vmdetails': data})
@login_required
def vm_edit(request, poolname, uuid):
details = Xen(poolname).vm_details(uuid)
backup = False
pooltags = []
tags = Xen(poolname).get_tags()
for tag in tags:
pooltags += [( tag, tag )]
pooltags = list(set(pooltags))
vmtags = details['tags']
customfield = {}
# populate all possible customfields to show empty fields
poolcustomfields = Xen(poolname).get_other_config()
for item in poolcustomfields:
if 'XenCenter.CustomFields' not in item: continue
field = str(item.split('.')[2])
value = poolcustomfields[item]
if value == 'not set':
customfield[field] = value
    # fill the custom fields with already existing data
for item in details['other_config']:
if 'XenCenter.CustomFields' not in item: continue
field = str(item.split('.')[2])
value = str(details['other_config'][item])
customfield[field] = value
# We want a fancy select box for this one
del customfield['backup']
if 'XenCenter.CustomFields.backup' in details['other_config']:
if details['other_config']['XenCenter.CustomFields.backup'] == '1':
backup = True
form = VMEditForm(request.POST or None, extra=customfield ,initial={
'description': details['name_description'],
'cpu_cores': details['VCPUs_at_startup'],
'backup': backup,
'mem_size': int(details['memory_static_max'])/1024/1024,
'tags': vmtags,
})
if details['power_state'] == 'Running':
form.fields['mem_size'].widget.attrs['readonly'] = True
form.fields['cpu_cores'].widget.attrs['readonly'] = True
form.fields['tags'].choices = sorted(pooltags)
if form.is_valid():
Xen(poolname).vm_update(uuid, form.cleaned_data)
return redirect('vm_details', poolname, uuid )
return render(request, 'gridomatic/vm_edit.html', {'details': details, 'form': form, 'poolname': poolname})
@login_required
def vm_start(request, poolname):
uuid = request.POST.get('uuid', None)
task_id = tasks.vm_start.delay(poolname,uuid).id
return HttpResponse(json.dumps({'task_id': task_id}), content_type="application/json")
@login_required
def vm_stop(request, poolname):
uuid = request.POST.get('uuid', None)
task_id = tasks.vm_stop.delay(poolname,uuid).id
return HttpResponse(json.dumps({'task_id': task_id}), content_type="application/json")
@login_required
def vm_destroy(request, poolname):
uuid = request.POST.get('uuid', None)
task_id = tasks.vm_destroy.delay(poolname,uuid).id
return HttpResponse(json.dumps({'task_id': task_id}), content_type="application/json")
@login_required
def vm_restart(request, poolname):
uuid = request.POST.get('uuid', None)
task_id = tasks.vm_restart.delay(poolname,uuid).id
return HttpResponse(json.dumps({'task_id': task_id}), content_type="application/json")
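# Illustrative response of the four task views above (assumption: the caller
# polls Celery for completion using the returned id):
#   {"task_id": "8f14e45f-ceea-467f-a9f3-..."}
# where the id comes from tasks.<action>.delay(poolname, uuid).id.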
@login_required
def vm_create(request, poolname):
customfield = {}
# populate all possible customfields to show empty fields
poolcustomfields = Xen(poolname).get_other_config()
for item in poolcustomfields:
if 'XenCenter.CustomFields' not in item: continue
field = str(item.split('.')[2])
value = poolcustomfields[item]
if value == 'not set':
customfield[field] = ""
# We want a fancy select box for this one
del customfield['backup']
form = VMCreateForm(request.POST or None, extra=customfield, initial={'password': gen_password()} )
x = Xen(poolname)
networks = x.network_list()
network_list = []
for net in networks:
if not 'Production' in networks[net]['tags']: continue
network_list += [(
networks[net]['uuid'],
networks[net]['name_label']
)]
masters = settings.PUPPETMASTERS
puppetmaster_list = []
for master in masters:
puppetmaster_list += [(
masters[master]['hostname'],
master,
)]
pooltags = []
tags = Xen(poolname).get_tags()
for tag in tags:
pooltags += [( tag, tag )]
pooltags = list(set(pooltags))
network_list_sorted = sorted(network_list, key=lambda x: x[1])
form.fields['network'].choices = network_list_sorted
form.fields['template'].choices = sorted(x.get_template_list())
form.fields['host'].choices = sorted(x.get_host_list(), reverse=True)
#form.fields['puppetmaster'].choices = sorted(puppetmaster_list)
form.fields['tags'].choices = sorted(pooltags)
if request.method == 'POST':
if not network_has_ipv6(poolname, request.POST['network'], request.POST['ip_address6']):
form.errors['ip_address6'] = 'Selected Network has no IPv6 support!'
if form.is_valid():
task_id = tasks.vm_deploy.delay(poolname,form.cleaned_data).id
return render(request, 'gridomatic/vm_create_wait.html', {'form': form, 'task_id': task_id, 'poolname': poolname})
return render(request, 'gridomatic/vm_create.html', {'form': form, 'poolname': poolname})
# Network views
@login_required
def network_list_combined(request):
data = []
listtags = []
filtertags = request.POST.getlist("tags")
pools = settings.XENPOOLS
form = TagsForm(request.POST or None)
for poolname in pools:
tags = Xen(poolname).get_tags()
for tag in tags:
listtags += [( tag, tag )]
networks = Xen(poolname).network_list()
for ref, net in networks.items():
if not net['tags']: continue
if contains(filtertags, net['tags']):
data += [{
'name': net['name_label'],
'description': net['name_description'],
'uuid': net['uuid'],
'poolname': poolname,
}]
listtags = list(set(listtags))
form.fields['tags'].choices = sorted(listtags)
sorted_data = sorted(data, key=itemgetter('name'))
if 'json' in request.REQUEST:
return HttpResponse(json.dumps({'networklist': sorted_data }), content_type = "application/json")
else:
return render(request, 'gridomatic/network_list.html', {'networklist': sorted_data, 'form': form })
@login_required
def network_create(request, poolname):
form = NetworkCreateForm(request.POST or None)
if form.is_valid():
Xen(poolname).network_create(form.cleaned_data)
return redirect('network_list_combined')
return render(request, 'gridomatic/network_create.html', {'form': form})
@login_required
def network_details(request, poolname, uuid):
details = Xen(poolname).network_details_uuid(uuid)
vifs = details['VIFs']
vms = Xen(poolname).vmdetails_by_vif(vifs)
data = []
if 'XenCenter.CustomFields.network.ipv6' in details['other_config']:
ipv6_gateway = str(details['other_config']['XenCenter.CustomFields.network.ipv6']).split('|', 2)[0]
ipv6_netmask = str(details['other_config']['XenCenter.CustomFields.network.ipv6']).split('|', 2)[1]
else:
ipv6_gateway = ""
ipv6_netmask = ""
get_vlan = Xen(poolname).pif_details(details['PIFs'][0])
data += [{
'name': details['name_label'],
'description': details['name_description'],
'ipv4_gateway': str(details['other_config']['XenCenter.CustomFields.network.ipv4']).split('|', 2)[0],
'ipv4_netmask': str(details['other_config']['XenCenter.CustomFields.network.ipv4']).split('|', 2)[1],
'ipv6_gateway': ipv6_gateway,
'ipv6_netmask': ipv6_netmask,
'dns_servers': str(details['other_config']['XenCenter.CustomFields.network.dns']),
'VLAN': get_vlan['VLAN'],
'uuid': details['uuid'],
'mtu': details['MTU'],
'tags': details['tags'],
'vms': vms,
'poolname': poolname,
}]
if 'json' in request.REQUEST:
return HttpResponse(json.dumps({'networkdetails': sorted(data)}), content_type = "application/json")
else:
return render(request, 'gridomatic/network_details.html', {'networkdetails': sorted(data)})
@login_required
def network_edit(request, poolname, uuid):
details = Xen(poolname).network_details_uuid(uuid)
form = NetworkEditForm(request.POST or None, initial={
'name': details['name_label'],
'description': details['name_description'],
})
if form.is_valid():
Xen(poolname).network_update(uuid, form.cleaned_data)
return redirect('network_details', poolname, uuid )
return render(request, 'gridomatic/network_edit.html', {'details': details, 'form': form, 'poolname': poolname})
|
|
# models.py
from django.db import models
from django.db.models.signals import pre_save
import os
import socket
import datetime
import random
# from django.forms import Textarea
from django import forms
from .unique_slugify import unique_slugify
from functools import reduce
def time2s(time):
""" given 's.s' or 'h:m:s.s' returns s.s """
if time:
sec = reduce(lambda x, i: x*60 + i,
list(map(float, time.split(':'))))
else:
sec = 0.0
return sec
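# Worked example (illustrative): time2s() folds the colon-separated fields
# left to right, so
#   time2s('1:02:03.5') == (1*60 + 2)*60 + 3.5 == 3723.5
#   time2s('90')        == 90.0
#   time2s('')          == 0.0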
class Client(models.Model):
sequence = models.IntegerField(default=1)
active = models.BooleanField(default=True,
help_text="Turn off to hide from UI.")
name = models.CharField(max_length=135)
slug = models.CharField(max_length=135, blank=True, null=False,
help_text="dir name to store input files", )
contacts = models.CharField(max_length=300, blank=True,
help_text='emails of people putting on the event.')
description = models.TextField(blank=True)
tags = models.TextField(null=True,blank=True,)
tweet_prefix = models.CharField(max_length=30, blank=True, null=True)
bucket_id = models.CharField(max_length=30, blank=True, null=True)
category_key = models.CharField(max_length=30, blank=True, null=True,
help_text = "Category for Richard")
# video encoding
template_mlt = models.CharField(max_length=60, null=True,
default="template.mlt",
help_text='template to make cutlist mlt from.')
title_svg = models.CharField(max_length=60, null=True,
default="title.svg",
help_text='template for event/title/authors title slide.')
preroll = models.CharField(max_length=335, blank=True,
help_text="name of video to prepend (not implemented)")
postroll = models.CharField(max_length=335, blank=True,
help_text="name of video to postpend (not implemented)")
credits = models.CharField(max_length=30, blank=True,
default="ndv-169.png",
help_text='added to end, store in assets dir')
# remote accounts to post to
host_user = models.CharField(max_length=30, blank=True, null=True,
help_text = "depricated - do not use.")
youtube_id = models.CharField(max_length=10, blank=True, null=True,
help_text = "key to lookup user/pw/etc from pw store" )
archive_id = models.CharField(max_length=10, blank=True, null=True)
vimeo_id = models.CharField(max_length=10, blank=True, null=True)
blip_id = models.CharField(max_length=10, blank=True, null=True)
rax_id = models.CharField(max_length=10, blank=True, null=True)
richard_id = models.CharField(max_length=10, blank=True, null=True)
email_id = models.CharField(max_length=10, blank=True, null=True)
tweet_id = models.CharField(max_length=10, blank=True, null=True)
def __str__(self):
return self.name
@models.permalink
def get_absolute_url(self):
return ('client', [self.slug,])
class Meta:
ordering = ["sequence"]
class Location(models.Model):
sequence = models.IntegerField(default=1)
active = models.BooleanField( default=True,
help_text="Turn off to hide from UI.")
default = models.BooleanField(default=True,
help_text="Adds this loc to new Clients.")
name = models.CharField(max_length=135,
default=socket.gethostname(),
help_text="room name")
slug = models.CharField(max_length=135, blank=True, null=False,
help_text="dir name to store input files")
dirname = models.CharField(max_length=135, blank=True,
help_text="path to raw files. overrieds show/slug.")
channelcopy = models.CharField(max_length=2, blank=True,
help_text='audio adjustment for this room')
hours_offset = models.IntegerField(blank=True, null=True,
help_text='Adjust for bad clock setting')
description = models.TextField(blank=True)
lon = models.FloatField(null=True, blank=True )
lat = models.FloatField(null=True, blank=True )
def natural_key(self):
return self.name
def __str__(self):
return "%s" % ( self.name )
class Meta:
ordering = ["sequence"]
ANN_STATES=((1,'preview'),(2,'review'),(3,'approved'))
class Show(models.Model):
client = models.ForeignKey(Client)
locations = models.ManyToManyField(Location,
limit_choices_to={'active': True},
blank=True)
sequence = models.IntegerField(default=1)
active = models.BooleanField( default=True,
help_text="Turn off to hide from UI.")
name = models.CharField(max_length=135)
slug = models.CharField(max_length=135, blank=True, null=False,
help_text="dir name to store input files")
category_key = models.CharField(max_length=30, blank=True, null=True,
help_text = "Category for Richard")
tags = models.TextField(null=True,blank=True,)
description = models.TextField(blank=True)
conf_url = models.CharField(max_length=200, null=True, blank=True)
schedule_url = models.CharField(max_length=235, null=True, blank=True)
announcement_state = models.IntegerField(null=True, blank=True,
choices=ANN_STATES, default=ANN_STATES[1][0], )
@property
def client_name(self):
return self.client
def __str__(self):
return "%s: %s" % ( self.client_name, self.name )
@models.permalink
def get_absolute_url(self):
return ('episode_list', [self.client.slug,self.slug,])
class Meta:
ordering = ["sequence"]
class Raw_File(models.Model):
location = models.ForeignKey(Location)
show = models.ForeignKey(Show)
filename = models.CharField(max_length=135,help_text="filename.dv")
filesize = models.BigIntegerField(default=1,help_text="size in bytes")
start = models.DateTimeField(null=True, blank=True,
help_text='when recorded (should agree with file name and timestamp)')
duration = models.CharField(max_length=11, blank=True, )
end = models.DateTimeField(null=True, blank=True)
trash = models.BooleanField(default=False,
help_text="This clip is trash")
ocrtext = models.TextField(null=True,blank=True)
comment = models.TextField(blank=True)
def __next__(self):
"""
gets the next clip in the room.
"""
rfs = Raw_File.objects.filter(location=self.location,
start__gt=self.start,
).order_by('start','id')
# id__gt=self.id).order_by('start','id')
if rfs:
rf=rfs[0]
else:
rf=None
return rf
def basename(self):
# strip the extension
# good for making 1-2-3/foo.png from 1-2-3/foo.dv
return os.path.splitext(self.filename)[0]
def base_url(self):
""" Returns the url for the file, minus the MEDIA_URL and extension """
return "%s/%s/dv/%s/%s" % (self.show.client.slug,
self.show.slug,
self.location.slug,
self.filename)
def get_start_seconds(self):
return time2s( self.start )
def get_end_seconds(self):
return time2s( self.end )
def get_seconds(self):
        # return duration in seconds (float)
delta = self.end - self.start
seconds = delta.days*24*60*60 + delta.seconds
return seconds
def get_minutes(self):
        # return duration in minutes (float)
return self.get_seconds()/60.0
def __str__(self):
return self.filename
@models.permalink
def get_absolute_url(self):
return ('raw_file', [self.id,])
class Meta:
ordering = ["start", "location", "filename"]
class Mark(models.Model):
location = models.ForeignKey(Location)
show = models.ForeignKey(Show)
click = models.DateTimeField(
help_text='When Cut was Clicked.')
class Quality(models.Model):
level = models.IntegerField()
name = models.CharField(max_length=35)
description = models.TextField(blank=True)
def __str__(self):
return self.name
STATES=[
(0, 'borked'),
(1, 'edit'), # enter cutlist data
(2, 'encode'), # assemble raw assets into final cut
(3, 'push to queue'), # push to data center box
(4, 'post'), # push to yourube and archive.org
(5, 'richard'), # push urls and description to PyVideo.org
(6, 'review 1'), # staff check to see if they exist on yourube/archive
(7, 'email'), # send private url to presenter, ask for feedback,
(8, 'review 2'), # wait for presenter to say good, or timeout
(9, 'make public'), # flip private to public
(10, 'tweet'), # tell world
(11, 'to-miror'),
(12, 'done')
]
class Episode(models.Model):
show = models.ForeignKey(Show)
location = models.ForeignKey(Location, null=True)
active = models.BooleanField(default=True,
help_text="Turn off to hide from UI.")
state = models.IntegerField(null=True, blank=True,
choices=STATES, default=STATES[1][0],
help_text="" )
locked = models.DateTimeField(null=True, blank=True,
help_text="clear this to unlock")
locked_by = models.CharField(max_length=35, blank=True,
help_text="user/process that locked." )
sequence = models.IntegerField(null=True,blank=True,
help_text="process order")
start = models.DateTimeField(blank=True, null=False,
help_text="initially scheduled time from master, adjusted to match reality")
duration = models.CharField(max_length=15,null=True,blank=True,
help_text="length in hh:mm:ss")
end = models.DateTimeField(blank=True, null=False,
help_text="(calculated if start and duration are set.)")
name = models.CharField(max_length=170,
help_text="Talk title (synced from primary source)")
slug = models.CharField(max_length=170, blank=True, null=False,
help_text="file name friendly version of name")
priority = models.IntegerField(null=True,blank=True,
help_text="lower may not get recorded")
released = models.NullBooleanField(null=True,blank=True,
help_text="has someone authorised pubication")
conf_key = models.CharField(max_length=32, blank=True,
help_text='primary key of event in conference system database.')
conf_url = models.CharField(max_length=335,blank=True,default='',
help_text="Event's details on conference site (name,desc,time,author,files,etc)")
conf_meta = models.TextField(blank=True,default='', null=True,
help_text="Data provided by API")
authors = models.TextField(null=True,blank=True,)
emails = models.TextField(null=True,blank=True,
help_text="email(s) of the presenter(s)")
twitter_id = models.CharField(max_length=135, blank=True, null=True,
help_text="Data provided by API")
language = models.CharField(max_length=20, blank=True, null=True,
help_text="Spoken languge (German, English...)")
edit_key = models.CharField(max_length=32,
blank=True,
null=True,
              # callable default so each new Episode gets its own random key
              default=lambda: str(random.randint(10000000, 99999999)),
help_text="key to allow unauthenticated users to edit this item.")
summary = models.TextField(blank=True, help_text="short", null=True)
description = models.TextField(blank=True, help_text="markdown")
tags = models.CharField(max_length=175,null=True,blank=True,)
normalise = models.CharField(max_length=5,null=True,blank=True, )
channelcopy = models.CharField(max_length=2,null=True,blank=True,
help_text='m=mono, 01=copy left to right, 10=right to left.' )
license = models.CharField(max_length=20, null=True,blank=True,
default='CC BY-SA',
help_text='see http://creativecommons.org/licenses/')
hidden = models.NullBooleanField(null=True,blank=True,
        help_text='hidden (does not show up on the public episode list)')
thumbnail = models.CharField(max_length=135,blank=True,
help_text="filename.png" )
host_url = models.CharField(max_length=235, null=True,blank=True,
help_text = "URL of page video is hosted")
public_url = models.CharField(max_length=335, null=True,blank=True,
help_text = "URL public should use (like pvo or some aggregator")
archive_ogv_url = models.CharField(max_length=355, null=True,blank=True,
help_text = "URL public can use to dl an ogv (like archive.org")
archive_url = models.CharField(max_length=355, null=True,blank=True,
help_text = "not sure.. deprecated?")
archive_mp4_url = models.CharField(max_length=355, null=True,blank=True,
help_text = "URL public can use to dl an mp4. (like archive.org")
rax_mp4_url = models.CharField(max_length=355, null=True,blank=True,
help_text = "URL public can use to get an mp4. (like rackspace cdn")
twitter_url = models.CharField(max_length=135, null=True,blank=True,
help_text = "URL of tweet to email presenters for retweeting")
video_quality = models.ForeignKey(Quality,null=True,blank=True,related_name='video_quality')
audio_quality = models.ForeignKey(Quality,null=True,blank=True,related_name='audio_quality')
comment = models.TextField(blank=True, help_text="production notes")
stop = models.NullBooleanField(
help_text="Stop process.py from processing anymore")
formfield_overrides = {
models.TextField: {
'widget': forms.Textarea({'cols': 80, 'rows': 2}),
}}
@models.permalink
def get_absolute_url(self):
return ('episode', [self.id])
def __str__(self):
return self.name
def cuts_time(self):
        # get total time in seconds of video based on selected cuts.
# or None if there are no clips.
cuts = Cut_List.objects.filter(episode=self, apply=True)
if not cuts:
ret = None
else:
s=0
for cut in cuts:
                s+=int(cut.duration()) # duration is in seconds :p
ret = s
return ret
def get_minutes(self):
delta = self.end - self.start
minutes = delta.days*60*24 + delta.seconds/60.0
return int(minutes)
def add_email(self, email):
if self.emails is None: emails=[]
else: emails = self.emails.split(',')
if email not in emails:
if self.emails:
emails.append(email)
self.emails = ','.join(emails)
else:
self.emails = email
self.save()
class Meta:
ordering = ["sequence"]
# unique_together = [("show", "slug")]
class Cut_List(models.Model):
"""
    note: this should be Cut_list_ITEM
because it is not the whole list, just one entry.
"""
raw_file = models.ForeignKey(Raw_File)
episode = models.ForeignKey(Episode)
sequence = models.IntegerField(default=1)
start = models.CharField(max_length=11, blank=True,
help_text='offset from start in HH:MM:SS.ss')
end = models.CharField(max_length=11, blank=True,
help_text='offset from start in HH:MM:SS.ss')
apply = models.BooleanField(default=1)
comment = models.TextField(blank=True)
@models.permalink
def get_absolute_url(self):
return ('episode', [self.episode.id])
def __str__(self):
return "%s - %s" % (self.raw_file, self.episode.name)
class Meta:
ordering = ["sequence"]
def get_start_seconds(self):
return time2s( self.start )
def get_start_wall(self):
if self.start:
return self.raw_file.start + \
datetime.timedelta(seconds=self.get_start_seconds())
else:
return self.raw_file.start
def get_end_seconds(self):
return time2s( self.end )
def get_end_wall(self):
if self.end:
return self.raw_file.start + \
datetime.timedelta(seconds=self.get_end_seconds())
else:
return self.raw_file.end
def duration(self):
        # calc size of clip in seconds
# may be size of raw, but take into account trimming start/end
def to_sec(time, default=0):
# convert h:m:s to s
if time:
sec = reduce(lambda x, i: x*60 + i,
list(map(float, time.split(':'))))
else:
sec=default
return sec
start = to_sec( self.start )
end = to_sec( self.end, to_sec(self.raw_file.duration))
dur = end-start
return dur
def duration_hms(self):
seconds = self.duration()
hms = seconds//3600, (seconds%3600)//60, seconds%60
duration = "%02d:%02d:%02d" % hms
return duration
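    # Worked example (illustrative): with raw_file.duration == '0:10:00',
    # start == '0:01:00' and end left blank, duration() == 600 - 60 == 540
    # seconds and duration_hms() == '00:09:00'.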
def base_url(self):
""" Returns the url for the file, minus the MEDIA_URL and extension """
return self.raw_file.base_url()
class State(models.Model):
sequence = models.IntegerField(default=1)
slug = models.CharField(max_length=30)
description = models.CharField(max_length=135, blank=True)
class Meta:
ordering = ["sequence"]
def __str__(self):
return self.slug
class Image_File(models.Model):
show = models.ForeignKey(Show)
location = models.ForeignKey(Location, null=True)
episodes = models.ManyToManyField(Episode, blank=True)
filename = models.CharField(max_length=135, help_text="foo.png")
text = models.TextField(blank=True, help_text="OCRed text")
class Log(models.Model):
episode = models.ForeignKey(Episode)
state = models.ForeignKey(State, null=True, blank=True)
ready = models.DateTimeField()
start = models.DateTimeField(null=True, blank=True)
end = models.DateTimeField(null=True, blank=True)
user = models.CharField(max_length=50)
result = models.CharField(max_length=250)
def duration(self):
if self.start and self.end:
dur = self.end - self.start
dur = datetime.timedelta(dur.days,dur.seconds)
return dur
else:
return None
def set_slug(sender, instance, **kwargs):
if not instance.slug or instance.slug is None:
# instance.slug = fnify(instance.name)
return unique_slugify(instance, instance.name)
def set_end(sender, instance, **kwargs):
if instance.start:
if instance.duration:
seconds = reduce(lambda x, i: x*60 + i,
list(map(float, instance.duration.split(':'))))
instance.end = instance.start + \
datetime.timedelta(seconds=seconds)
elif instance.end:
# calc duration based on End
d = instance.end - instance.start
seconds = d.total_seconds()
hms = seconds//3600, (seconds%3600)//60, seconds%60
instance.duration = "%02d:%02d:%02d" % hms
else:
instance.end = None
else:
instance.end = None
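# Illustrative behaviour of the pre_save hooks connected below (not from the
# source): saving an Episode with start=10:00 and duration='00:30:00' fills
# end=10:30; saving with start and end set but no duration back-fills
# duration as 'HH:MM:SS'; without a start, end is cleared to None.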
pre_save.connect(set_slug,sender=Location)
pre_save.connect(set_slug,sender=Episode)
pre_save.connect(set_end,sender=Episode)
pre_save.connect(set_end,sender=Raw_File)
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import logging
import optparse
import os
import random
import sys
import tempfile
import time
from telemetry import decorators
from telemetry.core import browser_finder
from telemetry.core import browser_info
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.core.platform.profiler import profiler_finder
from telemetry.page import cloud_storage
from telemetry.page import page_filter
from telemetry.page import page_runner_repeat
from telemetry.page import page_test
from telemetry.page.actions import navigate
from telemetry.page.actions import page_action
from telemetry.results import results_options
from telemetry.util import exception_formatter
class _RunState(object):
def __init__(self):
self.browser = None
self._append_to_existing_wpr = False
self._last_archive_path = None
self._first_browser = True
self.first_page = collections.defaultdict(lambda: True)
self.profiler_dir = None
self.repeat_state = None
def StartBrowserIfNeeded(self, test, page_set, page, possible_browser,
credentials_path, archive_path, finder_options):
started_browser = not self.browser
# Create a browser.
if not self.browser:
test.CustomizeBrowserOptionsForSinglePage(page, finder_options)
self.browser = possible_browser.Create()
self.browser.credentials.credentials_path = credentials_path
# Set up WPR path on the new browser.
self.browser.SetReplayArchivePath(archive_path,
self._append_to_existing_wpr,
page_set.make_javascript_deterministic)
self._last_archive_path = page.archive_path
test.WillStartBrowser(self.browser)
self.browser.Start()
test.DidStartBrowser(self.browser)
if self._first_browser:
self._first_browser = False
self.browser.credentials.WarnIfMissingCredentials(page_set)
logging.info('OS: %s %s',
self.browser.platform.GetOSName(),
self.browser.platform.GetOSVersionName())
if self.browser.supports_system_info:
system_info = self.browser.GetSystemInfo()
if system_info.model_name:
logging.info('Model: %s', system_info.model_name)
if system_info.gpu:
for i, device in enumerate(system_info.gpu.devices):
logging.info('GPU device %d: %s', i, device)
if system_info.gpu.aux_attributes:
logging.info('GPU Attributes:')
for k, v in sorted(system_info.gpu.aux_attributes.iteritems()):
logging.info(' %-20s: %s', k, v)
if system_info.gpu.feature_status:
logging.info('Feature Status:')
for k, v in sorted(system_info.gpu.feature_status.iteritems()):
logging.info(' %-20s: %s', k, v)
if system_info.gpu.driver_bug_workarounds:
logging.info('Driver Bug Workarounds:')
for workaround in system_info.gpu.driver_bug_workarounds:
logging.info(' %s', workaround)
else:
logging.info('No GPU devices')
else:
# Set up WPR path if it changed.
if page.archive_path and self._last_archive_path != page.archive_path:
self.browser.SetReplayArchivePath(
page.archive_path,
self._append_to_existing_wpr,
page_set.make_javascript_deterministic)
self._last_archive_path = page.archive_path
if self.browser.supports_tab_control and test.close_tabs_before_run:
# Create a tab if there's none.
if len(self.browser.tabs) == 0:
self.browser.tabs.New()
# Ensure only one tab is open, unless the test is a multi-tab test.
if not test.is_multi_tab_test:
while len(self.browser.tabs) > 1:
self.browser.tabs[-1].Close()
# Must wait for tab to commit otherwise it can commit after the next
# navigation has begun and RenderFrameHostManager::DidNavigateMainFrame()
# will cancel the next navigation because it's pending. This manifests as
    # the first navigation in a PageSet freezing indefinitely because the
# navigation was silently cancelled when |self.browser.tabs[0]| was
# committed. Only do this when we just started the browser, otherwise
# there are cases where previous pages in a PageSet never complete
# loading so we'll wait forever.
if started_browser:
self.browser.tabs[0].WaitForDocumentReadyStateToBeComplete()
def StopBrowser(self):
if self.browser:
self.browser.Close()
self.browser = None
# Restarting the state will also restart the wpr server. If we're
# recording, we need to continue adding into the same wpr archive,
# not overwrite it.
self._append_to_existing_wpr = True
def StartProfiling(self, page, finder_options):
if not self.profiler_dir:
self.profiler_dir = tempfile.mkdtemp()
output_file = os.path.join(self.profiler_dir, page.file_safe_name)
is_repeating = (finder_options.page_repeat != 1 or
finder_options.pageset_repeat != 1)
if is_repeating:
output_file = util.GetSequentialFileName(output_file)
self.browser.StartProfiling(finder_options.profiler, output_file)
def StopProfiling(self):
if self.browser:
self.browser.StopProfiling()
class PageState(object):
def __init__(self, page, tab):
self.page = page
self.tab = tab
self._did_login = False
def PreparePage(self, test=None):
if self.page.is_file:
server_started = self.tab.browser.SetHTTPServerDirectories(
self.page.page_set.serving_dirs | set([self.page.serving_dir]))
if server_started and test:
test.DidStartHTTPServer(self.tab)
if self.page.credentials:
if not self.tab.browser.credentials.LoginNeeded(
self.tab, self.page.credentials):
raise page_test.Failure('Login as ' + self.page.credentials + ' failed')
self._did_login = True
if test:
if test.clear_cache_before_each_run:
self.tab.ClearCache(force=True)
def ImplicitPageNavigation(self, test=None):
"""Executes the implicit navigation that occurs for every page iteration.
This function will be called once per page before any actions are executed.
"""
if test:
test.WillNavigateToPage(self.page, self.tab)
test.RunNavigateSteps(self.page, self.tab)
test.DidNavigateToPage(self.page, self.tab)
else:
i = navigate.NavigateAction()
i.RunAction(self.page, self.tab, None)
def CleanUpPage(self, test):
test.CleanUpAfterPage(self.page, self.tab)
if self.page.credentials and self._did_login:
self.tab.browser.credentials.LoginNoLongerNeeded(
self.tab, self.page.credentials)
def AddCommandLineArgs(parser):
page_filter.PageFilter.AddCommandLineArgs(parser)
results_options.AddResultsOptions(parser)
# Page set options
group = optparse.OptionGroup(parser, 'Page set ordering and repeat options')
group.add_option('--pageset-shuffle', action='store_true',
dest='pageset_shuffle',
help='Shuffle the order of pages within a pageset.')
group.add_option('--pageset-shuffle-order-file',
dest='pageset_shuffle_order_file', default=None,
help='Filename of an output of a previously run test on the current '
'pageset. The tests will run in the same order again, overriding '
'what is specified by --page-repeat and --pageset-repeat.')
group.add_option('--page-repeat', default=1, type='int',
help='Number of times to repeat each individual page '
'before proceeding with the next page in the pageset.')
group.add_option('--pageset-repeat', default=1, type='int',
help='Number of times to repeat the entire pageset.')
parser.add_option_group(group)
# WPR options
group = optparse.OptionGroup(parser, 'Web Page Replay options')
group.add_option('--use-live-sites',
dest='use_live_sites', action='store_true',
help='Run against live sites and ignore the Web Page Replay archives.')
parser.add_option_group(group)
def ProcessCommandLineArgs(parser, args):
page_filter.PageFilter.ProcessCommandLineArgs(parser, args)
# Page set options
if args.pageset_shuffle_order_file and not args.pageset_shuffle:
parser.error('--pageset-shuffle-order-file requires --pageset-shuffle.')
if args.page_repeat < 1:
parser.error('--page-repeat must be a positive integer.')
if args.pageset_repeat < 1:
parser.error('--pageset-repeat must be a positive integer.')
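# Illustrative invocation (not from the source): repeating every page three
# times in a shuffled order would pass
#   --pageset-shuffle --page-repeat 3 --pageset-repeat 1
# Both repeat counts must be >= 1 or ProcessCommandLineArgs() rejects them.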
def _PrepareAndRunPage(test, page_set, expectations, finder_options,
browser_options, page, credentials_path,
possible_browser, results, state):
if finder_options.use_live_sites:
browser_options.wpr_mode = wpr_modes.WPR_OFF
elif browser_options.wpr_mode != wpr_modes.WPR_RECORD:
browser_options.wpr_mode = (
wpr_modes.WPR_REPLAY
if page.archive_path and os.path.isfile(page.archive_path)
else wpr_modes.WPR_OFF)
tries = test.attempts
while tries:
tries -= 1
try:
results_for_current_run = copy.copy(results)
results_for_current_run.StartTest(page)
if test.RestartBrowserBeforeEachPage() or page.startup_url:
state.StopBrowser()
# If we are restarting the browser for each page customize the per page
# options for just the current page before starting the browser.
state.StartBrowserIfNeeded(test, page_set, page, possible_browser,
credentials_path, page.archive_path,
finder_options)
if not page.CanRunOnBrowser(browser_info.BrowserInfo(state.browser)):
logging.info('Skip test for page %s because browser is not supported.'
% page.url)
results_for_current_run.StopTest(page)
return results
expectation = expectations.GetExpectationForPage(state.browser, page)
_WaitForThermalThrottlingIfNeeded(state.browser.platform)
if finder_options.profiler:
state.StartProfiling(page, finder_options)
try:
_RunPage(test, page, state, expectation,
results_for_current_run, finder_options)
_CheckThermalThrottling(state.browser.platform)
except exceptions.TabCrashException as e:
if test.is_multi_tab_test:
logging.error('Aborting multi-tab test after tab %s crashed',
page.url)
raise
logging.warning(e)
state.StopBrowser()
if finder_options.profiler:
state.StopProfiling()
if (test.StopBrowserAfterPage(state.browser, page)):
state.StopBrowser()
results_for_current_run.StopTest(page)
if state.first_page[page]:
state.first_page[page] = False
if test.discard_first_result:
return results
return results_for_current_run
except exceptions.BrowserGoneException as e:
state.StopBrowser()
if not tries:
logging.error('Aborting after too many retries')
raise
if test.is_multi_tab_test:
logging.error('Aborting multi-tab test after browser crashed')
raise
logging.warning(e)
def _UpdatePageSetArchivesIfChanged(page_set):
# Attempt to download the credentials file.
if page_set.credentials_path:
try:
cloud_storage.GetIfChanged(
os.path.join(page_set.base_dir, page_set.credentials_path))
except (cloud_storage.CredentialsError, cloud_storage.PermissionError,
cloud_storage.CloudStorageError) as e:
logging.warning('Cannot retrieve credential file %s due to cloud storage '
'error %s', page_set.credentials_path, str(e))
# Scan every serving directory for .sha1 files
# and download them from Cloud Storage. Assume all data is public.
all_serving_dirs = page_set.serving_dirs.copy()
# Add individual page dirs to all serving dirs.
for page in page_set:
if page.is_file:
all_serving_dirs.add(page.serving_dir)
# Scan all serving dirs.
for serving_dir in all_serving_dirs:
if os.path.splitdrive(serving_dir)[1] == '/':
raise ValueError('Trying to serve root directory from HTTP server.')
for dirpath, _, filenames in os.walk(serving_dir):
for filename in filenames:
path, extension = os.path.splitext(
os.path.join(dirpath, filename))
if extension != '.sha1':
continue
cloud_storage.GetIfChanged(path)
def Run(test, page_set, expectations, finder_options):
"""Runs a given test against a given page_set with the given options."""
results = results_options.PrepareResults(test, finder_options)
test.ValidatePageSet(page_set)
# Create a possible_browser with the given options.
try:
possible_browser = browser_finder.FindBrowser(finder_options)
  except browser_finder.BrowserTypeRequiredException as e:
sys.stderr.write(str(e) + '\n')
sys.exit(-1)
if not possible_browser:
sys.stderr.write(
'No browser found. Available browsers:\n' +
'\n'.join(browser_finder.GetAllAvailableBrowserTypes(finder_options)) +
'\n')
sys.exit(-1)
browser_options = possible_browser.finder_options.browser_options
browser_options.browser_type = possible_browser.browser_type
browser_options.platform = possible_browser.platform
test.CustomizeBrowserOptions(browser_options)
if not decorators.IsEnabled(
test, browser_options.browser_type, browser_options.platform):
return results
# Reorder page set based on options.
pages = _ShuffleAndFilterPageSet(page_set, finder_options)
if (not finder_options.use_live_sites and
browser_options.wpr_mode != wpr_modes.WPR_RECORD):
_UpdatePageSetArchivesIfChanged(page_set)
pages = _CheckArchives(page_set, pages, results)
# Verify credentials path.
credentials_path = None
if page_set.credentials_path:
credentials_path = os.path.join(os.path.dirname(page_set.file_path),
page_set.credentials_path)
if not os.path.exists(credentials_path):
credentials_path = None
# Set up user agent.
browser_options.browser_user_agent_type = page_set.user_agent_type or None
if finder_options.profiler:
profiler_class = profiler_finder.FindProfiler(finder_options.profiler)
profiler_class.CustomizeBrowserOptions(browser_options.browser_type,
finder_options)
for page in list(pages):
if not test.CanRunForPage(page):
logging.debug('Skipping test: it cannot run for %s', page.url)
results.AddSkip(page, 'Test cannot run')
pages.remove(page)
if not pages:
return results
state = _RunState()
# TODO(dtu): Move results creation and results_for_current_run into RunState.
try:
test.WillRunTest(finder_options)
state.repeat_state = page_runner_repeat.PageRunnerRepeatState(
finder_options)
state.repeat_state.WillRunPageSet()
while state.repeat_state.ShouldRepeatPageSet() and not test.IsExiting():
for page in pages:
state.repeat_state.WillRunPage()
test.WillRunPageRepeats(page)
while state.repeat_state.ShouldRepeatPage():
results = _PrepareAndRunPage(
test, page_set, expectations, finder_options, browser_options,
page, credentials_path, possible_browser, results, state)
state.repeat_state.DidRunPage()
test.DidRunPageRepeats(page)
        if (test.max_failures is not None and
len(results.failures) > test.max_failures):
logging.error('Too many failures. Aborting.')
test.RequestExit()
        if (test.max_errors is not None and
len(results.errors) > test.max_errors):
logging.error('Too many errors. Aborting.')
test.RequestExit()
if test.IsExiting():
break
state.repeat_state.DidRunPageSet()
test.DidRunTest(state.browser, results)
finally:
state.StopBrowser()
return results
def _ShuffleAndFilterPageSet(page_set, finder_options):
if finder_options.pageset_shuffle_order_file:
return page_set.ReorderPageSet(finder_options.pageset_shuffle_order_file)
pages = [page for page in page_set.pages[:]
if not page.disabled and page_filter.PageFilter.IsSelected(page)]
if finder_options.pageset_shuffle:
random.Random().shuffle(pages)
return pages
def _CheckArchives(page_set, pages, results):
"""Returns a subset of pages that are local or have WPR archives.
Logs warnings if any are missing."""
page_set_has_live_sites = False
for page in pages:
if not page.is_local:
page_set_has_live_sites = True
break
# Potential problems with the entire page set.
if page_set_has_live_sites:
if not page_set.archive_data_file:
logging.warning('The page set is missing an "archive_data_file" '
'property. Skipping any live sites. To include them, '
'pass the flag --use-live-sites.')
if not page_set.wpr_archive_info:
logging.warning('The archive info file is missing. '
'To fix this, either add svn-internal to your '
'.gclient using http://goto/read-src-internal, '
'or create a new archive using record_wpr.')
# Potential problems with individual pages.
pages_missing_archive_path = []
pages_missing_archive_data = []
for page in pages:
if page.is_local:
continue
if not page.archive_path:
pages_missing_archive_path.append(page)
elif not os.path.isfile(page.archive_path):
pages_missing_archive_data.append(page)
if pages_missing_archive_path:
logging.warning('The page set archives for some pages do not exist. '
'Skipping those pages. To fix this, record those pages '
'using record_wpr. To ignore this warning and run '
'against live sites, pass the flag --use-live-sites.')
if pages_missing_archive_data:
logging.warning('The page set archives for some pages are missing. '
'Someone forgot to check them in, or they were deleted. '
'Skipping those pages. To fix this, record those pages '
'using record_wpr. To ignore this warning and run '
'against live sites, pass the flag --use-live-sites.')
for page in pages_missing_archive_path + pages_missing_archive_data:
results.StartTest(page)
results.AddErrorMessage(page, 'Page set archive doesn\'t exist.')
results.StopTest(page)
return [page for page in pages if page not in
pages_missing_archive_path + pages_missing_archive_data]
def _RunPage(test, page, state, expectation, results, finder_options):
if expectation == 'skip':
logging.debug('Skipping test: Skip expectation for %s', page.url)
results.AddSkip(page, 'Skipped by test expectations')
return
logging.info('Running %s', page.url)
page_state = PageState(page, test.TabForPage(page, state.browser))
def ProcessError():
if expectation == 'fail':
msg = 'Expected exception while running %s' % page.url
results.AddSuccess(page)
else:
msg = 'Exception while running %s' % page.url
results.AddError(page, sys.exc_info())
exception_formatter.PrintFormattedException(msg=msg)
try:
page_state.PreparePage(test)
if state.repeat_state.ShouldNavigate(
finder_options.skip_navigate_on_repeat):
page_state.ImplicitPageNavigation(test)
test.RunPage(page, page_state.tab, results)
util.CloseConnections(page_state.tab)
except page_test.TestNotSupportedOnPlatformFailure:
raise
except page_test.Failure:
if expectation == 'fail':
exception_formatter.PrintFormattedException(
msg='Expected failure while running %s' % page.url)
results.AddSuccess(page)
else:
exception_formatter.PrintFormattedException(
msg='Failure while running %s' % page.url)
results.AddFailure(page, sys.exc_info())
except (util.TimeoutException, exceptions.LoginException,
exceptions.ProfilingException):
ProcessError()
except (exceptions.TabCrashException, exceptions.BrowserGoneException):
ProcessError()
# Run() catches these exceptions to relaunch the tab/browser, so re-raise.
raise
except page_action.PageActionNotSupported as e:
results.AddSkip(page, 'Unsupported page action: %s' % e)
except Exception:
exception_formatter.PrintFormattedException(
msg='Unhandled exception while running %s' % page.url)
results.AddFailure(page, sys.exc_info())
else:
if expectation == 'fail':
logging.warning('%s was expected to fail, but passed.\n', page.url)
results.AddSuccess(page)
finally:
page_state.CleanUpPage(test)
def _WaitForThermalThrottlingIfNeeded(platform):
if not platform.CanMonitorThermalThrottling():
return
thermal_throttling_retry = 0
while (platform.IsThermallyThrottled() and
thermal_throttling_retry < 3):
logging.warning('Thermally throttled, waiting (%d)...',
thermal_throttling_retry)
thermal_throttling_retry += 1
time.sleep(thermal_throttling_retry * 2)
if thermal_throttling_retry and platform.IsThermallyThrottled():
logging.warning('Device is thermally throttled before running '
'performance tests, results will vary.')
def _CheckThermalThrottling(platform):
if not platform.CanMonitorThermalThrottling():
return
if platform.HasBeenThermallyThrottled():
logging.warning('Device has been thermally throttled during '
'performance tests, results will vary.')
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
"""
Interface with command line GULP.
http://projects.ivec.org
WARNING: you need to have GULP installed on your system.
"""
__author__ = "Bharat Medasani, Wenhao Sun"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Bharat Medasani"
__email__ = "bkmedasani@lbl.gov,wenhao@mit.edu"
__status__ = "Production"
__date__ = "$Jun 22, 2013M$"
import subprocess
import os
import re
from pymatgen.core.periodic_table import Element
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.analysis.bond_valence import BVAnalyzer
from six.moves import map
from six.moves import zip
_anions = set(map(Element, ["O", "S", "F", "Cl", "Br", "N", "P"]))
_cations = set(map(Element, [
"Li", "Na", "K", # alkali metals
"Be", "Mg", "Ca", # alkaline metals
"Al", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ge", "As",
"Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In", "Sn", "Sb",
"Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au", "Hg", "Tl", "Pb", "Bi",
"La", "Ce", "Pr", "Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er",
"Tm", "Yb", "Lu"
]))
_gulp_kw = {
#Control of calculation type
"angle", "bond", "cosmo", "cosmic", "cost", "defect", "distance",
"eem", "efg", "fit", "free_energy", "gasteiger", "genetic",
"gradients", "md", "montecarlo", "noautobond", "noenergy", "optimise",
"pot", "predict", "preserve_Q", "property", "phonon", "qeq", "qbond",
"single", "sm", "static_first", "torsion", "transition_state",
#Geometric variable specification
"breathe", "bulk_noopt", "cellonly", "conp", "conv", "isotropic",
"orthorhombic", "nobreathe", "noflgs", "shell", "unfix",
#Algorithm
"c6", "dipole", "fbfgs", "fix_molecule", "full", "hill", "kfull",
"marvinSE", "madelung", "minimum_image", "molecule", "molmec", "molq",
"newda", "noanisotropic_2b", "nod2sym", "nodsymmetry",
"noelectrostatics", "noexclude", "nofcentral", "nofirst_point",
"noksymmetry", "nolist_md", "nomcediff", "nonanal", "noquicksearch",
"noreal", "norecip", "norepulsive", "nosasinitevery", "nosderv",
"nozeropt", "numerical", "qiter", "qok", "spatial", "storevectors",
"nomolecularinternalke", "voight", "zsisa",
#Optimisation method
"conjugate", "dfp", "lbfgs", "numdiag", "positive", "rfo", "unit",
#Output control
"average", "broaden_dos", "cartesian", "compare", "conserved",
"dcharge", "dynamical_matrix",
"eigenvectors", "global", "hessian", "hexagonal", "intensity", "linmin",
"meanke", "nodensity_out", "nodpsym", "nofirst_point", "nofrequency",
"nokpoints", "operators", "outcon", "prt_eam", "prt_two",
"prt_regi_before", "qsas", "restore", "save", "terse",
#Structure control
"full", "hexagonal", "lower_symmetry", "nosymmetry",
#PDF control
"PDF", "PDFcut", "PDFbelow", "PDFkeep", "coreinfo", "nowidth", "nopartial",
#Miscellaneous
"nomodcoord", "oldunits", "zero_potential"
}
class GulpIO(object):
"""
To generate GULP input and process output
"""
def keyword_line(self, *args):
"""
Checks if the input args are proper gulp keywords and
generates the 1st line of gulp input. Full keywords are expected.
Args:
\\*args: 1st line keywords
"""
#if len(list(filter(lambda x: x in _gulp_kw, args))) != len(args):
# raise GulpError("Wrong keywords given")
gin = " ".join(args)
gin += "\n"
return gin
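    # Illustrative call (not part of the original docstring):
    #   GulpIO().keyword_line('optimise', 'conp', 'property')
    # returns the single input line 'optimise conp property\n'.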
def structure_lines(self, structure, cell_flg=True, frac_flg=True,
anion_shell_flg=True, cation_shell_flg=False,
symm_flg=True):
"""
Generates GULP input string corresponding to pymatgen structure.
Args:
structure: pymatgen Structure object
cell_flg (default = True): Option to use lattice parameters.
            frac_flg (default = True): If True, fractional coordinates
                are used. Else, cartesian coordinates in Angstroms are used.
******
GULP convention is to use fractional coordinates for periodic
structures and cartesian coordinates for non-periodic
structures.
******
anion_shell_flg (default = True): If True, anions are considered
polarizable.
cation_shell_flg (default = False): If True, cations are
considered polarizable.
symm_flg (default = True): If True, symmetry information is also
written.
Returns:
string containing structure for GULP input
"""
gin = ""
if cell_flg:
gin += "cell\n"
l = structure.lattice
lat_str = [str(i) for i in [l.a, l.b, l.c, l.alpha, l.beta,
l.gamma]]
gin += " ".join(lat_str) + "\n"
if frac_flg:
gin += "frac\n"
coord_attr = "frac_coords"
else:
gin += "cart\n"
coord_attr = "coords"
for site in structure.sites:
coord = [str(i) for i in getattr(site, coord_attr)]
specie = site.specie
core_site_desc = specie.symbol + " core " + " ".join(coord) + "\n"
gin += core_site_desc
if ((specie in _anions and anion_shell_flg) or
(specie in _cations and cation_shell_flg)):
shel_site_desc = specie.symbol + " shel " + " ".join(
coord) + "\n"
gin += shel_site_desc
else:
pass
if symm_flg:
gin += "space\n"
            gin += str(SpacegroupAnalyzer(structure).get_space_group_number()) + "\n"
return gin
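    # Illustrative output (assumption: a rock-salt MgO structure with the
    # default flags), roughly:
    #   cell
    #   4.21 4.21 4.21 90.0 90.0 90.0
    #   frac
    #   Mg core 0.0 0.0 0.0
    #   O core 0.5 0.5 0.5
    #   O shel 0.5 0.5 0.5
    #   space
    #   225
    # O gets a shell line because anion_shell_flg defaults to True, while Mg
    # does not because cation_shell_flg defaults to False.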
def specie_potential_lines(self, structure, potential, **kwargs):
"""
Generates GULP input specie and potential string for pymatgen
structure.
Args:
structure: pymatgen.core.structure.Structure object
potential: String specifying the type of potential used
\\*\\*kwargs: Additional parameters related to potential. For
potential == "buckingham",
anion_shell_flg (default = False):
If True, anions are considered polarizable.
anion_core_chrg=float
anion_shell_chrg=float
cation_shell_flg (default = False):
If True, cations are considered polarizable.
cation_core_chrg=float
cation_shell_chrg=float
Returns:
string containing specie and potential specification for gulp
input.
"""
raise NotImplementedError("gulp_specie_potential not yet implemented."
"\nUse library_line instead")
def library_line(self, file_name):
"""
Specifies GULP library file to read species and potential parameters.
If using library don't specify species and potential
in the input file and vice versa. Make sure the elements of
structure are in the library file.
Args:
file_name: Name of GULP library file
Returns:
GULP input string specifying library option
"""
gulplib_set = lambda: 'GULP_LIB' in os.environ.keys()
readable = lambda f: os.path.isfile(f) and os.access(f, os.R_OK)
gin = ""
dirpath, fname = os.path.split(file_name)
if dirpath and readable(file_name): # Full path specified
gin = 'library ' + file_name
else:
fpath = os.path.join(os.getcwd(), file_name) # Check current dir
if readable(fpath):
gin = 'library ' + fpath
elif gulplib_set(): # Check the GULP_LIB path
fpath = os.path.join(os.environ['GULP_LIB'], file_name)
if readable(fpath):
gin = 'library ' + file_name
if gin:
return gin + "\n"
else:
raise GulpError('GULP Library not found')
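# Illustrative sketch (not part of the original class): the library file is
# resolved first as an explicit path, then against the current directory and
# the GULP_LIB environment variable, so either call below may work depending
# on the local setup ("bush.lib" is just an example file name).
#
#     gin += gio.library_line("bush.lib")
#     gin += gio.library_line("/opt/gulp/libs/bush.lib")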
def buckingham_input(self, structure, keywords, library=None,
uc=True, valence_dict=None):
"""
Gets a GULP input for an oxide structure and buckingham potential
from library.
Args:
structure: pymatgen.core.structure.Structure
keywords: GULP first line keywords.
library (Default=None): File containing the species and potential.
uc (Default=True): Unit Cell Flag.
valence_dict: {El: valence}
"""
gin = self.keyword_line(*keywords)
gin += self.structure_lines(structure, symm_flg=not uc)
if not library:
gin += self.buckingham_potential(structure, valence_dict)
else:
gin += self.library_line(library)
return gin
def buckingham_potential(self, structure, val_dict=None):
"""
Generate species, buckingham, and spring options for an oxide structure
using the parameters in default libraries.
Ref:
1. G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys.,
18, 1149-1161 (1985)
2. T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle,
J. Mater Chem., 4, 831-837 (1994)
Args:
structure: pymatgen.core.structure.Structure
val_dict (Needed if structure is not charge neutral): {El:valence}
dict, where El is element.
"""
if not val_dict:
try:
#If structure is oxidation state decorated, use that first.
el = [site.specie.symbol for site in structure]
valences = [site.specie.oxi_state for site in structure]
val_dict = dict(zip(el, valences))
except AttributeError:
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
val_dict = dict(zip(el, valences))
#Try bush library first
bpb = BuckinghamPotential('bush')
bpl = BuckinghamPotential('lewis')
gin = ""
for key in val_dict.keys():
use_bush = True
el = re.sub(r'[1-9,+,\-]', '', key)
if el not in bpb.species_dict.keys():
use_bush = False
elif val_dict[key] != bpb.species_dict[el]['oxi']:
use_bush = False
if use_bush:
gin += "species \n"
gin += bpb.species_dict[el]['inp_str']
gin += "buckingham \n"
gin += bpb.pot_dict[el]
gin += "spring \n"
gin += bpb.spring_dict[el]
continue
#Try lewis library next if element is not in bush
#use_lewis = True
if el != "O": # For metals the key is "Metal_OxiState+"
k = el + '_' + str(int(val_dict[key])) + '+'
if k not in bpl.species_dict.keys():
#use_lewis = False
raise GulpError("Element {} not in library".format(k))
gin += "species\n"
gin += bpl.species_dict[k]
gin += "buckingham\n"
gin += bpl.pot_dict[k]
else:
gin += "species\n"
k = "O_core"
gin += bpl.species_dict[k]
k = "O_shel"
gin += bpl.species_dict[k]
gin += "buckingham\n"
gin += bpl.pot_dict[key]
gin += 'spring\n'
gin += bpl.spring_dict[key]
return gin
def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
"""
Gets a GULP input with Tersoff potential for an oxide structure
Args:
structure: pymatgen.core.structure.Structure
periodic (Default=False): Flag denoting whether periodic
boundary conditions are used
uc (Default=True): Unit Cell Flag.
keywords: GULP first line keywords.
"""
#gin="static noelectrostatics \n "
gin = self.keyword_line(*keywords)
gin += self.structure_lines(
structure, cell_flg=periodic, frac_flg=periodic,
anion_shell_flg=False, cation_shell_flg=False, symm_flg=not uc
)
gin += self.tersoff_potential(structure)
return gin
def tersoff_potential(self, structure):
"""
Generate the species and Tersoff potential lines for an oxide structure.
Args:
structure: pymatgen.core.structure.Structure
"""
bv = BVAnalyzer()
el = [site.specie.symbol for site in structure]
valences = bv.get_valences(structure)
el_val_dict = dict(zip(el, valences))
gin = "species \n"
qerfstring = "qerfc\n"
for key in el_val_dict.keys():
if key != "O" and el_val_dict[key] % 1 != 0:
raise SystemError("Oxide has mixed valence on metal")
specie_string = key + " core " + str(el_val_dict[key]) + "\n"
gin += specie_string
qerfstring += key + " " + key + " 0.6000 10.0000 \n"
gin += "# noelectrostatics \n Morse \n"
met_oxi_ters = TersoffPotential().data
for key in el_val_dict.keys():
if key != "O":
metal = key + "(" + str(int(el_val_dict[key])) + ")"
ters_pot_str = met_oxi_ters[metal]
gin += ters_pot_str
gin += qerfstring
return gin
def get_energy(self, gout):
energy = None
for line in gout.split("\n"):
if "Total lattice energy" in line and "eV" in line:
energy = line.split()
elif "Non-primitive unit cell" in line and "eV" in line:
energy = line.split()
if energy:
return float(energy[4])
else:
raise GulpError("Energy not found in Gulp output")
def get_relaxed_structure(self, gout):
#Find the structure lines
structure_lines = []
cell_param_lines = []
output_lines = gout.split("\n")
no_lines = len(output_lines)
i = 0
# Compute the input lattice parameters
while i < no_lines:
line = output_lines[i]
if "Full cell parameters" in line:
i += 2
line = output_lines[i]
a = float(line.split()[8])
alpha = float(line.split()[11])
line = output_lines[i + 1]
b = float(line.split()[8])
beta = float(line.split()[11])
line = output_lines[i + 2]
c = float(line.split()[8])
gamma = float(line.split()[11])
i += 3
break
elif "Cell parameters" in line:
i += 2
line = output_lines[i]
a = float(line.split()[2])
alpha = float(line.split()[5])
line = output_lines[i + 1]
b = float(line.split()[2])
beta = float(line.split()[5])
line = output_lines[i + 2]
c = float(line.split()[2])
gamma = float(line.split()[5])
i += 3
break
else:
i += 1
while i < no_lines:
line = output_lines[i]
if "Final fractional coordinates of atoms" in line:
# read the site coordinates in the following lines
i += 6
line = output_lines[i]
while line[0:2] != '--':
structure_lines.append(line)
i += 1
line = output_lines[i]
# read the cell parameters
i += 9
line = output_lines[i]
if "Final cell parameters" in line:
i += 3
for del_i in range(6):
line = output_lines[i + del_i]
cell_param_lines.append(line)
break
else:
i += 1
#Process the structure lines
if structure_lines:
sp = []
coords = []
for line in structure_lines:
fields = line.split()
if fields[2] == 'c':
sp.append(fields[1])
coords.append(list(float(x) for x in fields[3:6]))
else:
raise IOError("No structure found")
if cell_param_lines:
a = float(cell_param_lines[0].split()[1])
b = float(cell_param_lines[1].split()[1])
c = float(cell_param_lines[2].split()[1])
alpha = float(cell_param_lines[3].split()[1])
beta = float(cell_param_lines[4].split()[1])
gamma = float(cell_param_lines[5].split()[1])
latt = Lattice.from_parameters(a, b, c, alpha, beta, gamma)
return Structure(latt, sp, coords)
class GulpCaller(object):
"""
Class to run gulp from commandline
"""
def __init__(self, cmd='gulp'):
"""
Initialize with the executable if not in the standard path
Args:
cmd: Command. Defaults to gulp.
"""
def is_exe(f):
return os.path.isfile(f) and os.access(f, os.X_OK)
fpath, fname = os.path.split(cmd)
if fpath:
if is_exe(cmd):
self._gulp_cmd = cmd
return
else:
for path in os.environ['PATH'].split(os.pathsep):
path = path.strip('"')
file = os.path.join(path, cmd)
if is_exe(file):
self._gulp_cmd = file
return
raise GulpError("Executable not found")
def run(self, gin):
"""
Run GULP using the gin as input
Args:
gin: GULP input string
Returns:
gout: GULP output string
"""
#command=["gulp"]
p = subprocess.Popen(
self._gulp_cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = p.communicate(bytearray(gin, "utf-8"))
out = out.decode("utf-8")
err = err.decode("utf-8")
if "Error" in err or "error" in err:
print(gin)
print("----output_0---------")
print(out)
print("----End of output_0------\n\n\n")
print("----output_1--------")
print(out)
print("----End of output_1------")
raise GulpError(err)
# We may not need this
if "ERROR" in out:
raise GulpError(out)
# Sometimes optimisation may fail to reach convergence
conv_err_string = "Conditions for a minimum have not been satisfied"
if conv_err_string in out:
raise GulpConvergenceError()
gout = ""
for line in out.split("\n"):
gout = gout + line + "\n"
return gout
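# Illustrative sketch (not part of the original module): running a prepared
# input string and parsing the lattice energy back out of the output.
#
#     gio = GulpIO()
#     caller = GulpCaller()           # or GulpCaller('/path/to/gulp')
#     gout = caller.run(gin)
#     energy = gio.get_energy(gout)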
def get_energy_tersoff(structure, gulp_cmd='gulp'):
"""
Compute the energy of a structure using Tersoff potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.tersoff_input(structure)
gout = gc.run(gin)
return gio.get_energy(gout)
def get_energy_buckingham(structure, gulp_cmd='gulp',
keywords=('optimise', 'conp', 'qok'),
valence_dict=None):
"""
Compute the energy of a structure using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(
structure, keywords, valence_dict=valence_dict
)
gout = gc.run(gin)
return gio.get_energy(gout)
def get_energy_relax_structure_buckingham(structure,
gulp_cmd='gulp',
keywords=('optimise', 'conp'),
valence_dict=None):
"""
Relax a structure and compute the energy using Buckingham potential.
Args:
structure: pymatgen.core.structure.Structure
gulp_cmd: GULP command if not in standard place
keywords: GULP first line keywords
valence_dict: {El: valence}. Needed if the structure is not charge
neutral.
"""
gio = GulpIO()
gc = GulpCaller(gulp_cmd)
gin = gio.buckingham_input(
structure, keywords, valence_dict=valence_dict
)
gout = gc.run(gin)
energy = gio.get_energy(gout)
relax_structure = gio.get_relaxed_structure(gout)
return energy, relax_structure
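# Illustrative sketch (not part of the original module): tying the convenience
# helpers together. `structure` is assumed to be a charge-neutral pymatgen
# Structure and the `gulp` executable is assumed to be on the PATH; pass a
# different `gulp_cmd` otherwise.
def _example_buckingham_relaxation(structure, gulp_cmd='gulp'):
    """Relax a structure with the Buckingham potential and report the energy."""
    energy, relaxed = get_energy_relax_structure_buckingham(
        structure, gulp_cmd=gulp_cmd, keywords=('optimise', 'conp'))
    print("Relaxed lattice energy: %.4f eV" % energy)
    return relaxed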
class GulpError(Exception):
"""
Exception class for GULP.
Raised when GULP gives an error.
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "GulpError : " + self.msg
class GulpConvergenceError(Exception):
"""
Exception class for GULP.
Raised when proper convergence is not reached in the Mott-Littleton
defect energy optimisation procedure in GULP.
"""
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return self.msg
class BuckinghamPotential(object):
"""
Generate the Buckingham potential table from the bush.lib and lewis.lib files.
Ref:
T.S.Bush, J.D.Gale, C.R.A.Catlow and P.D. Battle, J. Mater Chem.,
4, 831-837 (1994).
G.V. Lewis and C.R.A. Catlow, J. Phys. C: Solid State Phys., 18,
1149-1161 (1985)
"""
def __init__(self, bush_lewis_flag):
assert bush_lewis_flag in {'bush', 'lewis'}
pot_file = "bush.lib" if bush_lewis_flag == "bush" else "lewis.lib"
with open(os.path.join(os.environ["GULP_LIB"], pot_file), 'rt') as f:
# In lewis.lib there is no shell for cation
species_dict, pot_dict, spring_dict = {}, {}, {}
sp_flg, pot_flg, spring_flg = False, False, False
for row in f:
if row[0] == "#":
continue
if row.split()[0] == "species":
sp_flg, pot_flg, spring_flg = True, False, False
continue
if row.split()[0] == "buckingham":
sp_flg, pot_flg, spring_flg = False, True, False
continue
if row.split()[0] == "spring":
sp_flg, pot_flg, spring_flg = False, False, True
continue
elmnt = row.split()[0]
if sp_flg:
if bush_lewis_flag == "bush":
if elmnt not in species_dict.keys():
species_dict[elmnt] = {'inp_str': '', 'oxi': 0}
species_dict[elmnt]['inp_str'] += row
species_dict[elmnt]['oxi'] += float(row.split()[2])
elif bush_lewis_flag == "lewis":
if elmnt == "O":
if row.split()[1] == "core":
species_dict["O_core"] = row
if row.split()[1] == "shel":
species_dict["O_shel"] = row
else:
metal = elmnt.split('_')[0]
#oxi_state = metaloxi.split('_')[1][0]
species_dict[elmnt] = metal + " core " + \
row.split()[2] + "\n"
continue
if pot_flg:
if bush_lewis_flag == "bush":
pot_dict[elmnt] = row
elif bush_lewis_flag == "lewis":
if elmnt == "O":
pot_dict["O"] = row
else:
metal = elmnt.split('_')[0]
#oxi_state = metaloxi.split('_')[1][0]
pot_dict[elmnt] = metal + " " + " ".join(
row.split()[1:]) + "\n"
continue
if spring_flg:
spring_dict[elmnt] = row
if bush_lewis_flag == "bush":
#Fill the null keys in spring dict with empty strings
for key in pot_dict.keys():
if key not in spring_dict.keys():
spring_dict[key] = ""
self.species_dict = species_dict
self.pot_dict = pot_dict
self.spring_dict = spring_dict
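# Illustrative sketch (not part of the original class): inspecting the parsed
# library tables. Requires GULP_LIB to point at a directory containing
# bush.lib / lewis.lib; the exact keys depend on the library contents.
#
#     bush = BuckinghamPotential('bush')
#     bush.species_dict['O']['oxi']   # summed core + shell charge for O
#     bush.pot_dict['O']              # raw "buckingham" line for O
#     bush.spring_dict['O']           # raw "spring" line (may be "")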
class TersoffPotential(object):
"""
Generate the Tersoff potential table from the "OxideTersoffPotentials" file.
"""
def __init__(self):
module_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(module_dir, "OxideTersoffPotentials"), "r") as f:
data = dict()
for row in f:
metaloxi = row.split()[0]
line = row.split(")")
data[metaloxi] = line[1]
self.data = data
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceEnvironment(Resource):
"""Description of an App Service Environment.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:param name: Resource Name.
:type name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:param type: Resource type.
:type type: str
:param tags: Resource tags.
:type tags: dict
:param app_service_environment_name: Name of the App Service Environment.
:type app_service_environment_name: str
:param app_service_environment_location: Location of the App Service
Environment, e.g. "West US".
:type app_service_environment_location: str
:ivar provisioning_state: Provisioning state of the App Service
Environment. Possible values include: 'Succeeded', 'Failed', 'Canceled',
'InProgress', 'Deleting'
:vartype provisioning_state: str or :class:`ProvisioningState
<azure.mgmt.web.models.ProvisioningState>`
:ivar status: Current status of the App Service Environment. Possible
values include: 'Preparing', 'Ready', 'Scaling', 'Deleting'
:vartype status: str or :class:`HostingEnvironmentStatus
<azure.mgmt.web.models.HostingEnvironmentStatus>`
:param vnet_name: Name of the Virtual Network for the App Service
Environment.
:type vnet_name: str
:param vnet_resource_group_name: Resource group of the Virtual Network.
:type vnet_resource_group_name: str
:param vnet_subnet_name: Subnet of the Virtual Network.
:type vnet_subnet_name: str
:param virtual_network: Description of the Virtual Network.
:type virtual_network: :class:`VirtualNetworkProfile
<azure.mgmt.web.models.VirtualNetworkProfile>`
:param internal_load_balancing_mode: Specifies which endpoints to serve
internally in the Virtual Network for the App Service Environment.
Possible values include: 'None', 'Web', 'Publishing'
:type internal_load_balancing_mode: str or
:class:`InternalLoadBalancingMode
<azure.mgmt.web.models.InternalLoadBalancingMode>`
:param multi_size: Front-end VM size, e.g. "Medium", "Large".
:type multi_size: str
:param multi_role_count: Number of front-end instances.
:type multi_role_count: int
:param worker_pools: Description of worker pools with worker size IDs, VM
sizes, and number of workers in each pool.
:type worker_pools: list of :class:`WorkerPool
<azure.mgmt.web.models.WorkerPool>`
:param ipssl_address_count: Number of IP SSL addresses reserved for the
App Service Environment.
:type ipssl_address_count: int
:ivar database_edition: Edition of the metadata database for the App
Service Environment, e.g. "Standard".
:vartype database_edition: str
:ivar database_service_objective: Service objective of the metadata
database for the App Service Environment, e.g. "S0".
:vartype database_service_objective: str
:ivar upgrade_domains: Number of upgrade domains of the App Service
Environment.
:vartype upgrade_domains: int
:ivar subscription_id: Subscription of the App Service Environment.
:vartype subscription_id: str
:param dns_suffix: DNS suffix of the App Service Environment.
:type dns_suffix: str
:ivar last_action: Last deployment action on the App Service Environment.
:vartype last_action: str
:ivar last_action_result: Result of the last deployment action on the App
Service Environment.
:vartype last_action_result: str
:ivar allowed_multi_sizes: List of comma separated strings describing
which VM sizes are allowed for front-ends.
:vartype allowed_multi_sizes: str
:ivar allowed_worker_sizes: List of comma separated strings describing
which VM sizes are allowed for workers.
:vartype allowed_worker_sizes: str
:ivar maximum_number_of_machines: Maximum number of VMs in the App Service
Environment.
:vartype maximum_number_of_machines: int
:ivar vip_mappings: Description of IP SSL mapping for the App Service
Environment.
:vartype vip_mappings: list of :class:`VirtualIPMapping
<azure.mgmt.web.models.VirtualIPMapping>`
:ivar environment_capacities: Current total, used, and available worker
capacities.
:vartype environment_capacities: list of :class:`StampCapacity
<azure.mgmt.web.models.StampCapacity>`
:param network_access_control_list: Access control list for controlling
traffic to the App Service Environment.
:type network_access_control_list: list of
:class:`NetworkAccessControlEntry
<azure.mgmt.web.models.NetworkAccessControlEntry>`
:ivar environment_is_healthy: True/false indicating whether the App
Service Environment is healthy.
:vartype environment_is_healthy: bool
:ivar environment_status: Detailed message with results of the last
check of the App Service Environment.
:vartype environment_status: str
:ivar resource_group: Resource group of the App Service Environment.
:vartype resource_group: str
:param front_end_scale_factor: Scale factor for front-ends.
:type front_end_scale_factor: int
:ivar default_front_end_scale_factor: Default Scale Factor for FrontEnds.
:vartype default_front_end_scale_factor: int
:param api_management_account_id: API Management Account associated with
the App Service Environment.
:type api_management_account_id: str
:param suspended: <code>true</code> if the App Service Environment is
suspended; otherwise, <code>false</code>. The environment can be
suspended, e.g. when the management endpoint is no longer available
(most likely because NSG blocked the incoming traffic).
:type suspended: bool
:param dynamic_cache_enabled: True/false indicating whether dynamic cache
 is enabled for the App Service Environment.
:type dynamic_cache_enabled: bool
:param cluster_settings: Custom settings for changing the behavior of the
App Service Environment.
:type cluster_settings: list of :class:`NameValuePair
<azure.mgmt.web.models.NameValuePair>`
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'database_edition': {'readonly': True},
'database_service_objective': {'readonly': True},
'upgrade_domains': {'readonly': True},
'subscription_id': {'readonly': True},
'last_action': {'readonly': True},
'last_action_result': {'readonly': True},
'allowed_multi_sizes': {'readonly': True},
'allowed_worker_sizes': {'readonly': True},
'maximum_number_of_machines': {'readonly': True},
'vip_mappings': {'readonly': True},
'environment_capacities': {'readonly': True},
'environment_is_healthy': {'readonly': True},
'environment_status': {'readonly': True},
'resource_group': {'readonly': True},
'default_front_end_scale_factor': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'app_service_environment_name': {'key': 'properties.name', 'type': 'str'},
'app_service_environment_location': {'key': 'properties.location', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
'status': {'key': 'properties.status', 'type': 'HostingEnvironmentStatus'},
'vnet_name': {'key': 'properties.vnetName', 'type': 'str'},
'vnet_resource_group_name': {'key': 'properties.vnetResourceGroupName', 'type': 'str'},
'vnet_subnet_name': {'key': 'properties.vnetSubnetName', 'type': 'str'},
'virtual_network': {'key': 'properties.virtualNetwork', 'type': 'VirtualNetworkProfile'},
'internal_load_balancing_mode': {'key': 'properties.internalLoadBalancingMode', 'type': 'InternalLoadBalancingMode'},
'multi_size': {'key': 'properties.multiSize', 'type': 'str'},
'multi_role_count': {'key': 'properties.multiRoleCount', 'type': 'int'},
'worker_pools': {'key': 'properties.workerPools', 'type': '[WorkerPool]'},
'ipssl_address_count': {'key': 'properties.ipsslAddressCount', 'type': 'int'},
'database_edition': {'key': 'properties.databaseEdition', 'type': 'str'},
'database_service_objective': {'key': 'properties.databaseServiceObjective', 'type': 'str'},
'upgrade_domains': {'key': 'properties.upgradeDomains', 'type': 'int'},
'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'},
'dns_suffix': {'key': 'properties.dnsSuffix', 'type': 'str'},
'last_action': {'key': 'properties.lastAction', 'type': 'str'},
'last_action_result': {'key': 'properties.lastActionResult', 'type': 'str'},
'allowed_multi_sizes': {'key': 'properties.allowedMultiSizes', 'type': 'str'},
'allowed_worker_sizes': {'key': 'properties.allowedWorkerSizes', 'type': 'str'},
'maximum_number_of_machines': {'key': 'properties.maximumNumberOfMachines', 'type': 'int'},
'vip_mappings': {'key': 'properties.vipMappings', 'type': '[VirtualIPMapping]'},
'environment_capacities': {'key': 'properties.environmentCapacities', 'type': '[StampCapacity]'},
'network_access_control_list': {'key': 'properties.networkAccessControlList', 'type': '[NetworkAccessControlEntry]'},
'environment_is_healthy': {'key': 'properties.environmentIsHealthy', 'type': 'bool'},
'environment_status': {'key': 'properties.environmentStatus', 'type': 'str'},
'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'},
'front_end_scale_factor': {'key': 'properties.frontEndScaleFactor', 'type': 'int'},
'default_front_end_scale_factor': {'key': 'properties.defaultFrontEndScaleFactor', 'type': 'int'},
'api_management_account_id': {'key': 'properties.apiManagementAccountId', 'type': 'str'},
'suspended': {'key': 'properties.suspended', 'type': 'bool'},
'dynamic_cache_enabled': {'key': 'properties.dynamicCacheEnabled', 'type': 'bool'},
'cluster_settings': {'key': 'properties.clusterSettings', 'type': '[NameValuePair]'},
}
def __init__(self, location, name=None, kind=None, type=None, tags=None, app_service_environment_name=None, app_service_environment_location=None, vnet_name=None, vnet_resource_group_name=None, vnet_subnet_name=None, virtual_network=None, internal_load_balancing_mode=None, multi_size=None, multi_role_count=None, worker_pools=None, ipssl_address_count=None, dns_suffix=None, network_access_control_list=None, front_end_scale_factor=None, api_management_account_id=None, suspended=None, dynamic_cache_enabled=None, cluster_settings=None):
super(AppServiceEnvironment, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
self.app_service_environment_name = app_service_environment_name
self.app_service_environment_location = app_service_environment_location
self.provisioning_state = None
self.status = None
self.vnet_name = vnet_name
self.vnet_resource_group_name = vnet_resource_group_name
self.vnet_subnet_name = vnet_subnet_name
self.virtual_network = virtual_network
self.internal_load_balancing_mode = internal_load_balancing_mode
self.multi_size = multi_size
self.multi_role_count = multi_role_count
self.worker_pools = worker_pools
self.ipssl_address_count = ipssl_address_count
self.database_edition = None
self.database_service_objective = None
self.upgrade_domains = None
self.subscription_id = None
self.dns_suffix = dns_suffix
self.last_action = None
self.last_action_result = None
self.allowed_multi_sizes = None
self.allowed_worker_sizes = None
self.maximum_number_of_machines = None
self.vip_mappings = None
self.environment_capacities = None
self.network_access_control_list = network_access_control_list
self.environment_is_healthy = None
self.environment_status = None
self.resource_group = None
self.front_end_scale_factor = front_end_scale_factor
self.default_front_end_scale_factor = None
self.api_management_account_id = api_management_account_id
self.suspended = suspended
self.dynamic_cache_enabled = dynamic_cache_enabled
self.cluster_settings = cluster_settings
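# Illustrative sketch (not part of the generated client): constructing the
# model with a handful of its writable properties. All values below are
# placeholders, not real Azure resources.
def _example_app_service_environment():
    return AppServiceEnvironment(
        location='West US',
        app_service_environment_name='example-ase',
        vnet_name='example-vnet',
        vnet_subnet_name='example-subnet',
        front_end_scale_factor=15,
    )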
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import re
import netaddr
import six
from oslo_utils import uuidutils
from django.core.exceptions import ValidationError
from django.core import urlresolvers
from django.forms import fields
from django.forms import forms
from django.forms.utils import flatatt
from django.forms import widgets
from django.template.loader import get_template
from django.utils.encoding import force_text
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import Promise
from django.utils import html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
ip_allowed_symbols_re = re.compile(r'^[a-fA-F0-9:/\.]+$')
IPv4 = 1
IPv6 = 2
class IPField(fields.Field):
"""Form field for entering IP/range values, with validation.
Supports IPv4/IPv6 in the format:
.. xxx.xxx.xxx.xxx
.. xxx.xxx.xxx.xxx/zz
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
.. ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/zz
and all compressed forms. Also the short forms
are supported:
xxx/yy
xxx.xxx/yy
.. attribute:: version
Specifies which IP version to validate,
valid values are 1 (fields.IPv4), 2 (fields.IPv6) or
both - 3 (fields.IPv4 | fields.IPv6).
Defaults to IPv4 (1)
.. attribute:: mask
Boolean flag to validate subnet masks along with IP address.
E.g: 10.0.0.1/32
.. attribute:: mask_range_from
Subnet range limitation, e.g. 16
That means the input mask will be checked to be in the range
16:max_value. Useful to limit the subnet ranges
to A/B/C-class networks.
"""
invalid_format_message = _("Incorrect format for IP address")
invalid_version_message = _("Invalid version for IP address")
invalid_mask_message = _("Invalid subnet mask")
max_v4_mask = 32
max_v6_mask = 128
def __init__(self, *args, **kwargs):
self.mask = kwargs.pop("mask", None)
self.min_mask = kwargs.pop("mask_range_from", 0)
self.version = kwargs.pop('version', IPv4)
super(IPField, self).__init__(*args, **kwargs)
def validate(self, value):
super(IPField, self).validate(value)
if not value and not self.required:
return
try:
if self.mask:
self.ip = netaddr.IPNetwork(value)
else:
self.ip = netaddr.IPAddress(value)
except Exception:
raise ValidationError(self.invalid_format_message)
if not any([self.version & IPv4 > 0 and self.ip.version == 4,
self.version & IPv6 > 0 and self.ip.version == 6]):
raise ValidationError(self.invalid_version_message)
if self.mask:
if self.ip.version == 4 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v4_mask:
raise ValidationError(self.invalid_mask_message)
if self.ip.version == 6 and \
not self.min_mask <= self.ip.prefixlen <= self.max_v6_mask:
raise ValidationError(self.invalid_mask_message)
def clean(self, value):
super(IPField, self).clean(value)
return str(getattr(self, "ip", ""))
class MultiIPField(IPField):
"""Extends IPField to allow comma-separated lists of addresses."""
def validate(self, value):
self.addresses = []
if value:
addresses = value.split(',')
for ip in addresses:
super(MultiIPField, self).validate(ip)
self.addresses.append(ip)
else:
super(MultiIPField, self).validate(value)
def clean(self, value):
super(MultiIPField, self).clean(value)
return str(','.join(getattr(self, "addresses", [])))
class MACAddressField(fields.Field):
"""Form field for entering a MAC address with validation.
Supports all formats known by netaddr.EUI(), for example:
.. xx:xx:xx:xx:xx:xx
.. xx-xx-xx-xx-xx-xx
.. xxxx.xxxx.xxxx
"""
def validate(self, value):
super(MACAddressField, self).validate(value)
if not value:
return
try:
self.mac_address = netaddr.EUI(value)
# NOTE(rubasov): Normalize MAC address to the most usual format.
self.mac_address.dialect = netaddr.mac_unix_expanded
except Exception:
raise ValidationError(_("Invalid MAC Address format"),
code="invalid_mac")
def clean(self, value):
super(MACAddressField, self).clean(value)
return str(getattr(self, "mac_address", ""))
# NOTE(adriant): The Select widget was considerably rewritten in Django 1.11
# and broke our customizations because we relied on the inner workings of
# this widget as it was written. I've opted to move that older variant of the
# select widget here as a custom widget for Horizon, but this should be
# reviewed and replaced in future. We need to move to template based rendering
# for widgets, but that's a big task better done in Queens.
class SelectWidget(widgets.Widget):
"""Custom select widget.
It allows rendering data-xxx attributes from choices.
This widget also allows the user to specify additional HTML attributes
for choices.
.. attribute:: data_attrs
Specifies object properties to serialize as
data-xxx attribute. If passed ('id', ),
this will be rendered as:
<option data-id="123">option_value</option>
where 123 is the value of choice_value.id
.. attribute:: transform
A callable used to render the display value
from the option object.
.. attribute:: transform_html_attrs
A callable used to render additional HTML attributes
for the option object. It returns a dictionary
containing the html attributes and their values.
For example, to define a title attribute for the
choices::
helpText = { 'Apple': 'This is a fruit',
'Carrot': 'This is a vegetable' }
def get_title(data):
text = helpText.get(data, None)
if text:
return {'title': text}
else:
return {}
....
....
widget=forms.ThemableSelect( attrs={'class': 'switchable',
'data-slug': 'source'},
transform_html_attrs=get_title )
self.fields[<field name>].choices =
([
('apple','Apple'),
('carrot','Carrot')
])
"""
def __init__(self, attrs=None, choices=(), data_attrs=(), transform=None,
transform_html_attrs=None):
self.choices = list(choices)
self.data_attrs = data_attrs
self.transform = transform
self.transform_html_attrs = transform_html_attrs
super(SelectWidget, self).__init__(attrs)
def render(self, name, value, attrs=None, renderer=None):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [html.format_html('<select{}>', flatatt(final_attrs))]
options = self.render_options([value])
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
other_html = (u' selected="selected"'
if option_value in selected_choices else '')
other_html += self.transform_option_html_attrs(option_label)
data_attr_html = self.get_data_attrs(option_label)
if data_attr_html:
other_html += ' ' + data_attr_html
option_label = self.transform_option_label(option_label)
return u'<option value="%s"%s>%s</option>' % (
html.escape(option_value), other_html, option_label)
def render_options(self, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in self.choices:
if isinstance(option_label, (list, tuple)):
output.append(html.format_html(
'<optgroup label="{}">', force_text(option_value)))
for option in option_label:
output.append(
self.render_option(selected_choices, *option))
output.append('</optgroup>')
else:
output.append(self.render_option(
selected_choices, option_value, option_label))
return '\n'.join(output)
def get_data_attrs(self, option_label):
other_html = []
if not isinstance(option_label, (six.string_types, Promise)):
for data_attr in self.data_attrs:
data_value = html.conditional_escape(
force_text(getattr(option_label,
data_attr, "")))
other_html.append('data-%s="%s"' % (data_attr, data_value))
return ' '.join(other_html)
def transform_option_label(self, option_label):
if (not isinstance(option_label, (six.string_types, Promise)) and
callable(self.transform)):
option_label = self.transform(option_label)
return html.conditional_escape(force_text(option_label))
def transform_option_html_attrs(self, option_label):
if not callable(self.transform_html_attrs):
return ''
return flatatt(self.transform_html_attrs(option_label))
class ThemableSelectWidget(SelectWidget):
"""Bootstrap base select field widget."""
def render(self, name, value, attrs=None, choices=()):
# NOTE(woodnt): Currently the "attrs" contents are being added to the
# select that's hidden. It's unclear whether this is the
# desired behavior. In some cases, the attribute should
# remain solely on the now-hidden select. But in others it should
# live on the bootstrap button (visible), or on both.
new_choices = []
initial_value = value
for opt_value, opt_label in itertools.chain(self.choices, choices):
other_html = self.transform_option_html_attrs(opt_label)
data_attr_html = self.get_data_attrs(opt_label)
if data_attr_html:
other_html += ' ' + data_attr_html
opt_label = self.transform_option_label(opt_label)
# If value exists, save off its label for use
if opt_value == value:
initial_value = opt_label
if other_html:
new_choices.append((opt_value, opt_label, other_html))
else:
new_choices.append((opt_value, opt_label))
if value is None and new_choices:
initial_value = new_choices[0][1]
attrs = self.build_attrs(attrs)
id = attrs.pop('id', 'id_%s' % name)
template = get_template('horizon/common/fields/_themable_select.html')
context = {
'name': name,
'options': new_choices,
'id': id,
'value': value,
'initial_value': initial_value,
'select_attrs': attrs,
}
return template.render(context)
class DynamicSelectWidget(SelectWidget):
"""``Select`` widget to handle dynamic changes to the available choices.
A subclass of the ``Select`` widget which renders extra attributes for
use in callbacks to handle dynamic changes to the available choices.
"""
_data_add_url_attr = "data-add-item-url"
def render(self, *args, **kwargs):
add_item_url = self.get_add_item_url()
if add_item_url is not None:
self.attrs[self._data_add_url_attr] = add_item_url
return super(DynamicSelectWidget, self).render(*args, **kwargs)
def get_add_item_url(self):
if callable(self.add_item_link):
return self.add_item_link()
try:
if self.add_item_link_args:
return urlresolvers.reverse(self.add_item_link,
args=self.add_item_link_args)
else:
return urlresolvers.reverse(self.add_item_link)
except urlresolvers.NoReverseMatch:
return self.add_item_link
class ThemableDynamicSelectWidget(ThemableSelectWidget, DynamicSelectWidget):
pass
class ThemableChoiceField(fields.ChoiceField):
"""Bootstrap based select field."""
widget = ThemableSelectWidget
class DynamicChoiceField(fields.ChoiceField):
"""ChoiceField that make dynamically updating its elements easier.
Notably, the field declaration takes an extra argument, ``add_item_link``
which may be a string or callable defining the URL that should be used
for the "add" link associated with the field.
"""
widget = DynamicSelectWidget
def __init__(self,
add_item_link=None,
add_item_link_args=None,
*args,
**kwargs):
super(DynamicChoiceField, self).__init__(*args, **kwargs)
self.widget.add_item_link = add_item_link
self.widget.add_item_link_args = add_item_link_args
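# Illustrative sketch (not part of the original module): a DynamicChoiceField
# whose "add item" link is resolved through Django URL reversing; the URL
# name below is hypothetical.
#
#     network = DynamicChoiceField(
#         label=_("Network"),
#         add_item_link='horizon:project:networks:create')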
class ThemableDynamicChoiceField(DynamicChoiceField):
widget = ThemableDynamicSelectWidget
class DynamicTypedChoiceField(DynamicChoiceField, fields.TypedChoiceField):
"""Simple mix of ``DynamicChoiceField`` and ``TypedChoiceField``."""
pass
class ThemableDynamicTypedChoiceField(ThemableDynamicChoiceField,
fields.TypedChoiceField):
"""Simple mix of ``ThemableDynamicChoiceField`` & ``TypedChoiceField``."""
pass
class ThemableCheckboxInput(widgets.CheckboxInput):
"""Checkbox widget which renders extra markup.
It is used to allow a custom checkbox experience.
"""
def render(self, name, value, attrs=None):
label_for = attrs.get('id', '')
if not label_for:
attrs['id'] = uuidutils.generate_uuid()
label_for = attrs['id']
return html.format_html(
u'<div class="themable-checkbox">{}<label for="{}"></label></div>',
super(ThemableCheckboxInput, self).render(name, value, attrs),
label_for
)
# NOTE(adriant): SubWidget was removed in Django 1.11 and thus has been moved
# to our codebase until we redo how we handle widgets.
@html.html_safe
@python_2_unicode_compatible
class SubWidget(object):
"""SubWidget class from django 1.10.7 codebase
Some widgets are made of multiple HTML elements -- namely, RadioSelect.
This is a class that represents the "inner" HTML element of a widget.
"""
def __init__(self, parent_widget, name, value, attrs, choices):
self.parent_widget = parent_widget
self.name, self.value = name, value
self.attrs, self.choices = attrs, choices
def __str__(self):
args = [self.name, self.value, self.attrs]
if self.choices:
args.append(self.choices)
return self.parent_widget.render(*args)
# NOTE(adriant): ChoiceInput and CheckboxChoiceInput were removed in
# Django 1.11 so ChoiceInput has been moved to our codebase until we redo how
# we handle widgets.
@html.html_safe
@python_2_unicode_compatible
class ChoiceInput(SubWidget):
"""ChoiceInput class from django 1.10.7 codebase
An object used by ChoiceFieldRenderer that represents a single
<input type='$input_type'>.
"""
input_type = None # Subclasses must define this
def __init__(self, name, value, attrs, choice, index):
self.name = name
self.value = value
self.attrs = attrs
self.choice_value = force_text(choice[0])
self.choice_label = force_text(choice[1])
self.index = index
if 'id' in self.attrs:
self.attrs['id'] += "_%d" % self.index
def __str__(self):
return self.render()
def render(self, name=None, value=None, attrs=None):
if self.id_for_label:
label_for = html.format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
# NOTE(adriant): OrderedDict used to make html attrs order
# consistent for testing.
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return html.format_html(
'<label{}>{} {}</label>',
label_for,
self.tag(attrs),
self.choice_label
)
def is_checked(self):
return self.value == self.choice_value
def tag(self, attrs=None):
attrs = attrs or self.attrs
# NOTE(adriant): OrderedDict used to make html attrs order
# consistent for testing.
final_attrs = dict(
attrs,
type=self.input_type,
name=self.name,
value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return html.format_html('<input{} />', flatatt(final_attrs))
@property
def id_for_label(self):
return self.attrs.get('id', '')
# NOTE(adriant): CheckboxChoiceInput was removed in Django 1.11 so this widget
# has been expanded to include the functionality inherited previously as a
# temporary solution until we redo how we handle widgets.
class ThemableCheckboxChoiceInput(ChoiceInput):
input_type = 'checkbox'
def __init__(self, *args, **kwargs):
super(ThemableCheckboxChoiceInput, self).__init__(*args, **kwargs)
self.value = set(force_text(v) for v in self.value)
def is_checked(self):
return self.choice_value in self.value
def render(self, name=None, value=None, attrs=None, choices=()):
if self.id_for_label:
label_for = html.format_html(' for="{}"', self.id_for_label)
else:
label_for = ''
attrs = dict(self.attrs, **attrs) if attrs else self.attrs
return html.format_html(
u'<div class="themable-checkbox">{}<label{}>' +
u'<span>{}</span></label></div>',
self.tag(attrs), label_for, self.choice_label
)
# NOTE(adriant): CheckboxFieldRenderer was removed in Django 1.11 so
# has been moved here until we redo how we handle widgets.
@html.html_safe
@python_2_unicode_compatible
class CheckboxFieldRenderer(object):
"""CheckboxFieldRenderer class from django 1.10.7 codebase
An object used by RadioSelect to enable customization of radio widgets.
"""
choice_input_class = None
outer_html = '<ul{id_attr}>{content}</ul>'
inner_html = '<li>{choice_value}{sub_widgets}</li>'
def __init__(self, name, value, attrs, choices):
self.name = name
self.value = value
self.attrs = attrs
self.choices = choices
def __getitem__(self, idx):
return list(self)[idx]
def __iter__(self):
for idx, choice in enumerate(self.choices):
yield self.choice_input_class(
self.name, self.value, self.attrs.copy(), choice, idx)
def __str__(self):
return self.render()
def render(self):
"""Outputs a <ul> for this set of choice fields.
If an id was given to the field, it is applied to the <ul> (each
item in the list will get an id of `$id_$i`).
"""
id_ = self.attrs.get('id')
output = []
for i, choice in enumerate(self.choices):
choice_value, choice_label = choice
if isinstance(choice_label, (tuple, list)):
attrs_plus = self.attrs.copy()
if id_:
attrs_plus['id'] += '_{}'.format(i)
sub_ul_renderer = self.__class__(
name=self.name,
value=self.value,
attrs=attrs_plus,
choices=choice_label,
)
sub_ul_renderer.choice_input_class = self.choice_input_class
output.append(html.format_html(
self.inner_html, choice_value=choice_value,
sub_widgets=sub_ul_renderer.render(),
))
else:
w = self.choice_input_class(
self.name, self.value, self.attrs.copy(), choice, i)
output.append(html.format_html(
self.inner_html,
choice_value=force_text(w),
sub_widgets=''))
return html.format_html(
self.outer_html,
id_attr=html.format_html(' id="{}"', id_) if id_ else '',
content=mark_safe('\n'.join(output)),
)
class ThemableCheckboxFieldRenderer(CheckboxFieldRenderer):
choice_input_class = ThemableCheckboxChoiceInput
class ThemableCheckboxSelectMultiple(widgets.CheckboxSelectMultiple):
renderer = ThemableCheckboxFieldRenderer
_empty_value = []
class ExternalFileField(fields.FileField):
"""Special FileField to upload file to some external location.
This is a special flavor of FileField which is meant to be used in cases
when instead of uploading file to Django it should be uploaded to some
external location, while the form validation is done as usual. It should be
paired with ExternalUploadMeta metaclass embedded into the Form class.
"""
def __init__(self, *args, **kwargs):
super(ExternalFileField, self).__init__(*args, **kwargs)
self.widget.attrs.update({'data-external-upload': 'true'})
class ExternalUploadMeta(forms.DeclarativeFieldsMetaclass):
"""Metaclass to process ExternalFileField fields in a specific way.
Set this class as the metaclass of a form that contains ExternalFileField
in order to process ExternalFileField fields in a specific way.
A hidden CharField twin of FieldField is created which
contains just the filename (if any file was selected on browser side) and
a special `clean` method for FileField is defined which extracts just file
name. This allows to avoid actual file upload to Django server, yet
process form clean() phase as usual. Actual file upload happens entirely
on client-side.
"""
def __new__(mcs, name, bases, attrs):
def get_double_name(name):
suffix = '__hidden'
slen = len(suffix)
return name[:-slen] if name.endswith(suffix) else name + suffix
def make_clean_method(field_name):
def _clean_method(self):
value = self.cleaned_data[field_name]
if value:
self.cleaned_data[get_double_name(field_name)] = value
return value
return _clean_method
new_attrs = {}
for attr_name, attr in attrs.items():
new_attrs[attr_name] = attr
if isinstance(attr, ExternalFileField):
hidden_field = fields.CharField(widget=fields.HiddenInput,
required=False)
hidden_field.creation_counter = attr.creation_counter + 1000
new_attr_name = get_double_name(attr_name)
new_attrs[new_attr_name] = hidden_field
meth_name = 'clean_' + new_attr_name
new_attrs[meth_name] = make_clean_method(new_attr_name)
return super(ExternalUploadMeta, mcs).__new__(
mcs, name, bases, new_attrs)
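# Illustrative sketch (not part of the original module): pairing
# ExternalFileField with ExternalUploadMeta. For the hypothetical form below
# the metaclass adds a hidden "object_file__hidden" CharField plus a
# clean_object_file__hidden() method, so only the file name travels to the
# Django server and ends up in cleaned_data['object_file'].
#
#     class UploadForm(six.with_metaclass(ExternalUploadMeta, forms.Form)):
#         object_file = ExternalFileField(label=_("File"), required=False)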
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
from oslo.config import cfg
from nova.openstack.common import uuidutils
from nova import test
from nova.tests import fake_libvirt_utils
from nova.tests import fake_utils
from nova.virt.libvirt import imagebackend
CONF = cfg.CONF
class _ImageTestCase(object):
INSTANCES_PATH = '/instances_path'
def mock_create_image(self, image):
def create_image(fn, base, size, *args, **kwargs):
fn(target=base, *args, **kwargs)
image.create_image = create_image
def setUp(self):
super(_ImageTestCase, self).setUp()
self.flags(disable_process_locking=True,
instances_path=self.INSTANCES_PATH)
self.INSTANCE = {'name': 'instance',
'uuid': uuidutils.generate_uuid()}
self.NAME = 'fake.vm'
self.TEMPLATE = 'template'
self.OLD_STYLE_INSTANCE_PATH = \
fake_libvirt_utils.get_instance_path(self.INSTANCE, forceold=True)
self.PATH = os.path.join(
fake_libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
# TODO(mikal): rename template_dir to base_dir and template_path
# to cached_image_path. This will be less confusing.
self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.libvirt.imagebackend.libvirt_utils',
fake_libvirt_utils))
def test_cache(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
imagebackend.fileutils.ensure_tree(self.TEMPLATE_DIR)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_image_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(True)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.cache(None, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_base_dir_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
fn = self.mox.CreateMockAnything()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(imagebackend.fileutils, 'ensure_tree')
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_cache_template_exists(self):
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_DIR).AndReturn(True)
os.path.exists(self.PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(True)
fn = self.mox.CreateMockAnything()
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.mock_create_image(image)
image.cache(fn, self.TEMPLATE)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_utils.fake_execute_clear_log()
fake_utils.stub_out_utils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
# Call cache() twice to verify that the fallocate capability test is only run once.
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_utils.fake_execute_get_log(),
['fallocate -n -l 1 %s.fallocate_test' % self.PATH,
'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
class RawTestCase(_ImageTestCase, test.TestCase):
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Raw
super(RawTestCase, self).setUp()
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.lockutils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
self.mox.VerifyAll()
def test_create_image_generated(self):
fn = self.prepare_mocks()
fn(target=self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_extend(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH, image_id=None)
imagebackend.libvirt_utils.copy_image(self.TEMPLATE_PATH, self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE, image_id=None)
self.mox.VerifyAll()
class Qcow2TestCase(_ImageTestCase, test.TestCase):
SIZE = 1024 * 1024 * 1024
def setUp(self):
self.image_class = imagebackend.Qcow2
super(Qcow2TestCase, self).setUp()
self.QCOW2_BASE = (self.TEMPLATE_PATH +
'_%d' % (self.SIZE / (1024 * 1024 * 1024)))
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(imagebackend.lockutils.synchronized,
'__call__')
self.mox.StubOutWithMock(imagebackend.libvirt_utils,
'create_cow_image')
self.mox.StubOutWithMock(imagebackend.libvirt_utils, 'copy_image')
self.mox.StubOutWithMock(imagebackend.disk, 'extend')
return fn
def test_create_image(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def test_create_image_with_size(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
self.mox.StubOutWithMock(os.path, 'exists')
if self.OLD_STYLE_INSTANCE_PATH:
os.path.exists(self.OLD_STYLE_INSTANCE_PATH).AndReturn(False)
os.path.exists(self.TEMPLATE_PATH).AndReturn(False)
os.path.exists(self.PATH).AndReturn(False)
imagebackend.libvirt_utils.create_cow_image(self.TEMPLATE_PATH,
self.PATH)
imagebackend.disk.extend(self.PATH, self.SIZE)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
class LvmTestCase(_ImageTestCase, test.TestCase):
VG = 'FakeVG'
TEMPLATE_SIZE = 512
SIZE = 1024
def setUp(self):
self.image_class = imagebackend.Lvm
super(LvmTestCase, self).setUp()
self.flags(libvirt_images_volume_group=self.VG)
self.LV = '%s_%s' % (self.INSTANCE['name'], self.NAME)
self.OLD_STYLE_INSTANCE_PATH = None
self.PATH = os.path.join('/dev', self.VG, self.LV)
self.disk = imagebackend.disk
self.utils = imagebackend.utils
self.libvirt_utils = imagebackend.libvirt_utils
def prepare_mocks(self):
fn = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(self.disk, 'resize2fs')
self.mox.StubOutWithMock(self.libvirt_utils, 'create_lvm_image')
self.mox.StubOutWithMock(self.disk, 'get_disk_size')
self.mox.StubOutWithMock(self.utils, 'execute')
return fn
def _create_image(self, sparse):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.TEMPLATE_SIZE,
sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.mox.VerifyAll()
def _create_image_generated(self, sparse):
fn = self.prepare_mocks()
self.libvirt_utils.create_lvm_image(self.VG, self.LV,
self.SIZE, sparse=sparse)
fn(target=self.PATH, ephemeral_size=None)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH,
self.SIZE, ephemeral_size=None)
self.mox.VerifyAll()
def _create_image_resize(self, sparse):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG, self.LV,
self.SIZE, sparse=sparse)
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
cmd = ('qemu-img', 'convert', '-O', 'raw', self.TEMPLATE_PATH,
self.PATH)
self.utils.execute(*cmd, run_as_root=True)
self.disk.resize2fs(self.PATH, run_as_root=True)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image(self):
self._create_image(False)
def test_create_image_sparsed(self):
self.flags(libvirt_sparse_logical_volumes=True)
self._create_image(True)
def test_create_image_generated(self):
self._create_image_generated(False)
def test_create_image_generated_sparsed(self):
self.flags(libvirt_sparse_logical_volumes=True)
self._create_image_generated(True)
def test_create_image_resize(self):
self._create_image_resize(False)
def test_create_image_resize_sparsed(self):
self.flags(libvirt_sparse_logical_volumes=True)
self._create_image_resize(True)
def test_create_image_negative(self):
fn = self.prepare_mocks()
fn(target=self.TEMPLATE_PATH)
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.SIZE,
sparse=False
).AndRaise(RuntimeError())
self.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.TEMPLATE_SIZE)
self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
self.libvirt_utils.remove_logical_volumes(self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE)
self.mox.VerifyAll()
def test_create_image_generated_negative(self):
fn = self.prepare_mocks()
fn(target=self.PATH,
ephemeral_size=None).AndRaise(RuntimeError())
self.libvirt_utils.create_lvm_image(self.VG,
self.LV,
self.SIZE,
sparse=False)
self.mox.StubOutWithMock(self.libvirt_utils, 'remove_logical_volumes')
self.libvirt_utils.remove_logical_volumes(self.PATH)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
self.assertRaises(RuntimeError, image.create_image, fn,
self.TEMPLATE_PATH, self.SIZE,
ephemeral_size=None)
self.mox.VerifyAll()
def test_prealloc_image(self):
CONF.set_override('preallocate_images', 'space')
fake_utils.fake_execute_clear_log()
fake_utils.stub_out_utils_execute(self.stubs)
image = self.image_class(self.INSTANCE, self.NAME)
def fake_fetch(target, *args, **kwargs):
return
self.stubs.Set(os.path, 'exists', lambda _: True)
image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
self.assertEqual(fake_utils.fake_execute_get_log(), [])
class BackendTestCase(test.TestCase):
INSTANCE = {'name': 'fake-instance',
'uuid': uuidutils.generate_uuid()}
NAME = 'fake-name.suffix'
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).image(self.INSTANCE,
self.NAME,
image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
image1 = self.get_image(False, image_type)
image2 = self.get_image(True, image_type)
def assertIsInstance(instance, class_object):
failure = ('Expected %s,' +
' but got %s.') % (class_object.__name__,
instance.__class__.__name__)
self.assertTrue(isinstance(instance, class_object), failure)
assertIsInstance(image1, image_not_cow)
assertIsInstance(image2, image_cow)
def test_image_raw(self):
self._test_image('raw', imagebackend.Raw, imagebackend.Raw)
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_lvm(self):
self.flags(libvirt_images_volume_group='FakeVG')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
def test_image_default(self):
self._test_image('default', imagebackend.Raw, imagebackend.Qcow2)
|
|
import json
import copy
from itertools import chain
from account.views import SignupView
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User, Group
from django.contrib.admin.views.decorators import staff_member_required
from django.views.generic import TemplateView, ListView, DetailView, DeleteView, View
from django.views.generic.edit import FormView
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, HttpResponseForbidden, Http404, HttpResponseNotFound
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.db.models.query import EmptyQuerySet
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from rest_framework.decorators import list_route, detail_route
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, IsAdminUser, BasePermission
from django_tables2 import RequestConfig
from .forms import UserProfileForm, UserProfilesForm
from .forms import GroupProfileForm, GroupForm, GroupSearchForm
from .serializers import UserSerializer, UserProfileSerializer, \
get_rand_password, GroupProfileSerializer, UserSlugSerializer, GroupSerializer, \
UserResetSerializer
from .tables import GroupUserTable, GroupTable
from .models import GroupProfile, UserProfile
from .filters import GroupFilter
from .utils import GroupChangePermission, can_change_group
from guardian.shortcuts import get_objects_for_user
from guardian.decorators import permission_required_or_403
import logging
logger = logging.getLogger('django')
class UserChangePermission(BasePermission):
def has_object_permission(self, request, view, obj):
if request.user.is_superuser:
return True
return False
class GroupListView(ListView):
model = GroupProfile
template_name = 'ojuser/group_list.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(GroupListView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
qs = super(GroupListView, self).get_queryset()
profiles_can_view = get_objects_for_user(
self.request.user,
'ojuser.view_groupprofile',
with_superuser=True
)
self.group_can_view_qs = profiles_can_view
profiles_can_change = get_objects_for_user(
self.request.user,
'ojuser.change_groupprofile',
with_superuser=True
)
self.group_can_change_qs = profiles_can_change
profiles_can_delete = get_objects_for_user(
self.request.user,
'ojuser.delete_groupprofile',
with_superuser=True
)
self.group_can_delete_qs = profiles_can_delete
# self.filter = GroupFilter(self.request.GET, queryset=qs, user=self.request.user)
self.filter = GroupFilter(self.request.GET, queryset=profiles_can_view, user=self.request.user)
return self.filter.qs
def get_context_data(self, **kwargs):
context = super(GroupListView, self).get_context_data(**kwargs)
groups_table = GroupTable(self.get_queryset())
RequestConfig(self.request).configure(groups_table)
# add filter here
group_search_form = GroupSearchForm()
context["group_search_form"] = group_search_form
context['groups_table'] = groups_table
context['filter'] = self.filter
context['group_can_view'] = self.group_can_view_qs
context['group_can_change'] = self.group_can_change_qs
context['group_can_delete'] = self.group_can_delete_qs
tree_list = []
for u in self.group_can_view_qs:
p_name = '#'
if u.parent:
p_name = str(u.parent.pk)
url = reverse('mygroup-detail', args=[u.pk, ])
tree_list.append({
'id': str(u.pk),
'parent': p_name,
'text': u.nickname,
'state': {
'opened': True,
},
})
context['tree_list'] = json.dumps(tree_list)
return context
class GroupCreateView(TemplateView):
template_name = 'ojuser/group_create_form.html'
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
return super(GroupCreateView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
if self.group_profile_form.is_valid() and self.group_admins_form.is_valid():
gg = GroupProfile(**self.group_profile_form.cleaned_data)
# gg.superadmin = self.request.user
gg.save()
GroupForm(request.POST, instance=gg.admin_group).save()
return HttpResponseRedirect(reverse('mygroup-detail', args=[gg.pk, ]))
return super(GroupCreateView, self).render_to_response(context)
def get_context_data(self, **kwargs):
context = super(GroupCreateView, self).get_context_data(**kwargs)
self.group_profile_form = GroupProfileForm(self.request.POST or None)
self.group_admins_form = GroupForm(self.request.POST or None)
# group = GroupProfile.objects.filter(name='root').first()
groups = get_objects_for_user(
user=self.request.user,
perms='ojuser.change_groupprofile',
with_superuser=True)
admin_groups = Group.objects.filter(admin_profile__in=groups).all()
user_groups = Group.objects.filter(user_profile__in=groups).all()
all_user = User.objects.filter(pk=self.request.user.pk).all()
for g in user_groups:
all_user |= g.user_set.all()
all_admin = User.objects.filter(pk=self.request.user.pk).all()
for g in admin_groups:
all_admin |= g.user_set.all()
self.group_profile_form.fields["parent"].queryset = groups
self.group_profile_form.fields['superadmin'].queryset = all_admin.distinct()
self.group_admins_form.fields["admins"].queryset = all_user.distinct()
context["group_profile_form"] = self.group_profile_form
context["group_admins_form"] = self.group_admins_form
return context
class GroupUpdateView(TemplateView):
template_name = 'ojuser/group_update_form.html'
@method_decorator(permission_required_or_403(
'ojuser.change_groupprofile',
(GroupProfile, 'pk', 'pk')
))
def dispatch(self, request, *args, **kwargs):
return super(GroupUpdateView, self).dispatch(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
self.group_profile_form = GroupProfileForm(self.request.POST, instance=self.object)
self.group_admins_form = GroupForm(self.request.POST, instance=self.object.admin_group)
if self.group_profile_form.is_valid() and self.group_admins_form.is_valid():
self.group_profile_form.save()
self.group_admins_form.save()
return HttpResponseRedirect(reverse('mygroup-detail', args=[context['pk'], ]))
return super(GroupUpdateView, self).render_to_response(context)
def get_context_data(self, **kwargs):
context = super(GroupUpdateView, self).get_context_data(**kwargs)
self.pk = self.kwargs['pk']
qs = GroupProfile.objects.all()
self.object = get_object_or_404(qs, pk=self.pk)
self.group_profile_form = GroupProfileForm(instance=self.object)
user_queryset = User.objects.filter(pk__in=self.object.user_group.user_set.all())
self.group_admins_form = GroupForm(instance=self.object.admin_group)
self.group_admins_form.fields['admins'].widget.queryset = user_queryset
context["group_profile_form"] = self.group_profile_form
context["group_admins_form"] = self.group_admins_form
context['pk'] = self.pk
return context
class GroupDeleteView(DeleteView):
model = GroupProfile
template_name = 'ojuser/group_confirm_delete.html'
success_url = reverse_lazy('mygroup-list')
@method_decorator(permission_required_or_403(
'delete_groupprofile',
(GroupProfile, 'pk', 'pk')
))
def dispatch(self, request, *args, **kwargs):
return super(GroupDeleteView, self).dispatch(request, *args, **kwargs)
class UserDeleteView(DeleteView):
model = User
template_name = 'ojuser/user_confirm_delete.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
user = User.objects.filter(pk=int(kwargs.get('pk', -1))).first()
self.group = GroupProfile.objects.get(pk=kwargs.get('group', -1))
if not self.group or not user:
raise PermissionDenied
if self.group.change_by_user(user):
if not request.user.is_superuser and request.user != self.group.superadmin:
raise PermissionDenied
self.user = user
return super(UserDeleteView, self).dispatch(request, *args, **kwargs)
def get_success_url(self):
return reverse('mygroup-detail', kwargs={'pk': self.group.pk})
def delete(self, request, *args, **kwargs):
self.group.user_group.user_set.remove(self.user)
return HttpResponseRedirect(self.get_success_url())
class GroupDetailView(DetailView):
model = GroupProfile
template_name = 'ojuser/group_detail.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(GroupDetailView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
qs = super(GroupDetailView, self).get_queryset()
profiles_can_view = get_objects_for_user(
self.request.user,
'ojuser.view_groupprofile',
with_superuser=True
)
self.group_can_view_qs = profiles_can_view
profiles_can_change = get_objects_for_user(
self.request.user,
'ojuser.change_groupprofile',
with_superuser=True
)
self.group_can_change_qs = profiles_can_change
# self.filter = GroupFilter(self.request.GET, queryset=qs, user=self.request.user)
self.filter = GroupFilter(self.request.GET, queryset=profiles_can_view, user=self.request.user)
return self.filter.qs
def get_context_data(self, **kwargs):
context = super(GroupDetailView, self).get_context_data(**kwargs)
context['group_pk'] = context['object'].pk
group = context['object']
logger.debug(group.get_ancestors())
context['admins'] = group.admin_group.user_set.all()
context['children'] = group.get_children()
group_users = group.user_group.user_set.all()
group_users_table = GroupUserTable(group_users)
RequestConfig(self.request, paginate={'per_page': 35}).configure(group_users_table)
# add filter here
context['group_users_table'] = group_users_table
context['group_can_change'] = self.get_object().change_by_user(self.request.user)
context['can_reset_password'] = self.get_object().can_reset_member_password_by_user(self.request.user)
return context
class GroupMemberView(TemplateView):
# class GroupMemberView(DetailView):
# model = GroupProfile
template_name = 'ojuser/group_members.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(GroupMemberView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(GroupMemberView, self).get_context_data(**kwargs)
context['pk'] = kwargs['pk']
return context
class GroupResetView(DetailView):
template_name = 'ojuser/reset_members.html'
model = GroupProfile
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
group = GroupProfile.objects.get(pk=self.kwargs['pk'])
if not group.can_reset_member_password_by_user(request.user):
raise PermissionDenied
return super(GroupResetView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(GroupResetView, self).get_context_data(**kwargs)
group = context['object']
group_users = group.user_group.user_set.all()
context['users'] = group_users
return context
class UserAddView(TemplateView):
template_name = 'ojuser/user_add.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(UserAddView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
profiles_can_change = get_objects_for_user(
self.request.user,
'ojuser.change_groupprofile',
with_superuser=True
)
context = super(UserAddView, self).get_context_data(**kwargs)
context['group_can_change'] = profiles_can_change.all()
if 'group_pk' in self.request.GET:
context['select_group'] = int(self.request.GET['group_pk'])
return context
class UserQueryView(TemplateView):
"""
For admins to query someone's profile, groups and permissions
"""
template_name = 'ojuser/user_query.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(UserQueryView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
# TODO: I18N
context = super(UserQueryView, self).get_context_data(**kwargs)
if 'username' in self.request.GET:
username = self.request.GET['username']
try:
user = User.objects.select_related('profile').get(username=username)
except User.DoesNotExist:
context['query_error'] = "User named '{}' does not exist".format(username)
return context
if not self.request.user.is_staff:
context['query_error'] = "You don't have permission to view this user"
return context
joinedGroups = GroupProfile.objects.filter(user_group__user=user)
adminedGroups = GroupProfile.objects.filter(admin_group__user=user)
superAdminedGroup = GroupProfile.objects.filter(superadmin=user)
context['found_user'] = user
context['found_user_profile'] = user.profile
context['joined_groups'] = joinedGroups
context['admined_groups'] = adminedGroups
context['super_admined_groups'] = superAdminedGroup
return context
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
class UserProfileViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserProfileSerializer
permission_classes = [IsAuthenticated, UserChangePermission]
def list(self, request):
return Response()
@list_route(methods=['post'], permission_classes=[IsAuthenticated, UserChangePermission], url_path='bulk_create')
def create_users(self, request):
# return
if not request.user.is_staff:
raise PermissionDenied
mp = {}
for m in request.data['users']:
if 'password' not in m or m['password'] == '':
m['password'] = get_rand_password()
mp[m['username']] = m['password']
serializer = UserProfileSerializer(
data=request.data['users'], many=True, context={'request': request}
)
if serializer.is_valid():
try:
users = serializer.save()
except Exception, ex:
logger.error("add user error: %s\n", ex)
raise
for r in serializer.data:
r['password'] = mp[r['username']]
group_pk = request.data.get('group_pk', None)
if group_pk and len(group_pk) > 0:
try:
group = GroupProfile.objects.get(pk=int(group_pk))
group.user_group.user_set.add(*users)
group.save()
except Exception, ex:
logger.error("add user to group error: %s", ex)
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class GroupProfileViewSet(viewsets.ModelViewSet):
queryset = GroupProfile.objects.all()
serializer_class = GroupProfileSerializer
permission_classes = [IsAuthenticated, GroupChangePermission]
def list(self, request):
return Response()
@detail_route(methods=['post', 'get', 'put', ], url_path='members')
def manage_member(self, request, pk=None):
if not request.user.is_staff:
raise PermissionDenied
group = self.get_object()
if request.method == "POST" or request.method == "PUT":
users = []
errors = []
valid = 1
for x in request.data:
try:
user = User.objects.get(username=x['username'])
users.append(user)
errors.append({})
except User.DoesNotExist:
errors.append({"username": "user does not exist"})
valid = 0
if not valid:
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
group.user_group.user_set.clear()
group.user_group.user_set.add(*users)
serializer = UserSlugSerializer(group.user_group.user_set, many=True)
return Response(serializer.data)
@detail_route(methods=['post', 'get', 'put', ], url_path='reset')
def reset_member(self, request, pk=None):
group = self.get_object()
if not group.can_reset_member_password_by_user(request.user):
raise PermissionDenied
if request.method == "POST":
users = []
errors = []
valid = 1
for x in request.data:
try:
user = User.objects.get(pk=int(x))
users.append(user)
errors.append({})
except User.DoesNotExist:
errors.append({"pk": "user does not exist"})
valid = 0
if not valid:
return Response(errors, status=status.HTTP_400_BAD_REQUEST)
resp = []
for u in users:
pwd = get_rand_password()
u.set_password(pwd)
u.save()
resp.append({
'pk': u.pk,
'password': pwd,
})
serializer = UserResetSerializer(resp, many=True)
return Response(serializer.data)
return Response(status=status.HTTP_400_BAD_REQUEST)
class OjUserSignupView(SignupView):
form_class = UserProfileForm
def after_signup(self, form):
self.create_profile(form)
super(OjUserSignupView, self).after_signup(form)
def create_profile(self, form):
profile = self.created_user.profile
profile.nickname = form.cleaned_data["nickname"]
profile.gender = form.cleaned_data["gender"]
profile.save()
group = GroupProfile.objects.filter(name='public').first()
if group:
group.user_group.user_set.add(self.created_user)
class OjUserProfilesView(FormView):
template_name = 'account/profiles.html'
form_class = UserProfilesForm
# success_url = reverse_lazy('account')
success_url = '.'
messages = {
"profiles_updated": {
"level": messages.SUCCESS,
"text": _("Account profiles updated.")
},
}
def get_initial(self):
initial = super(OjUserProfilesView, self).get_initial()
profile = self.request.user.profile
initial["nickname"] = profile.nickname
initial["gender"] = profile.gender
return initial
def form_valid(self, form):
profile = self.request.user.profile
profile.gender = form.cleaned_data["gender"]
profile.nickname = form.cleaned_data["nickname"]
profile.save()
if self.messages.get("profiles_updated"):
messages.add_message(
self.request,
self.messages["profiles_updated"]["level"],
self.messages["profiles_updated"]["text"]
)
return redirect(self.get_success_url())
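# A hedged wiring sketch (illustrative only; the route prefixes below are
# assumptions, not taken from this project). Because UserProfileViewSet and
# GroupProfileViewSet use @list_route/@detail_route, they are meant to be
# exposed through a DRF router in urls.py, roughly:
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'userprofiles', UserProfileViewSet)
#   router.register(r'groupprofiles', GroupProfileViewSet)
#   urlpatterns = router.urls
#
#   # POST /userprofiles/bulk_create/    -> UserProfileViewSet.create_users()
#   # POST /groupprofiles/<pk>/members/  -> GroupProfileViewSet.manage_member()
#   # POST /groupprofiles/<pk>/reset/    -> GroupProfileViewSet.reset_member()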
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Uniform distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
class Uniform(distribution.Distribution):
"""Uniform distribution with `low` and `high` parameters.
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; a, b) = I[a <= x < b] / Z
Z = b - a
```
where:
* `low = a`,
* `high = b`,
* `Z` is the normalizing constant, and,
* `I[predicate]` is the [indicator function](
https://en.wikipedia.org/wiki/Indicator_function) for `predicate`.
The parameters `low` and `high` must be shaped in a way that supports
broadcasting (e.g., `high - low` is a valid operation).
#### Examples
```python
# Without broadcasting:
u1 = Uniform(low=3.0, high=4.0) # a single uniform distribution [3, 4]
u2 = Uniform(low=[1.0, 2.0],
high=[3.0, 4.0]) # 2 distributions [1, 3], [2, 4]
u3 = Uniform(low=[[1.0, 2.0],
[3.0, 4.0]],
high=[[1.5, 2.5],
[3.5, 4.5]]) # 4 distributions
```
```python
# With broadcasting:
u1 = Uniform(low=3.0, high=[5.0, 6.0, 7.0]) # 3 distributions
```
"""
def __init__(self,
low=0.,
high=1.,
validate_args=False,
allow_nan_stats=True,
name="Uniform"):
"""Initialize a batch of Uniform distributions.
Args:
low: Floating point tensor, lower boundary of the output interval. Must
have `low < high`.
high: Floating point tensor, upper boundary of the output interval. Must
have `low < high`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
InvalidArgumentError: if `low >= high` and `validate_args=True`.
"""
parameters = locals()
with ops.name_scope(name, values=[low, high]) as ns:
with ops.control_dependencies([
check_ops.assert_less(
low, high, message="uniform not defined when low >= high.")
] if validate_args else []):
self._low = array_ops.identity(low, name="low")
self._high = array_ops.identity(high, name="high")
contrib_tensor_util.assert_same_float_dtype([self._low, self._high])
super(Uniform, self).__init__(
dtype=self._low.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._low,
self._high],
name=ns)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("low", "high"),
([ops.convert_to_tensor(sample_shape, dtype=dtypes.int32)] * 2)))
@property
def low(self):
"""Lower boundary of the output interval."""
return self._low
@property
def high(self):
"""Upper boundary of the output interval."""
return self._high
def range(self, name="range"):
"""`high - low`."""
with self._name_scope(name):
return self.high - self.low
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.low),
array_ops.shape(self.high))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.low.get_shape(),
self.high.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
samples = random_ops.random_uniform(shape=shape,
dtype=self.dtype,
seed=seed)
return self.low + self.range() * samples
def _log_prob(self, x):
return math_ops.log(self._prob(x))
def _prob(self, x):
broadcasted_x = x * array_ops.ones(self.batch_shape_tensor())
return array_ops.where(
math_ops.is_nan(broadcasted_x),
broadcasted_x,
array_ops.where(
math_ops.logical_or(broadcasted_x < self.low,
broadcasted_x >= self.high),
array_ops.zeros_like(broadcasted_x),
array_ops.ones_like(broadcasted_x) / self.range()))
def _log_cdf(self, x):
return math_ops.log(self.cdf(x))
def _cdf(self, x):
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x), self.batch_shape_tensor())
zeros = array_ops.zeros(broadcast_shape, dtype=self.dtype)
ones = array_ops.ones(broadcast_shape, dtype=self.dtype)
broadcasted_x = x * ones
result_if_not_big = array_ops.where(
x < self.low, zeros, (broadcasted_x - self.low) / self.range())
return array_ops.where(x >= self.high, ones, result_if_not_big)
def _entropy(self):
return math_ops.log(self.range())
def _mean(self):
return (self.low + self.high) / 2.
def _variance(self):
return math_ops.square(self.range()) / 12.
def _stddev(self):
return self.range() / math.sqrt(12.)
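# A hedged, self-contained usage sketch (not part of the original module). It
# exercises the broadcasting behaviour and the moment formulas implemented
# above; the TF1-style Session import is an assumption about the contrib-era
# environment this file targets.
if __name__ == "__main__":
  from tensorflow.python.client import session

  u = Uniform(low=[1.0, 2.0], high=[3.0, 6.0])  # 2 distributions: [1, 3], [2, 6]
  with session.Session() as sess:
    mean, var, samples = sess.run([u.mean(), u.variance(), u.sample(4)])
    print(mean)           # -> [2., 4.]             ((low + high) / 2)
    print(var)            # -> [0.333..., 1.333...] (range()**2 / 12)
    print(samples.shape)  # -> (4, 2)               (n draws x batch)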
|
|
import json
import os
from functools import wraps
from flask import Flask, request, abort, redirect, url_for, render_template, \
send_from_directory, Blueprint, g, make_response
from ..api import MissingEntry
app = Flask(__name__, static_url_path='')
blueprint = Blueprint('formgrade', __name__)
def auth(f):
"""Authenticated flask app route."""
@wraps(f)
def authenticated(*args, **kwargs):
result = app.auth.authenticate()
if result is True:
pass # Success
elif result is False:
abort(403) # Forbidden
else:
return result # Redirect
return f(*args, **kwargs)
return authenticated
def set_index(url, request):
if 'index' in request.args:
return "{}?index={}".format(url, request.args.get('index'))
else:
return url
@app.errorhandler(500)
def internal_server_error(e):
return render_template(
'gradebook_500.tpl',
base_url=app.auth.base_url,
error_code=500), 500
@app.errorhandler(502)
def upstream_server_error(e):
return render_template(
'gradebook_500.tpl',
base_url=app.auth.base_url,
error_code=502), 502
@blueprint.errorhandler(403)
def unauthorized(e):
return render_template(
'gradebook_403.tpl',
base_url=app.auth.base_url,
error_code=403), 403
@blueprint.url_defaults
def bp_url_defaults(endpoint, values):
name = getattr(g, 'name', None)
if name is not None:
values.setdefault('name', name)
@blueprint.url_value_preprocessor
def bp_url_value_preprocessor(endpoint, values):
g.name = values.pop('name')
@blueprint.route("/static/<path:filename>")
def static_proxy(filename):
return send_from_directory(os.path.join(app.root_path, 'static'), filename)
@blueprint.route("/fonts/<filename>")
def fonts(filename):
return redirect(url_for('.static_proxy', filename=os.path.join("components", "bootstrap", "fonts", filename)))
@blueprint.route("/submissions/components/<path:filename>")
@auth
def components(filename):
return redirect(url_for('.static_proxy', filename=os.path.join("components", filename)))
@blueprint.route("/mathjax/<path:filename>")
@auth
def mathjax(filename):
return send_from_directory(os.path.dirname(app.mathjax_url), filename)
@blueprint.route("/")
@auth
def home():
return redirect(url_for('.view_assignments'))
@blueprint.route("/assignments/")
@auth
def view_assignments():
assignments = []
for assignment in app.gradebook.assignments:
x = assignment.to_dict()
x["average_score"] = app.gradebook.average_assignment_score(assignment.name)
x["average_code_score"] = app.gradebook.average_assignment_code_score(assignment.name)
x["average_written_score"] = app.gradebook.average_assignment_written_score(assignment.name)
assignments.append(x)
return render_template(
"assignments.tpl",
assignments=assignments,
base_url=app.auth.base_url)
@blueprint.route("/students/")
@auth
def view_students():
students = app.gradebook.student_dicts()
students.sort(key=lambda x: x.get("last_name") or "no last name")
return render_template(
"students.tpl",
students=students,
base_url=app.auth.base_url)
@blueprint.route("/assignments/<assignment_id>/")
@auth
def view_assignment(assignment_id):
try:
assignment = app.gradebook.find_assignment(assignment_id)
except MissingEntry:
abort(404)
notebooks = []
for notebook in assignment.notebooks:
x = notebook.to_dict()
x["average_score"] = app.gradebook.average_notebook_score(notebook.name, assignment.name)
x["average_code_score"] = app.gradebook.average_notebook_code_score(notebook.name, assignment.name)
x["average_written_score"] = app.gradebook.average_notebook_written_score(notebook.name, assignment.name)
notebooks.append(x)
assignment = assignment.to_dict()
return render_template(
"assignment_notebooks.tpl",
assignment=assignment,
notebooks=notebooks,
base_url=app.auth.base_url)
@blueprint.route("/students/<student_id>/")
@auth
def view_student(student_id):
try:
student = app.gradebook.find_student(student_id)
except MissingEntry:
abort(404)
submissions = []
for assignment in app.gradebook.assignments:
try:
submission = app.gradebook.find_submission(assignment.name, student.id).to_dict()
except MissingEntry:
submission = {
"id": None,
"name": assignment.name,
"student": student.id,
"duedate": None,
"timestamp": None,
"extension": None,
"total_seconds_late": 0,
"score": 0,
"max_score": assignment.max_score,
"code_score": 0,
"max_code_score": assignment.max_code_score,
"written_score": 0,
"max_written_score": assignment.max_written_score,
"needs_manual_grade": False
}
submissions.append(submission)
submissions.sort(key=lambda x: x.get("duedate") or "no due date")
student = student.to_dict()
return render_template(
"student_assignments.tpl",
assignments=submissions,
student=student,
base_url=app.auth.base_url)
@blueprint.route("/assignments/<assignment_id>/<notebook_id>/")
@auth
def view_assignment_notebook(assignment_id, notebook_id):
try:
app.gradebook.find_notebook(notebook_id, assignment_id)
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submission_dicts(notebook_id, assignment_id)
submissions.sort(key=lambda x: x["id"])
for i, submission in enumerate(submissions):
submission["index"] = i
return render_template(
"notebook_submissions.tpl",
notebook_id=notebook_id,
assignment_id=assignment_id,
submissions=submissions,
base_url=app.auth.base_url)
@blueprint.route("/students/<student_id>/<assignment_id>/")
@auth
def view_student_assignment(student_id, assignment_id):
try:
assignment = app.gradebook.find_submission(assignment_id, student_id)
except MissingEntry:
abort(404)
submissions = [n.to_dict() for n in assignment.notebooks]
submissions.sort(key=lambda x: x['name'])
return render_template(
"student_submissions.tpl",
assignment_id=assignment_id,
student=assignment.student.to_dict(),
submissions=submissions,
base_url=app.auth.base_url
)
@blueprint.route("/submissions/<submission_id>/<path:path>")
@auth
def view_submission_files(submission_id, path):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
student_id = submission.student.id
except MissingEntry:
abort(404)
dirname = os.path.join(app.notebook_dir, app.notebook_dir_format.format(
nbgrader_step=app.nbgrader_step,
assignment_id=assignment_id,
student_id=student_id))
return send_from_directory(dirname, path)
@blueprint.route("/submissions/<submission_id>/next")
@auth
def view_next_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
notebook_id = submission.notebook.name
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submissions(notebook_id, assignment_id)
# find next submission
submission_ids = sorted([x.id for x in submissions])
ix = submission_ids.index(submission.id)
if ix == (len(submissions) - 1):
return redirect(url_for('.view_assignment_notebook', assignment_id=assignment_id, notebook_id=notebook_id))
else:
return redirect(set_index(
url_for('.view_submission', submission_id=submission_ids[ix + 1]), request))
@blueprint.route("/submissions/<submission_id>/next_incorrect")
@auth
def view_next_incorrect_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
notebook_id = submission.notebook.name
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submission_dicts(notebook_id, assignment_id)
# find next incorrect submission
incorrect_ids = set([x['id'] for x in submissions if x['failed_tests']])
incorrect_ids.add(submission.id)
incorrect_ids = sorted(incorrect_ids)
ix_incorrect = incorrect_ids.index(submission.id)
if ix_incorrect == (len(incorrect_ids) - 1):
return redirect(url_for('.view_assignment_notebook', assignment_id=assignment_id, notebook_id=notebook_id))
else:
return redirect(set_index(
url_for('.view_submission', submission_id=incorrect_ids[ix_incorrect + 1]), request))
@blueprint.route("/submissions/<submission_id>/prev")
@auth
def view_prev_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
notebook_id = submission.notebook.name
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submissions(notebook_id, assignment_id)
# find previous submission
submission_ids = sorted([x.id for x in submissions])
ix = submission_ids.index(submission.id)
if ix == 0:
return redirect(url_for('.view_assignment_notebook', assignment_id=assignment_id, notebook_id=notebook_id))
else:
return redirect(set_index(
url_for('.view_submission', submission_id=submission_ids[ix - 1]), request))
@blueprint.route("/submissions/<submission_id>/prev_incorrect")
@auth
def view_prev_incorrect_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
notebook_id = submission.notebook.name
except MissingEntry:
abort(404)
submissions = app.gradebook.notebook_submission_dicts(notebook_id, assignment_id)
# find previous incorrect submission
incorrect_ids = set([x['id'] for x in submissions if x['failed_tests']])
incorrect_ids.add(submission.id)
incorrect_ids = sorted(incorrect_ids)
ix_incorrect = incorrect_ids.index(submission.id)
if ix_incorrect == 0:
return redirect(url_for('.view_assignment_notebook', assignment_id=assignment_id, notebook_id=notebook_id))
else:
return redirect(set_index(
url_for('.view_submission', submission_id=incorrect_ids[ix_incorrect - 1]), request))
@blueprint.route("/submissions/<submission_id>/")
@auth
def view_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
assignment_id = submission.assignment.assignment.name
notebook_id = submission.notebook.name
student_id = submission.student.id
except MissingEntry:
abort(404)
notebook_dir_format = os.path.join(app.notebook_dir_format, "{notebook_id}.ipynb")
filename = os.path.join(app.notebook_dir, notebook_dir_format.format(
nbgrader_step=app.nbgrader_step,
assignment_id=assignment_id,
notebook_id=notebook_id,
student_id=student_id))
submissions = app.gradebook.notebook_submissions(notebook_id, assignment_id)
submission_ids = sorted([x.id for x in submissions])
ix = submission_ids.index(submission.id)
server_exists = app.auth.notebook_server_exists()
server_cookie = app.auth.get_notebook_server_cookie()
if app.mathjax_url.startswith("http"):
mathjax_url = app.mathjax_url
else:
mathjax_url = url_for(".mathjax", filename='MathJax.js')
resources = {
'assignment_id': assignment_id,
'notebook_id': notebook_id,
'submission_id': submission.id,
'index': ix,
'total': len(submissions),
'notebook_server_exists': server_exists,
'base_url': app.auth.base_url,
'mathjax_url': mathjax_url
}
if server_exists:
relative_path = os.path.relpath(filename, app.notebook_dir)
resources['notebook_path'] = app.auth.get_notebook_url(relative_path)
if not os.path.exists(filename):
return render_template('formgrade_404.tpl', resources=resources), 404
output, resources = app.exporter.from_filename(filename, resources=resources)
response = make_response(output)
if server_cookie:
response.set_cookie(**server_cookie)
return response
@blueprint.route("/api/grades")
@auth
def get_all_grades():
submission_id = request.args["submission_id"]
try:
notebook = app.gradebook.find_submission_notebook_by_id(submission_id)
except MissingEntry:
abort(404)
return json.dumps([g.to_dict() for g in notebook.grades])
@blueprint.route("/api/comments")
@auth
def get_all_comments():
submission_id = request.args["submission_id"]
try:
notebook = app.gradebook.find_submission_notebook_by_id(submission_id)
except MissingEntry:
abort(404)
return json.dumps([c.to_dict() for c in notebook.comments])
@blueprint.route("/api/grade/<_id>", methods=["GET", "PUT"])
@auth
def get_grade(_id):
try:
grade = app.gradebook.find_grade_by_id(_id)
except MissingEntry:
abort(404)
if request.method == "PUT":
grade.manual_score = request.json.get("manual_score", None)
if grade.manual_score is None and grade.auto_score is None:
grade.needs_manual_grade = True
else:
grade.needs_manual_grade = False
app.gradebook.db.commit()
return json.dumps(grade.to_dict())
@blueprint.route("/api/comment/<_id>", methods=["GET", "PUT"])
@auth
def get_comment(_id):
try:
comment = app.gradebook.find_comment_by_id(_id)
except MissingEntry:
abort(404)
if request.method == "PUT":
comment.manual_comment = request.json.get("manual_comment", None)
app.gradebook.db.commit()
return json.dumps(comment.to_dict())
@blueprint.route("/api/submission/<submission_id>/flag")
@auth
def flag_submission(submission_id):
try:
submission = app.gradebook.find_submission_notebook_by_id(submission_id)
except MissingEntry:
abort(404)
submission.flagged = not submission.flagged
app.gradebook.db.commit()
return json.dumps(submission.to_dict())
app.register_blueprint(blueprint, url_defaults={'name': ''})
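# Note (a hedged sketch of the wiring, not nbgrader's documented API): the
# routes above expect the embedding process to attach its collaborators to
# `app` before serving, roughly:
#
#   app.auth                 # provides authenticate(), base_url,
#                            # notebook_server_exists(), get_notebook_url(), ...
#   app.gradebook            # gradebook object used for lookups and commits
#   app.exporter             # nbconvert-style exporter with from_filename()
#   app.notebook_dir         # root directory containing the graded notebooks
#   app.notebook_dir_format  # format string using {nbgrader_step},
#                            # {assignment_id} and {student_id}
#   app.nbgrader_step        # name of the step directory (e.g. "autograded")
#   app.mathjax_url          # local path or http(s) URL to MathJax.js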
if __name__ == "__main__":
app.run(debug=True)
|
|
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.power import Pow
from sympy.core.function import Function
from sympy.core.symbol import Symbol, Wild
from sympy.core.basic import S, C, Atom, sympify
from sympy.core.numbers import Integer, Rational
from sympy.functions import exp, sin , cos , tan , cot , asin
from sympy.functions import log, sinh, cosh, tanh, coth, asinh
from sympy.functions import sqrt, erf
from sympy.solvers import solve
from sympy.simplify import simplify, together
from sympy.polys import Poly, quo, gcd, lcm, root_factors, \
monomials, factor, PolynomialError
from sympy.utilities.iterables import make_list
def components(f, x):
"""Returns a set of all functional components of the given expression
which includes symbols, function applications and compositions, and
non-integer powers. Fractional powers are collected with
minimal, positive exponents.
>>> from sympy import *
>>> x, y = symbols('xy')
>>> components(sin(x)*cos(x)**2, x)
set([x, cos(x), sin(x)])
"""
result = set()
if f.has(x):
if f.is_Symbol:
result.add(f)
elif f.is_Function or f.is_Derivative:
for g in f.args:
result |= components(g, x)
result.add(f)
elif f.is_Pow:
result |= components(f.base, x)
if not f.exp.is_Integer:
if f.exp.is_Rational:
result.add(f.base**Rational(1, f.exp.q))
else:
result |= components(f.exp, x) | set([f])
else:
for g in f.args:
result |= components(g, x)
return result
# name -> [] of symbols
_symbols_cache = {}
# NB @cacheit is not convenient here
def _symbols(name, n):
"""get vector of symbols local to this module"""
try:
lsyms = _symbols_cache[name]
except KeyError:
lsyms = []
_symbols_cache[name] = lsyms
while len(lsyms) < n:
lsyms.append( Symbol('%s%i' % (name, len(lsyms)), dummy=True) )
return lsyms[:n]
def heurisch(f, x, **kwargs):
"""Compute indefinite integral using heuristic Risch algorithm.
This is a heuristic approach to indefinite integration in finite
terms using an extended heuristic (parallel) Risch algorithm, based
on Manuel Bronstein's "Poor Man's Integrator".
The algorithm supports various classes of functions including
transcendental elementary or special functions like Airy,
Bessel, Whittaker and Lambert.
Note that this algorithm is not a decision procedure. If it isn't
able to compute the antiderivative for a given function, then this is
not a proof that such a function does not exist. One should use the
recursive Risch algorithm in such a case. It's an open question whether
this algorithm can be made a full decision procedure.
This is an internal integrator procedure. You should use toplevel
'integrate' function in most cases, as this procedure needs some
preprocessing steps and otherwise may fail.
Specification
============
heurisch(f, x, rewrite=False, hints=None)
where
f : expression
x : symbol
rewrite -> force rewrite 'f' in terms of 'tan' and 'tanh'
hints -> a list of functions that may appear in the antiderivative
- hints = None --> no suggestions at all
- hints = [ ] --> try to figure out
- hints = [f1, ..., fn] --> we know better
Examples
========
>>> from sympy import *
>>> x,y = symbols('xy')
>>> heurisch(y*tan(x), x)
y*log(1 + tan(x)**2)/2
See Manuel Bronstein's "Poor Man's Integrator":
[1] http://www-sop.inria.fr/cafe/Manuel.Bronstein/pmint/index.html
For more information on the implemented algorithm refer to:
[2] K. Geddes, L.Stefanus, On the Risch-Norman Integration
Method and its Implementation in Maple, Proceedings of
ISSAC'89, ACM Press, 212-217.
[3] J. H. Davenport, On the Parallel Risch Algorithm (I),
Proceedings of EUROCAM'82, LNCS 144, Springer, 144-157.
[4] J. H. Davenport, On the Parallel Risch Algorithm (III):
Use of Tangents, SIGSAM Bulletin 16 (1982), 3-6.
[5] J. H. Davenport, B. M. Trager, On the Parallel Risch
Algorithm (II), ACM Transactions on Mathematical
Software 11 (1985), 356-362.
"""
f = sympify(f)
if not f.is_Add:
indep, f = f.as_independent(x)
else:
indep = S.One
if not f.has(x):
return indep * f * x
rewritables = {
(sin, cos, cot) : tan,
(sinh, cosh, coth) : tanh,
}
rewrite = kwargs.pop('rewrite', False)
if rewrite:
for candidates, rule in rewritables.iteritems():
f = f.rewrite(candidates, rule)
else:
for candidates in rewritables.iterkeys():
if f.has(*candidates):
break
else:
rewrite = True
terms = components(f, x)
hints = kwargs.get('hints', None)
if hints is not None:
if not hints:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
for g in set(terms):
if g.is_Function:
if g.func is exp:
M = g.args[0].match(a*x**2)
if M is not None:
terms.add(erf(sqrt(-M[a])*x))
elif g.is_Pow:
if g.exp.is_Rational and g.exp.q == 2:
M = g.base.match(a*x**2 + b)
if M is not None and M[b].is_positive:
if M[a].is_positive:
terms.add(asinh(sqrt(M[a]/M[b])*x))
elif M[a].is_negative:
terms.add(asin(sqrt(-M[a]/M[b])*x))
else:
terms |= set(hints)
for g in set(terms):
terms |= components(g.diff(x), x)
V = _symbols('x', len(terms))
mapping = dict(zip(terms, V))
rev_mapping = {}
for k, v in mapping.iteritems():
rev_mapping[v] = k
def substitute(expr):
return expr.subs(mapping)
diffs = [ substitute(simplify(g.diff(x))) for g in terms ]
denoms = [ g.as_numer_denom()[1] for g in diffs ]
denom = reduce(lambda p, q: lcm(p, q, V), denoms)
numers = [ Poly.cancel(denom * g, *V) for g in diffs ]
def derivation(h):
return Add(*[ d * h.diff(v) for d, v in zip(numers, V) ])
def deflation(p):
for y in V:
if not p.has_any_symbols(y):
continue
if derivation(p) is not S.Zero:
c, q = p.as_poly(y).as_primitive()
return deflation(c)*gcd(q, q.diff(y))
else:
return p
def splitter(p):
for y in V:
if not p.has_any_symbols(y):
continue
if derivation(y) is not S.Zero:
c, q = p.as_poly(y).as_primitive()
q = q.as_basic()
h = gcd(q, derivation(q), y)
s = quo(h, gcd(q, q.diff(y), y), y)
c_split = splitter(c)
if s.as_poly(y).degree == 0:
return (c_split[0], q * c_split[1])
q_split = splitter(Poly.cancel((q, s), *V))
return (c_split[0]*q_split[0]*s, c_split[1]*q_split[1])
else:
return (S.One, p)
special = {}
for term in terms:
if term.is_Function:
if term.func is tan:
special[1 + substitute(term)**2] = False
elif term.func is tanh:
special[1 + substitute(term)] = False
special[1 - substitute(term)] = False
elif term.func is C.LambertW:
special[substitute(term)] = True
F = substitute(f)
P, Q = F.as_numer_denom()
u_split = splitter(denom)
v_split = splitter(Q)
polys = list(v_split) + [ u_split[0] ] + special.keys()
s = u_split[0] * Mul(*[ k for k, v in special.iteritems() if v ])
a, b, c = [ p.as_poly(*V).degree for p in [s, P, Q] ]
poly_denom = s * v_split[0] * deflation(v_split[1])
def exponent(g):
if g.is_Pow:
if g.exp.is_Rational and g.exp.q != 1:
if g.exp.p > 0:
return g.exp.p + g.exp.q - 1
else:
return abs(g.exp.p + g.exp.q)
else:
return 1
elif not g.is_Atom:
return max([ exponent(h) for h in g.args ])
else:
return 1
A, B = exponent(f), a + max(b, c)
if A > 1 and B > 1:
monoms = monomials(V, A + B - 1)
else:
monoms = monomials(V, A + B)
poly_coeffs = _symbols('A', len(monoms))
poly_part = Add(*[ poly_coeffs[i]*monomial
for i, monomial in enumerate(monoms) ])
reducibles = set()
for poly in polys:
if poly.has(*V):
try:
factorization = factor(poly, *V)
except PolynomialError:
factorization = poly
if factorization.is_Mul:
reducibles |= set(factorization.args)
else:
reducibles.add(factorization)
def integrate(field=None):
irreducibles = set()
for poly in reducibles:
for z in poly.atoms(Symbol):
if z in V:
break
else:
continue
irreducibles |= set(root_factors(poly, z, domain=field))
log_coeffs, log_part = [], []
B = _symbols('B', len(irreducibles))
for i, poly in enumerate(irreducibles):
if poly.has(*V):
log_coeffs.append(B[i])
log_part.append(log_coeffs[-1] * log(poly))
coeffs = poly_coeffs + log_coeffs
candidate = poly_part/poly_denom + Add(*log_part)
h = together(F - derivation(candidate) / denom)
numer = h.as_numer_denom()[0].expand()
equations = {}
for term in make_list(numer, Add):
coeff, dependent = term.as_independent(*V)
if dependent in equations:
equations[dependent] += coeff
else:
equations[dependent] = coeff
solution = solve(equations.values(), *coeffs)
if solution is not None:
return (solution, candidate, coeffs)
else:
return None
if not (F.atoms(Symbol) - set(V)):
result = integrate('Q')
if result is None:
result = integrate()
else:
result = integrate()
if result is not None:
(solution, candidate, coeffs) = result
antideriv = candidate.subs(solution)
for coeff in coeffs:
if coeff not in solution:
antideriv = antideriv.subs(coeff, S.Zero)
antideriv = antideriv.subs(rev_mapping)
antideriv = simplify(antideriv).expand()
if antideriv.is_Add:
antideriv = antideriv.as_independent(x)[1]
return indep * antideriv
else:
if not rewrite:
result = heurisch(f, x, rewrite=True, **kwargs)
if result is not None:
return indep * result
return None
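# A hedged usage sketch (not part of the original module) of the `hints`
# semantics documented in heurisch() above: hints=None suggests nothing extra,
# while hints=[] lets the routine guess special functions such as erf for
# Gaussian-type integrands. The exact printed form depends on the (old) SymPy
# version this module belongs to.
if __name__ == '__main__':
    x = Symbol('x')
    print heurisch(exp(-x**2), x)            # no suggestions -> None
    print heurisch(exp(-x**2), x, hints=[])  # erf is guessed, roughly sqrt(pi)*erf(x)/2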
|
|
from filebeat import BaseTest
import os
import time
import unittest
"""
Tests for the prospector functionality.
"""
class Test(BaseTest):
def test_ignore_old_files(self):
"""
Should ignore files that were not modified for longer than
the `ignore_older` setting.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignoreOlder="1s"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 5
for n in range(0, iterations):
file.write("hello world") # 11 chars
file.write("\n") # 1 char
file.close()
# sleep for more than ignore older
time.sleep(2)
proc = self.start_beat()
# wait for the "Skipping file" log message
self.wait_until(
lambda: self.log_contains(
"Skipping file (older than ignore older of 1s"),
max_timeout=10)
proc.check_kill_and_wait()
def test_not_ignore_old_files(self):
"""
Should not ignore files that were modified more recently than
the `ignore_older` setting.
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignoreOlder="15s"
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
iterations = 5
for n in range(0, iterations):
file.write("hello world") # 11 chars
file.write("\n") # 1 char
file.close()
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=iterations), max_timeout=10)
proc.check_kill_and_wait()
objs = self.read_output()
assert len(objs) == 5
def test_stdin(self):
"""
Test stdin input. Checks if reading is continued after the first read.
"""
self.render_config_template(
input_type="stdin"
)
proc = self.start_beat()
self.wait_until(
lambda: self.log_contains(
"Harvester started for file: -"),
max_timeout=10)
iterations1 = 5
for n in range(0, iterations1):
os.write(proc.stdin_write, "Hello World\n")
self.wait_until(
lambda: self.output_has(lines=iterations1),
max_timeout=15)
iterations2 = 10
for n in range(0, iterations2):
os.write(proc.stdin_write, "Hello World\n")
self.wait_until(
lambda: self.output_has(lines=iterations1 + iterations2),
max_timeout=15)
proc.check_kill_and_wait()
objs = self.read_output()
assert len(objs) == iterations1 + iterations2
@unittest.skip("Needs fix from #964")
def test_rotating_close_older_larger_write_rate(self):
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignoreOlder="10s",
closeOlder="1s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
proc = self.start_beat()
time.sleep(1)
rotations = 2
iterations = 3
for r in range(rotations):
with open(testfile, 'w', 0) as file:
for n in range(iterations):
file.write("hello world {}\n".format(r * iterations + n))
time.sleep(0.1)
os.rename(testfile, testfile + str(time.time()))
lines = rotations * iterations
self.wait_until(
# allow for events to be sent multiple times due to log rotation
lambda: self.output_count(lambda x: x >= lines),
max_timeout=15)
proc.check_kill_and_wait()
def test_exclude_files(self):
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
exclude_files=[".gz$"]
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.gz"
file = open(testfile, 'w')
file.write("line in gz file\n")
file.close()
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w')
file.write("line in log file\n")
file.close()
filebeat = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=15)
# TODO: Find a better solution to know when filebeat has crawled the file
# Idea: Special flag to filebeat so that filebeat is only doing one
# crawl and then finishes
filebeat.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 1 == len(output)
assert output[0]["message"] == "line in log file"
def test_rotating_close_older_low_write_rate(self):
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignoreOlder="10s",
closeOlder="1s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
# log rotate
os.rename(testfile, testfile + ".1")
open(testfile, 'w').close()
# wait for file to be closed due to close_older
self.wait_until(
lambda: self.log_contains(
"Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
# wait a bit longer (on 1.0.1 this would cause the harvester
# to get in a state that resulted in it watching the wrong
# inode for changes)
time.sleep(2)
# write second line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
self.wait_until(
# allow for events to be sent multiple times due to log rotation
lambda: self.output_count(lambda x: x >= lines),
max_timeout=5)
filebeat.check_kill_and_wait()
def test_shutdown_no_prospectors(self):
"""
In case no prospectors are defined, filebeat must shut down and report an error
"""
self.render_config_template(
prospectors=False,
)
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"No prospectors defined"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains(
"Exiting"),
max_timeout=10)
filebeat.check_kill_and_wait(exit_code=1)
def test_no_paths_defined(self):
"""
In case a prospector is defined but doesn't contain any paths, the prospector must return an error,
which leads to a shutdown of filebeat because of the configuration error
"""
self.render_config_template(
)
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"No paths were defined for prospector"),
max_timeout=10)
self.wait_until(
lambda: self.log_contains(
"Exiting"),
max_timeout=10)
filebeat.check_kill_and_wait(exit_code=1)
def test_files_added_late(self):
"""
Tests that prospectors stay running even though no harvesters are started yet
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
)
os.mkdir(self.working_dir + "/log/")
filebeat = self.start_beat()
# wait until events are sent for the first time
self.wait_until(
lambda: self.log_contains(
"Events flushed"),
max_timeout=10)
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'a') as file:
file.write("Hello World1\n")
file.write("Hello World2\n")
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=15)
filebeat.check_kill_and_wait()
def test_close_older(self):
"""
Test that close_older closes the file but reading
is picked up again after scan_frequency
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignoreOlder="1h",
closeOlder="1s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
# wait for file to be closed due to close_older
self.wait_until(
lambda: self.log_contains(
"Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
# write second line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
self.wait_until(
# allow for events to be sent multiple times due to log rotation
lambda: self.output_count(lambda x: x >= lines),
max_timeout=5)
filebeat.check_kill_and_wait()
def test_close_older_file_removal(self):
"""
Test that close_older still applies even if the file to close was removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
ignoreOlder="1h",
closeOlder="3s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.remove(testfile)
# wait for file to be closed due to close_older
self.wait_until(
lambda: self.log_contains(
"Stopping harvester, closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
filebeat.check_kill_and_wait()
def test_close_older_file_rotation_and_removal(self):
"""
Test that close_older still applies even if the file to close was renamed and then removed
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
ignoreOlder="1h",
closeOlder="3s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
renamed_file = self.working_dir + "/log/test_renamed.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.rename(testfile, renamed_file)
os.remove(renamed_file)
# wait for file to be closed due to close_older
self.wait_until(
lambda: self.log_contains(
# Still checking for old file name as filename does not change in harvester
"Closing file: {}\n".format(os.path.abspath(testfile))),
max_timeout=10)
filebeat.check_kill_and_wait()
    def test_close_older_file_rotation_new_file_and_removal(self):
        """
        Test that close_older still applies if the file was rotated,
        a new file was created, and the rotated file was removed
        """
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/test.log",
ignoreOlder="1h",
closeOlder="3s",
scan_frequency="0.1s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
renamed_file = self.working_dir + "/log/test_renamed.log"
filebeat = self.start_beat()
# wait for first "Start next scan" log message
self.wait_until(
lambda: self.log_contains(
"Start next scan"),
max_timeout=10)
lines = 0
# write first line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.rename(testfile, renamed_file)
# write second line
lines += 1
with open(testfile, 'a') as file:
file.write("Line {}\n".format(lines))
# wait for log to be read
self.wait_until(
lambda: self.output_has(lines=lines),
max_timeout=15)
os.remove(renamed_file)
# Wait until both files are closed
self.wait_until(
lambda: self.log_contains_count(
# Checking if two files were closed
"Stopping harvester, closing file: ") == 2,
max_timeout=10)
filebeat.check_kill_and_wait()
|
|
import os
import datetime as dt
import time
import glob
import pytz
import netCDF4 as nc
import numpy as np
import extract_utils
import extract_from_file
__version__ = '0.1.5'
# def file_time(f):
# UTC = pytz.timezone('UTC')
# ncf = nc.Dataset(f,mode='r')
# ot = ncf.variables['ocean_time']
# base_time = dt.datetime.strptime(ot.units,'seconds since %Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
# offset = dt.timedelta(seconds=ot[0][0])
# ncf.close()
# return base_time + offset
#
# def file_timestamp(f):
# t = file_time(f)
# return time.mktime(t.timetuple())
#
# def calc_num_time_slices(file1,file2,td):
# d1 = file_time(file1)
# d2 = file_time(file2)
#
# gap = d2-d1
# gap_seconds = gap.days*60*60*24 + gap.seconds
# td_seconds = td.days*60*60*24 + td.seconds
#
# t1 = time.mktime(d1.timetuple())
# t2 = time.mktime(d2.timetuple())
# tgap = t2 - t1
#
# print(gap_seconds, tgap)
#
# return int(gap_seconds/td_seconds + 1)
#
# def filelist_from_datelist(datelist, basedir='/Users/lederer/Repositories/PSVS/rompy/', basename='ocean_his_*.nc'):
# files = glob.glob(os.path.join(basedir,basename))
# files.sort()
#
# master_file_list = []
# master_timestamp_list = []
# master_datetime_list = []
# timelist = []
# returnlist = []
#
# for file in files:
# master_file_list.append(file)
# master_timestamp_list.append(file_timestamp(file))
# master_datetime_list.append(file_time(file))
#
# tsarray = np.array(master_timestamp_list)
#
# for d in datelist:
# try:
# t = time.mktime(d.timetuple())
# timelist.append(t)
# tl = np.nonzero(tsarray <= t)[0][-1]
# th = np.nonzero(t <= tsarray)[0][0]
# if not tl == th:
# fraction = (t-tsarray[th])/(tsarray[tl]-tsarray[th])
# else:
# fraction = 0.0
#
# returnlist.append({
# 'file0': master_file_list[tl],
# 'file1': master_file_list[th],
# 'fraction': fraction,
# 'timestamp':t,
# 'datetime':d
# })
# except IndexError, e:
# print('Index out of bounds for %s' % (d.isoformat()))
#
# return returnlist
def extract_from_series(file_list,extraction_type='point',varname='zeta',**kwargs):
UTC = pytz.timezone('UTC')
pacific = pytz.timezone('US/Pacific')
#print('Hello from extractFromSeries')
if extraction_type == 'point':
# assume var is a 2d var for now
file_list.sort()
x = kwargs['x']
y = kwargs['y']
data = np.zeros((len(file_list),len(x)))
for i in range(len(x)):
data[0,i],junk = extract_from_file.extract_from_file(file_list[0],varname=varname,extraction_type='point',x=x[i],y=y[i])
time_list = [extract_utils.file_time(file_list[0])]
for i in range(1,len(file_list)):
for j in range(len(x)):
data[i,j],junk = extract_from_file.extract_from_file(file_list[i],varname=varname,extraction_type='point',x=x[j],y=y[j])
time_list.append(extract_utils.file_time(file_list[i]))
return (data,time_list)
# for i in range(1,ntimes):
# time_list.append(time_list[-1]+freq)
if extraction_type == 'profile':
file_list.sort()
ocean_time = None
data = None
z = None
x = kwargs['x']
y = kwargs['y']
if len(x) > 1:
x = np.array(x[0])
else:
x = np.array(x)
if len(y) > 1:
y = np.array(y[0])
else:
y = np.array(y)
for i in range(len(file_list)):
file = file_list[i]
if not os.path.exists(file):
raise IOError('File %s could not be located on the filesystem' %file)
ncf = nc.Dataset(file,mode='r')
if varname not in ncf.variables:
raise IOError('File %s does not have a variable named %s' % (file, varname))
ncf.close()
d,junk = extract_from_file.extract_from_file(file_list[i],varname=varname,extraction_type='profile',x=x,y=y)
            if data is None:
data = np.zeros((d.shape[0],len(file_list)))
data[:,i] = d.T
            if z is None:
z = np.zeros(data.shape)
z[:,i] = junk['zm'].T
# ocean_time = np.zeros(data.shape)
            if ocean_time is None:
ocean_time = np.zeros(data.shape)
ot = extract_utils.file_timestamp(file_list[i])
# ot = file_time(file)
for j in range(data.shape[0]):
ocean_time[j,i] = ot
return (data, ocean_time,z)
return
def extract_from_datetime_list(datelist,x,y,varname='salt',basedir='./',**kwargs):
filelist = extract_utils.filelist_from_datelist(datelist, basedir=basedir)
for f in filelist:
(d0,ot0,z0) = extract_from_series([f['file0']],x=x,y=y,varname=varname,extraction_type='profile',**kwargs)
(d1,ot1,z1) = extract_from_series([f['file1']],x=x,y=y,varname=varname,extraction_type='profile',**kwargs)
di = np.zeros(d0.shape)
oti = np.zeros(d0.shape)
zi = np.zeros(d0.shape)
f['d'] = d0 + (d1-d0)*f['fraction']
f['ot'] = ot0 + (ot1-ot0)*f['fraction']
f['z'] = z0 + (z1-z0)*f['fraction']
d = np.zeros((filelist[0]['d'].shape[0], len(filelist)*filelist[0]['d'].shape[1]))
ot = np.zeros((filelist[0]['d'].shape[0], len(filelist)*filelist[0]['d'].shape[1]))
z = np.zeros((filelist[0]['d'].shape[0], len(filelist)*filelist[0]['d'].shape[1]))
for i in range(len(filelist)):
d[:,i] = filelist[i]['d'].T
ot[:,i] = filelist[i]['ot'].T
z[:,i] = filelist[i]['z'].T
return (d,ot,z)
def extract_from_two_datetimes(x,y,dt0,dt1,varname='salt',interval=3600,basedir='./',**kwargs):
t0 = time.mktime(dt0.timetuple())
t1 = time.mktime(dt1.timetuple())
interval = float(interval)
time_stamps = interval*np.arange(t0/interval,(t1/interval)+1)
date_times = []
for ts in time_stamps:
date_times.append(dt.datetime.fromtimestamp(ts))
# for d in date_times:
# print(d.isoformat())
print(dt0.isoformat(),date_times[0].isoformat())
print(dt1.isoformat(),date_times[-1].isoformat())
return extract_from_datetime_list(date_times,x,y,varname=varname,basedir=basedir,**kwargs)
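# Illustrative usage sketch (an addition, not part of the original module).
# The coordinates, dates, and basedir below are hypothetical; the call assumes
# a directory of ROMS history files that extract_utils.filelist_from_datelist
# can resolve.
if __name__ == '__main__':
    lon = [-122.45]
    lat = [47.60]
    start = dt.datetime(2010, 1, 1, 0, 0, 0)
    stop = dt.datetime(2010, 1, 2, 0, 0, 0)
    # Sample the 'salt' profile every hour between the two datetimes.
    data, ocean_time, depth = extract_from_two_datetimes(
        lon, lat, start, stop, varname='salt', interval=3600, basedir='./')
    print(data.shape, ocean_time.shape, depth.shape)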
|
|
from flask.ext.restful import fields
from flask.ext.restful_swagger import swagger
from AcceptEnvironmentProperties import AcceptEnvironmentProperties
from Asset import Asset
from AssetEnvironmentProperties import AssetEnvironmentProperties
from Attacker import Attacker
from AttackerEnvironmentProperties import AttackerEnvironmentProperties
from Dependency import Dependency
from Goal import Goal
from GoalEnvironmentProperties import GoalEnvironmentProperties
from MisuseCase import MisuseCase
from MisuseCaseEnvironmentProperties import MisuseCaseEnvironmentProperties
from MitigateEnvironmentProperties import MitigateEnvironmentProperties
from Requirement import Requirement
from Risk import Risk
from Role import Role
from ThreatEnvironmentProperties import ThreatEnvironmentProperties
from TransferEnvironmentProperties import TransferEnvironmentProperties
from ValueType import ValueType
from Vulnerability import Vulnerability
from VulnerabilityEnvironmentProperties import VulnerabilityEnvironmentProperties
from tools.PseudoClasses import EnvironmentTensionModel, SecurityAttribute, ValuedRole, RiskRating
__author__ = 'Robin Quetin'
obj_id_field = "__python_obj__"
likelihood_metadata = { "enum": ['Incredible', 'Improbable', 'Remote', 'Occasional', 'Probable', 'Frequent'] }
severity_metadata = { "enum": ['Negligible', 'Marginal', 'Critical', 'Catastrophic'] }
def gen_class_metadata(class_ref):
return {
"enum": [class_ref.__module__+'.'+class_ref.__name__]
}
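# Illustrative note (an addition): because each model class is imported from a
# module of the same name above, gen_class_metadata(Asset) evaluates to
# {"enum": ["Asset.Asset"]}, i.e. it pins a model's __python_obj__ field to the
# fully qualified Python class name of the corresponding domain object.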
@swagger.model
@swagger.nested(attributes=SecurityAttribute.__name__)
class AssetEnvironmentPropertiesModel(object):
    def __init__(self, env_name='', associations=None, attributes=None):
        self.environment = env_name
        self.associations = associations if associations is not None else []
        self.attributes = attributes if attributes is not None else []
self.attributesDictionary = {}
def json_prepare(self):
self.attributes = self.attributesDictionary.values()
self.attributesDictionary = {}
for idx in range(0, len(self.associations)):
self.associations[idx] = list(self.associations[idx])
resource_fields = {
"__python_obj__": fields.String,
"theAssociations": fields.List(fields.List(fields.String)),
"theProperties": fields.List(fields.Nested(SecurityAttribute.resource_fields)),
"theEnvironmentName": fields.String,
"theRationale": fields.List(fields.String)
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove("theRationale")
swagger_metadata = {
obj_id_field: gen_class_metadata(AssetEnvironmentProperties)
}
@swagger.model
@swagger.nested(
theEnvironmentProperties=AssetEnvironmentPropertiesModel.__name__
)
class AssetModel(object):
resource_fields = {
obj_id_field: fields.String,
"theDescription": fields.String,
"theSignificance": fields.String,
"theId": fields.Integer,
"theTags": fields.List(fields.String),
"theCriticalRationale": fields.String,
"theInterfaces": fields.List(fields.String),
"theType": fields.String,
"theName": fields.String,
"isCritical": fields.Integer,
"theShortCode": fields.String,
"theEnvironmentProperties": fields.List(fields.Nested(AssetEnvironmentPropertiesModel.resource_fields))
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field : gen_class_metadata(Asset)
}
@swagger.model
class CapabilityModel(object):
resource_fields = {
"name": fields.String,
"value": fields.String
}
required = resource_fields.keys()
@swagger.model
@swagger.nested(
theCapabilities=CapabilityModel.__name__
)
class AttackerEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theMotives': fields.List(fields.String),
'theRoles': fields.List(fields.String),
'theCapabilities': fields.List(fields.Nested(CapabilityModel.resource_fields)),
'theEnvironmentName': fields.String,
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field: gen_class_metadata(AttackerEnvironmentProperties)
}
@swagger.model
@swagger.nested(
theEnvironmentProperties=AttackerEnvironmentPropertiesModel.__name__
)
class AttackerModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentDictionary': fields.List(fields.Nested(AttackerEnvironmentPropertiesModel.resource_fields)),
'theDescription': fields.String,
'theId': fields.Integer,
'theTags': fields.List(fields.String),
'isPersona': fields.Integer,
'theName': fields.String,
'theImage': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(AttackerEnvironmentPropertiesModel.resource_fields)),
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove('theEnvironmentDictionary')
swagger_metadata = {
obj_id_field: gen_class_metadata(Attacker)
}
@swagger.model
class CImportParams(object):
resource_fields = {
'urlenc_file_contents': fields.String,
'type': fields.String,
'overwrite': fields.Integer
}
required = resource_fields.keys()
required.remove('overwrite')
swagger_metadata = {
'type': {
'enum': [
'securitypattern',
'attackpattern',
'tvtypes',
'directory',
'requirements',
'riskanalysis',
'usability',
'project',
'domainvalues',
'architecturalpattern',
'associations',
'synopses',
'processes',
'assets',
'all'
]
}
}
@swagger.model
class DependencyModel(object):
resource_fields = {
obj_id_field: fields.String,
'theDependencyType': fields.String,
'theRationale': fields.String,
'theEnvironmentName': fields.String,
'theDepender': fields.String,
'theDependee': fields.String,
'theDependency': fields.String,
'theId': fields.Integer,
}
required = resource_fields.keys()
required.remove(obj_id_field)
    swagger_metadata = {
obj_id_field : gen_class_metadata(Dependency)
}
@swagger.model
@swagger.nested(
theTensions=EnvironmentTensionModel.__name__,
)
class EnvironmentModel(object):
resource_fields = {
obj_id_field: fields.String,
"theId": fields.Integer,
"theName": fields.String,
"theShortCode": fields.String,
"theDescription": fields.String,
"theEnvironments": fields.List(fields.String),
"theDuplicateProperty": fields.String,
"theOverridingEnvironment": fields.String,
"theTensions": fields.List(fields.Nested(EnvironmentTensionModel.resource_fields)),
}
required = resource_fields.keys()
required.remove(obj_id_field)
@swagger.model
class GoalEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theCategory": fields.String,
"theConcernAssociations": fields.List(fields.String),
"theConcerns": fields.List(fields.String),
"theDefinition": fields.String,
"theEnvironmentName": fields.String,
"theFitCriterion": fields.String,
"theGoalRefinements": fields.List(fields.String),
"theIssue": fields.String,
"theLabel": fields.String,
"thePriority": fields.String,
"theSubGoalRefinements": fields.List(fields.String)
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field : gen_class_metadata(GoalEnvironmentProperties)
}
@swagger.model
@swagger.nested(
theEnvironmentProperties=GoalEnvironmentPropertiesModel.__name__
)
class GoalModel(object):
resource_fields = {
obj_id_field: fields.String,
"theColour": fields.String,
"theEnvironmentDictionary": fields.List,
"theEnvironmentProperties": fields.List(fields.Nested(GoalEnvironmentPropertiesModel.resource_fields)),
"theId": fields.Integer,
"theName": fields.String,
"theOriginator": fields.String,
"theTags": fields.List(fields.String)
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove("theEnvironmentDictionary")
swagger_metadata = {
obj_id_field : gen_class_metadata(Goal)
}
@swagger.model
@swagger.nested(
theRiskRating=RiskRating.__name__
)
class MisuseCaseEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theAssets": fields.List(fields.String),
"theAttackers": fields.List(fields.String),
"theDescription": fields.String,
"theEnvironmentName": fields.String,
"theObjective": fields.String,
"theLikelihood": fields.String,
"theRiskRating": fields.Nested(RiskRating.resource_fields),
"theSeverity": fields.String,
}
required = ["theDescription", "theEnvironmentName"]
swagger_metadata = {
obj_id_field : gen_class_metadata(MisuseCaseEnvironmentProperties),
"theLikelihood": likelihood_metadata,
"theSeverity": severity_metadata
}
@swagger.model
@swagger.nested(
theEnvironmentProperties=MisuseCaseEnvironmentPropertiesModel.__name__
)
class MisuseCaseModel(object):
resource_fields = {
obj_id_field: fields.String,
"theId": fields.Integer,
"theName": fields.String,
"theThreatName": fields.String,
"theRiskName": fields.String,
"theVulnerabilityName": fields.String,
"theEnvironmentDictionary": fields.List(fields.Nested(MisuseCaseEnvironmentPropertiesModel.resource_fields)),
"theEnvironmentProperties": fields.List(fields.Nested(MisuseCaseEnvironmentPropertiesModel.resource_fields))
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove("theEnvironmentDictionary")
swagger_metadata = {
obj_id_field : gen_class_metadata(MisuseCase)
}
@swagger.model
class RequirementAttributesModel(object):
resource_fields = {
"originator": fields.String,
"supportingMaterial": fields.String,
"fitCriterion": fields.String,
"asset": fields.String,
"rationale": fields.String,
"type": fields.String
}
@swagger.model
@swagger.nested(
dirtyAttrs=RequirementAttributesModel.__name__,
attrs=RequirementAttributesModel.__name__
)
class RequirementModel(object):
resource_fields = {
obj_id_field: fields.String,
"theId": fields.Integer,
"dirtyAttrs": fields.Nested(RequirementAttributesModel.resource_fields),
"attrs": fields.Nested(RequirementAttributesModel.resource_fields),
"theName": fields.String,
"theLabel": fields.String,
"theDescription": fields.String,
"thePriority": fields.Integer,
"theVersion": fields.Integer
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field : gen_class_metadata(Requirement)
}
@swagger.model
class AcceptEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theCost': fields.String,
'theRationale': fields.String,
'theEnvironmentName': fields.String
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = { obj_id_field: gen_class_metadata(AcceptEnvironmentProperties) }
@swagger.model
class MitigateEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theDetectionMechanisms': fields.List(fields.String),
'theDetectionPoint': fields.String,
'theType': fields.String,
'theEnvironmentName': fields.String,
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = { obj_id_field: gen_class_metadata(MitigateEnvironmentProperties) }
@swagger.model
@swagger.nested(
theRoles=ValuedRole.__name__
)
class TransferEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theRoles': fields.List(fields.Nested(ValuedRole.resource_fields)),
'theRationale': fields.String,
'theEnvironmentName': fields.String
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = { obj_id_field: gen_class_metadata(TransferEnvironmentProperties) }
@swagger.model
@swagger.nested(
accept=AcceptEnvironmentPropertiesModel.__name__,
mitigate=MitigateEnvironmentPropertiesModel.__name__,
transfer=TransferEnvironmentPropertiesModel.__name__,
)
class ResponseEnvironmentPropertiesModel(object):
resource_fields = {
'accept': fields.List(fields.Nested(AcceptEnvironmentPropertiesModel.resource_fields)),
'mitigate': fields.List(fields.Nested(MitigateEnvironmentPropertiesModel.resource_fields)),
'transfer': fields.List(fields.Nested(TransferEnvironmentPropertiesModel.resource_fields))
}
field_names = resource_fields.keys()
@swagger.model
@swagger.nested(
theEnvironmentProperties=ResponseEnvironmentPropertiesModel.__name__
)
class ResponseModel(object):
resource_fields = {
obj_id_field: fields.String,
'theId': fields.Integer,
'theTags': fields.List(fields.String),
'theRisk': fields.String,
'theName': fields.String,
'theEnvironmentProperties': fields.Nested(ResponseEnvironmentPropertiesModel.resource_fields),
'theResponseType': fields.String
}
required = resource_fields.keys()
required.remove(obj_id_field)
@swagger.model
@swagger.nested(
theMisuseCase=MisuseCaseModel.__name__
)
class RiskModel(object):
resource_fields = {
obj_id_field: fields.String,
"theVulnerabilityName": fields.String,
"theId": fields.Integer,
"theMisuseCase": fields.Nested(MisuseCaseModel.resource_fields),
"theTags": fields.List(fields.Nested(fields.String)),
"theThreatName": fields.String,
"theName": fields.String
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field : gen_class_metadata(Risk)
}
@swagger.model
class RoleModel(object):
resource_fields = {
obj_id_field: fields.String,
"theId": fields.Integer,
"theName": fields.String,
"theType": fields.String,
"theShortCode": fields.String,
"theDescription": fields.String,
"theEnvironmentProperties": None
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove("theEnvironmentProperties")
swagger_metadata = {
obj_id_field : gen_class_metadata(Role)
}
@swagger.model
class RoleEnvironmentPropertiesModel(object):
resource_fields = {
"theEnvironmentName": fields.String,
"theResponses": fields.List(fields.List(fields.String)),
"theCountermeasures": fields.List(fields.String)
}
required = resource_fields.keys()
@swagger.model
@swagger.nested(
theProperties=SecurityAttribute.__name__
)
class ThreatEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
'theAssets': fields.List(fields.String),
'theLikelihood': fields.String,
'theEnvironmentName': fields.String,
'theAttackers': fields.List(fields.String),
'theRationale': fields.List(fields.String),
'theProperties': fields.List(fields.Nested(SecurityAttribute.resource_fields)),
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove('theRationale')
swagger_metadata = {
obj_id_field : gen_class_metadata(ThreatEnvironmentProperties),
'theLikelihood' : likelihood_metadata
}
@swagger.model
@swagger.nested(
theEnvironmentProperties=ThreatEnvironmentPropertiesModel.__name__
)
class ThreatModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentDictionary': fields.List(fields.String),
'theId': fields.Integer,
'theTags': fields.List(fields.String),
'theThreatPropertyDictionary': fields.List(fields.String),
'theThreatName': fields.String,
'theType': fields.String,
'theMethod': fields.String,
'theEnvironmentProperties': fields.List(fields.Nested(ThreatEnvironmentPropertiesModel.resource_fields)),
'likelihoodLookup': fields.List(fields.String),
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove('theThreatPropertyDictionary')
required.remove('theEnvironmentDictionary')
required.remove('likelihoodLookup')
@swagger.model
class UserConfigModel(object):
resource_fields = {
"user": fields.String,
"passwd": fields.String,
"db": fields.String,
"host": fields.String,
"port": fields.Integer,
"jsonPrettyPrint": fields.String
}
required = resource_fields.keys()
required.remove("jsonPrettyPrint")
swagger_metadata = {
'jsonPrettyPrint':
{
'enum': ['on', 'off']
}
}
@swagger.model
class ValueTypeModel(object):
resource_fields = {
obj_id_field: fields.String,
'theScore': fields.Integer,
'theId': fields.Integer,
'theRationale': fields.String,
'theType': fields.String,
'theName': fields.String,
'theDescription': fields.String,
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field: gen_class_metadata(ValueType),
"theType": {
"enum": ['asset_value','threat_value','risk_class','countermeasure_value','capability','motivation','asset_type','threat_type','vulnerability_type','severity','likelihood','access_right','protocol','privilege','surface_type']
}
}
@swagger.model
class VulnerabilityEnvironmentPropertiesModel(object):
resource_fields = {
obj_id_field: fields.String,
"theAssets": fields.List(fields.String),
"theEnvironmentName": fields.String,
"theSeverity": fields.String
}
required = resource_fields.keys()
required.remove(obj_id_field)
swagger_metadata = {
obj_id_field : gen_class_metadata(VulnerabilityEnvironmentProperties),
"theSeverity": severity_metadata
}
@swagger.model
@swagger.nested(
theEnvironmentProperties=VulnerabilityEnvironmentPropertiesModel.__name__,
theEnvironmentDictionary=VulnerabilityEnvironmentPropertiesModel.__name__
)
class VulnerabilityModel(object):
resource_fields = {
obj_id_field: fields.String,
'theEnvironmentDictionary': fields.List(fields.Nested(VulnerabilityEnvironmentPropertiesModel.resource_fields)),
'theVulnerabilityName': fields.String,
'theVulnerabilityType': fields.String,
'theTags': fields.List(fields.String),
'theVulnerabilityDescription': fields.String,
'theVulnerabilityId': fields.Integer,
'severityLookup': fields.List(fields.String),
'theEnvironmentProperties': fields.List(fields.Nested(VulnerabilityEnvironmentPropertiesModel.resource_fields))
}
required = resource_fields.keys()
required.remove(obj_id_field)
required.remove('theEnvironmentDictionary')
required.remove('severityLookup')
swagger_metadata = {
obj_id_field: gen_class_metadata(Vulnerability),
'theVulnerabilityType' : {
"enum": ['Configuration', 'Design', 'Implementation']
}
}
|
|
import os
from nose_parameterized import parameterized
import pandas as pd
from toolz import valmap
import toolz.curried.operator as op
from zipline.assets.synthetic import make_simple_equity_info
from zipline.data.bundles import UnknownBundle, from_bundle_ingest_dirname
from zipline.data.bundles.core import _make_bundle_core
from zipline.lib.adjustment import Float64Multiply
from zipline.pipeline.loaders.synthetic import (
make_bar_data,
expected_bar_values_2d,
)
from zipline.testing import (
subtest,
str_to_seconds,
tmp_trading_env,
)
from zipline.testing.fixtures import WithInstanceTmpDir, ZiplineTestCase
from zipline.testing.predicates import (
assert_equal,
assert_false,
assert_in,
assert_is,
assert_is_instance,
assert_is_none,
assert_raises,
assert_true,
)
from zipline.utils.cache import dataframe_cache
from zipline.utils.functional import apply
from zipline.utils.tradingcalendar import trading_days
import zipline.utils.paths as pth
_1_ns = pd.Timedelta(1, unit='ns')
class BundleCoreTestCase(WithInstanceTmpDir, ZiplineTestCase):
def init_instance_fixtures(self):
super(BundleCoreTestCase, self).init_instance_fixtures()
(self.bundles,
self.register,
self.unregister,
self.ingest,
self.load,
self.clean) = _make_bundle_core()
self.environ = {'ZIPLINE_ROOT': self.instance_tmpdir.path}
def test_register_decorator(self):
@apply
@subtest(((c,) for c in 'abcde'), 'name')
def _(name):
@self.register(name)
def ingest(*args):
pass
assert_in(name, self.bundles)
assert_is(self.bundles[name].ingest, ingest)
self._check_bundles(set('abcde'))
def test_register_call(self):
def ingest(*args):
pass
@apply
@subtest(((c,) for c in 'abcde'), 'name')
def _(name):
self.register(name, ingest)
assert_in(name, self.bundles)
assert_is(self.bundles[name].ingest, ingest)
assert_equal(
valmap(op.attrgetter('ingest'), self.bundles),
{k: ingest for k in 'abcde'},
)
self._check_bundles(set('abcde'))
def _check_bundles(self, names):
assert_equal(set(self.bundles.keys()), names)
for name in names:
self.unregister(name)
assert_false(self.bundles)
def test_register_no_create(self):
called = [False]
@self.register('bundle', create_writers=False)
def bundle_ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir):
assert_is_none(asset_db_writer)
assert_is_none(minute_bar_writer)
assert_is_none(daily_bar_writer)
assert_is_none(adjustment_writer)
called[0] = True
self.ingest('bundle', self.environ)
assert_true(called[0])
def test_ingest(self):
env = self.enter_instance_context(tmp_trading_env())
start = pd.Timestamp('2014-01-06', tz='utc')
end = pd.Timestamp('2014-01-10', tz='utc')
calendar = trading_days[trading_days.slice_indexer(start, end)]
minutes = env.minutes_for_days_in_range(calendar[0], calendar[-1])
sids = tuple(range(3))
equities = make_simple_equity_info(
sids,
calendar[0],
calendar[-1],
)
daily_bar_data = make_bar_data(equities, calendar)
minute_bar_data = make_bar_data(equities, minutes)
first_split_ratio = 0.5
second_split_ratio = 0.1
splits = pd.DataFrame.from_records([
{
'effective_date': str_to_seconds('2014-01-08'),
'ratio': first_split_ratio,
'sid': 0,
},
{
'effective_date': str_to_seconds('2014-01-09'),
'ratio': second_split_ratio,
'sid': 1,
},
])
@self.register('bundle',
calendar=calendar,
opens=env.opens_in_range(calendar[0], calendar[-1]),
closes=env.closes_in_range(calendar[0], calendar[-1]))
def bundle_ingest(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir):
assert_is(environ, self.environ)
asset_db_writer.write(equities=equities)
minute_bar_writer.write(minute_bar_data)
daily_bar_writer.write(daily_bar_data)
adjustment_writer.write(splits=splits)
assert_is_instance(calendar, pd.DatetimeIndex)
assert_is_instance(cache, dataframe_cache)
assert_is_instance(show_progress, bool)
self.ingest('bundle', environ=self.environ)
bundle = self.load('bundle', environ=self.environ)
assert_equal(set(bundle.asset_finder.sids), set(sids))
columns = 'open', 'high', 'low', 'close', 'volume'
actual = bundle.minute_bar_reader.load_raw_arrays(
columns,
minutes[0],
minutes[-1],
sids,
)
for actual_column, colname in zip(actual, columns):
assert_equal(
actual_column,
expected_bar_values_2d(minutes, equities, colname),
msg=colname,
)
actual = bundle.daily_bar_reader.load_raw_arrays(
columns,
calendar[0],
calendar[-1],
sids,
)
for actual_column, colname in zip(actual, columns):
assert_equal(
actual_column,
expected_bar_values_2d(calendar, equities, colname),
msg=colname,
)
adjustments_for_cols = bundle.adjustment_reader.load_adjustments(
columns,
calendar,
pd.Index(sids),
)
for column, adjustments in zip(columns, adjustments_for_cols[:-1]):
# iterate over all the adjustments but `volume`
assert_equal(
adjustments,
{
2: [Float64Multiply(
first_row=0,
last_row=2,
first_col=0,
last_col=0,
value=first_split_ratio,
)],
3: [Float64Multiply(
first_row=0,
last_row=3,
first_col=1,
last_col=1,
value=second_split_ratio,
)],
},
msg=column,
)
# check the volume, the value should be 1/ratio
assert_equal(
adjustments_for_cols[-1],
{
2: [Float64Multiply(
first_row=0,
last_row=2,
first_col=0,
last_col=0,
value=1 / first_split_ratio,
)],
3: [Float64Multiply(
first_row=0,
last_row=3,
first_col=1,
last_col=1,
value=1 / second_split_ratio,
)],
},
msg='volume',
)
@parameterized.expand([('clean',), ('load',)])
def test_bundle_doesnt_exist(self, fnname):
with assert_raises(UnknownBundle) as e:
getattr(self, fnname)('ayy', environ=self.environ)
assert_equal(e.exception.name, 'ayy')
def test_load_no_data(self):
# register but do not ingest data
self.register('bundle', lambda *args: None)
ts = pd.Timestamp('2014')
with assert_raises(ValueError) as e:
self.load('bundle', timestamp=ts, environ=self.environ)
assert_in(
"no data for bundle 'bundle' on or before %s" % ts,
str(e.exception),
)
def _list_bundle(self):
return {
os.path.join(pth.data_path(['bundle', d], environ=self.environ))
for d in os.listdir(
pth.data_path(['bundle'], environ=self.environ),
)
}
def _empty_ingest(self, _wrote_to=[]):
"""Run the nth empty ingest.
Returns
-------
wrote_to : str
The timestr of the bundle written.
"""
if not self.bundles:
@self.register('bundle',
calendar=pd.DatetimeIndex([pd.Timestamp('2014')]))
def _(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
cache,
show_progress,
output_dir):
_wrote_to.append(output_dir)
        del _wrote_to[:]  # reset the shared default list (portable across Python 2/3)
self.ingest('bundle', environ=self.environ)
assert_equal(len(_wrote_to), 1, msg='ingest was called more than once')
ingestions = self._list_bundle()
assert_in(
_wrote_to[0],
ingestions,
msg='output_dir was not in the bundle directory',
)
return _wrote_to[0]
def test_clean_keep_last(self):
first = self._empty_ingest()
assert_equal(
self.clean('bundle', keep_last=1, environ=self.environ),
set(),
)
assert_equal(
self._list_bundle(),
{first},
msg='directory should not have changed',
)
second = self._empty_ingest()
assert_equal(
self._list_bundle(),
{first, second},
msg='two ingestions are not present',
)
assert_equal(
self.clean('bundle', keep_last=1, environ=self.environ),
{first},
)
assert_equal(
self._list_bundle(),
{second},
            msg='first ingestion was not removed with keep_last=1',
)
third = self._empty_ingest()
fourth = self._empty_ingest()
fifth = self._empty_ingest()
assert_equal(
self._list_bundle(),
{second, third, fourth, fifth},
msg='larger set of ingestions did not happen correctly',
)
assert_equal(
self.clean('bundle', keep_last=2, environ=self.environ),
{second, third},
)
assert_equal(
self._list_bundle(),
{fourth, fifth},
msg='keep_last=2 did not remove the correct number of ingestions',
)
@staticmethod
def _ts_of_run(run):
return from_bundle_ingest_dirname(run.rsplit(os.path.sep, 1)[-1])
def test_clean_before_after(self):
first = self._empty_ingest()
assert_equal(
self.clean(
'bundle',
before=self._ts_of_run(first),
environ=self.environ,
),
set(),
)
assert_equal(
self._list_bundle(),
{first},
msg='directory should not have changed (before)',
)
assert_equal(
self.clean(
'bundle',
after=self._ts_of_run(first),
environ=self.environ,
),
set(),
)
assert_equal(
self._list_bundle(),
{first},
msg='directory should not have changed (after)',
)
assert_equal(
self.clean(
'bundle',
before=self._ts_of_run(first) + _1_ns,
environ=self.environ,
),
{first},
)
assert_equal(
self._list_bundle(),
set(),
            msg='directory should now be empty (before)',
)
second = self._empty_ingest()
assert_equal(
self.clean(
'bundle',
after=self._ts_of_run(second) - _1_ns,
environ=self.environ,
),
{second},
)
assert_equal(
self._list_bundle(),
set(),
            msg='directory should now be empty (after)',
)
third = self._empty_ingest()
fourth = self._empty_ingest()
fifth = self._empty_ingest()
sixth = self._empty_ingest()
assert_equal(
self._list_bundle(),
{third, fourth, fifth, sixth},
            msg='larger set of ingestions did not happen correctly',
)
assert_equal(
self.clean(
'bundle',
before=self._ts_of_run(fourth),
after=self._ts_of_run(fifth),
environ=self.environ,
),
{third, sixth},
)
assert_equal(
self._list_bundle(),
{fourth, fifth},
msg='did not strip first and last directories',
)
|
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test BIP68 implementation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.blocktools import *
import random
import time
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(BitcoinTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-blockprioritysize=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-blockprioritysize=0", "-acceptnonstdtxn=0"]))
self.is_network_split = False
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
connect_nodes(self.nodes[0], 1)
def run_test(self):
# Generate some coins
self.nodes[0].generate(110)
print "Running test disable flag"
self.test_disable_flag()
print "Running test sequence-lock-confirmed-inputs"
self.test_sequence_lock_confirmed_inputs()
print "Running test sequence-lock-unconfirmed-inputs"
self.test_sequence_lock_unconfirmed_inputs()
print "Running test BIP68 not consensus before versionbits activation"
self.test_bip68_not_consensus()
print "Verifying nVersion=2 transactions aren't standard"
self.test_version2_relay(before_activation=True)
print "Activating BIP68 (and 112/113)"
self.activateCSV()
print "Verifying nVersion=2 transactions are now standard"
self.test_version2_relay(before_activation=False)
print "Passed\n"
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 BTC
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx2))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in xrange(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in xrange(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in xrange(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
try:
self.nodes[0].sendrawtransaction(rawtx)
except JSONRPCException as exp:
assert(not should_pass and using_sequence_locks)
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(should_pass or not using_sequence_locks)
# Recalculate utxos if we successfully sent the transaction
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
try:
node.sendrawtransaction(ToHex(tx))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
assert(orig_tx.hash in node.getrawmempool())
else:
# orig_tx must not be in mempool
assert(orig_tx.hash not in node.getrawmempool())
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(tx2.hash, -1e15, int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in xrange(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(tx2.hash, 1e15, int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
try:
self.nodes[0].sendrawtransaction(raw_tx5)
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in xrange(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
try:
self.nodes[0].sendrawtransaction(ToHex(tx3))
except JSONRPCException as exp:
assert_equal(exp.error["message"], NOT_FINAL_ERROR)
else:
assert(False)
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
min_activation_height = 432
        height = self.nodes[0].getblockcount()
        assert(height < min_activation_height)
        self.nodes[0].generate(min_activation_height - height)
assert(get_bip9_status(self.nodes[0], 'csv')['status'] == 'active')
sync_blocks(self.nodes)
# Use self.nodes[1] to test standardness relay policy
def test_version2_relay(self, before_activation):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
try:
tx_id = self.nodes[1].sendrawtransaction(tx_signed)
assert(before_activation == False)
        except JSONRPCException:
assert(before_activation)
if __name__ == '__main__':
BIP68Test().main()
|
|
#
# This file is part of pyasn1-modules software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
# X.509 message syntax
#
# ASN.1 source from:
# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
# http://www.ietf.org/rfc/rfc2459.txt
#
# Sample captures from:
# http://wiki.wireshark.org/SampleCaptures/
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import namedval
from pyasn1.type import opentype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
MAX = float('inf')
#
# PKIX1Explicit88
#
# Upper Bounds
ub_name = univ.Integer(32768)
ub_common_name = univ.Integer(64)
ub_locality_name = univ.Integer(128)
ub_state_name = univ.Integer(128)
ub_organization_name = univ.Integer(64)
ub_organizational_unit_name = univ.Integer(64)
ub_title = univ.Integer(64)
ub_match = univ.Integer(128)
ub_emailaddress_length = univ.Integer(128)
ub_common_name_length = univ.Integer(64)
ub_country_name_alpha_length = univ.Integer(2)
ub_country_name_numeric_length = univ.Integer(3)
ub_domain_defined_attributes = univ.Integer(4)
ub_domain_defined_attribute_type_length = univ.Integer(8)
ub_domain_defined_attribute_value_length = univ.Integer(128)
ub_domain_name_length = univ.Integer(16)
ub_extension_attributes = univ.Integer(256)
ub_e163_4_number_length = univ.Integer(15)
ub_e163_4_sub_address_length = univ.Integer(40)
ub_generation_qualifier_length = univ.Integer(3)
ub_given_name_length = univ.Integer(16)
ub_initials_length = univ.Integer(5)
ub_integer_options = univ.Integer(256)
ub_numeric_user_id_length = univ.Integer(32)
ub_organization_name_length = univ.Integer(64)
ub_organizational_unit_name_length = univ.Integer(32)
ub_organizational_units = univ.Integer(4)
ub_pds_name_length = univ.Integer(16)
ub_pds_parameter_length = univ.Integer(30)
ub_pds_physical_address_lines = univ.Integer(6)
ub_postal_code_length = univ.Integer(16)
ub_surname_length = univ.Integer(40)
ub_terminal_id_length = univ.Integer(24)
ub_unformatted_address_length = univ.Integer(180)
ub_x121_address_length = univ.Integer(16)
class UniversalString(char.UniversalString):
pass
class BMPString(char.BMPString):
pass
class UTF8String(char.UTF8String):
pass
id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
id_at = univ.ObjectIdentifier('2.5.4')
id_at_name = univ.ObjectIdentifier('2.5.4.41')
# preserve misspelled variable for compatibility
id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4')
id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
id_at_initials = univ.ObjectIdentifier('2.5.4.43')
id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')
class X520name(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
)
id_at_commonName = univ.ObjectIdentifier('2.5.4.3')
class X520CommonName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
)
id_at_localityName = univ.ObjectIdentifier('2.5.4.7')
class X520LocalityName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
)
id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')
class X520StateOrProvinceName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
)
id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')
class X520OrganizationName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
)
id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')
class X520OrganizationalUnitName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('printableString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('universalString', char.UniversalString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
namedtype.NamedType('bmpString', char.BMPString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
)
id_at_title = univ.ObjectIdentifier('2.5.4.12')
class X520Title(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
namedtype.NamedType('bmpString',
char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
)
id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
class X520dnQualifier(char.PrintableString):
pass
id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
class X520countryName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
class Pkcs9email(char.IA5String):
subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
# ----
class DSAPrivateKey(univ.Sequence):
"""PKIX compliant DSA private key structure"""
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('public', univ.Integer()),
namedtype.NamedType('private', univ.Integer())
)
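# Illustrative sketch only (not part of the original module): a DSAPrivateKey
# value can be filled in field by field and serialized with pyasn1's DER
# encoder.  The helper name and the numbers are placeholders, not a usable key.
def _example_encode_dsa_private_key():
    from pyasn1.codec.der import encoder

    key = DSAPrivateKey()
    key['version'] = 0          # the 'v1' named value defined above
    key['p'] = 0xFFFFFFFB       # placeholder prime modulus
    key['q'] = 0xFFFFFFEF       # placeholder subgroup order
    key['g'] = 2                # placeholder generator
    key['public'] = 4           # placeholder public value
    key['private'] = 3          # placeholder private value
    return encoder.encode(key)  # DER-encoded bytes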
# ----
class DirectoryString(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('teletexString',
char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('printableString',
char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('universalString',
char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('utf8String',
char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
# hm, this should not be here!? XXX
)
# certificate and CRL specific structures begin here
class AlgorithmIdentifier(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
namedtype.OptionalNamedType('parameters', univ.Any())
)
# Algorithm OIDs and parameter structures
pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
class Dss_Sig_Value(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('r', univ.Integer()),
namedtype.NamedType('s', univ.Integer())
)
dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
class ValidationParms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('seed', univ.BitString()),
namedtype.NamedType('pgenCounter', univ.Integer())
)
class DomainParameters(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('g', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('j', univ.Integer()),
namedtype.OptionalNamedType('validationParms', ValidationParms())
)
id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
class Dss_Parms(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('p', univ.Integer()),
namedtype.NamedType('q', univ.Integer()),
namedtype.NamedType('g', univ.Integer())
)
# x400 address syntax starts here
teletex_domain_defined_attributes = univ.Integer(6)
class TeletexDomainDefinedAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.TeletexString())
)
class TeletexDomainDefinedAttributes(univ.SequenceOf):
componentType = TeletexDomainDefinedAttribute()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
terminal_type = univ.Integer(23)
class TerminalType(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options)
namedValues = namedval.NamedValues(
('telex', 3),
('teletelex', 4),
('g3-facsimile', 5),
('g4-facsimile', 6),
('ia5-terminal', 7),
('videotex', 8)
)
class PresentationAddress(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3),
subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
)
extended_network_address = univ.Integer(22)
class E163_4_address(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('number', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtendedNetworkAddress(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('e163-4-address', E163_4_address()),
namedtype.NamedType('psap-address', PresentationAddress().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class PDSParameter(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
)
local_postal_attributes = univ.Integer(21)
class LocalPostalAttributes(PDSParameter):
pass
class UniquePostalName(PDSParameter):
pass
unique_postal_name = univ.Integer(20)
poste_restante_address = univ.Integer(19)
class PosteRestanteAddress(PDSParameter):
pass
post_office_box_address = univ.Integer(18)
class PostOfficeBoxAddress(PDSParameter):
pass
street_address = univ.Integer(17)
class StreetAddress(PDSParameter):
pass
class UnformattedPostalAddress(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
)
physical_delivery_office_name = univ.Integer(10)
class PhysicalDeliveryOfficeName(PDSParameter):
pass
physical_delivery_office_number = univ.Integer(11)
class PhysicalDeliveryOfficeNumber(PDSParameter):
pass
extension_OR_address_components = univ.Integer(12)
class ExtensionORAddressComponents(PDSParameter):
pass
physical_delivery_personal_name = univ.Integer(13)
class PhysicalDeliveryPersonalName(PDSParameter):
pass
physical_delivery_organization_name = univ.Integer(14)
class PhysicalDeliveryOrganizationName(PDSParameter):
pass
extension_physical_delivery_address_components = univ.Integer(15)
class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
pass
unformatted_postal_address = univ.Integer(16)
postal_code = univ.Integer(9)
class PostalCode(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
namedtype.NamedType('printable-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
)
class PhysicalDeliveryCountryName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class PDSName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
physical_delivery_country_name = univ.Integer(8)
class TeletexOrganizationalUnitName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
pds_name = univ.Integer(7)
teletex_organizational_unit_names = univ.Integer(5)
class TeletexOrganizationalUnitNames(univ.SequenceOf):
componentType = TeletexOrganizationalUnitName()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
teletex_personal_name = univ.Integer(4)
class TeletexPersonalName(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
teletex_organization_name = univ.Integer(3)
class TeletexOrganizationName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
teletex_common_name = univ.Integer(2)
class TeletexCommonName(char.TeletexString):
subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
class CommonName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
common_name = univ.Integer(1)
class ExtensionAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('extension-attribute-value',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class ExtensionAttributes(univ.SetOf):
componentType = ExtensionAttribute()
sizeSpec = univ.SetOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
class BuiltInDomainDefinedAttribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
namedtype.NamedType('value', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
)
class BuiltInDomainDefinedAttributes(univ.SequenceOf):
componentType = BuiltInDomainDefinedAttribute()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
class OrganizationalUnitName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
class OrganizationalUnitNames(univ.SequenceOf):
componentType = OrganizationalUnitName()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
class PersonalName(univ.Set):
componentType = namedtype.NamedTypes(
namedtype.NamedType('surname', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class NumericUserIdentifier(char.NumericString):
subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
class OrganizationName(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
class PrivateDomainName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
)
class TerminalIdentifier(char.PrintableString):
subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
class X121Address(char.NumericString):
subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
class NetworkAddress(X121Address):
pass
class AdministrationDomainName(univ.Choice):
tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('numeric', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
namedtype.NamedType('printable', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
)
class CountryName(univ.Choice):
tagSet = univ.Choice.tagSet.tagExplicitly(
tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
)
componentType = namedtype.NamedTypes(
namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
ub_country_name_numeric_length))),
namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
)
class BuiltInStandardAttributes(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('country-name', CountryName()),
namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
)
class ORAddress(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
)
#
# PKIX1Implicit88
#
id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
class InvalidityDate(useful.GeneralizedTime):
pass
id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
class HoldInstructionCode(univ.ObjectIdentifier):
pass
id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
class CRLReason(univ.Enumerated):
namedValues = namedval.NamedValues(
('unspecified', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6),
('removeFromCRL', 8)
)
id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
class CRLNumber(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
class BaseCRLNumber(CRLNumber):
pass
id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1')
id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
class KeyPurposeId(univ.ObjectIdentifier):
pass
class ExtKeyUsageSyntax(univ.SequenceOf):
componentType = KeyPurposeId()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class ReasonFlags(univ.BitString):
namedValues = namedval.NamedValues(
('unused', 0),
('keyCompromise', 1),
('cACompromise', 2),
('affiliationChanged', 3),
('superseded', 4),
('cessationOfOperation', 5),
('certificateHold', 6)
)
class SkipCerts(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
class PolicyConstraints(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
class BasicConstraints(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('cA', univ.Boolean(False)),
namedtype.OptionalNamedType('pathLenConstraint',
univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
)
id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
class EDIPartyName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('partyName',
DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
class BaseDistance(univ.Integer):
subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
class DisplayText(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('visibleString',
char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
)
class NoticeReference(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('organization', DisplayText()),
namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
)
class UserNotice(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('noticeRef', NoticeReference()),
namedtype.OptionalNamedType('explicitText', DisplayText())
)
class CPSuri(char.IA5String):
pass
class PolicyQualifierId(univ.ObjectIdentifier):
subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
class CertPolicyId(univ.ObjectIdentifier):
pass
class PolicyQualifierInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
namedtype.NamedType('qualifier', univ.Any())
)
id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
class PolicyInformation(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('policyIdentifier', CertPolicyId()),
namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class CertificatePolicies(univ.SequenceOf):
componentType = PolicyInformation()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
class PolicyMapping(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
)
class PolicyMappings(univ.SequenceOf):
componentType = PolicyMapping()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
class PrivateKeyUsagePeriod(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
class KeyUsage(univ.BitString):
namedValues = namedval.NamedValues(
('digitalSignature', 0),
('nonRepudiation', 1),
('keyEncipherment', 2),
('dataEncipherment', 3),
('keyAgreement', 4),
('keyCertSign', 5),
('cRLSign', 6),
('encipherOnly', 7),
('decipherOnly', 8)
)
id_ce = univ.ObjectIdentifier('2.5.29')
id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
class KeyIdentifier(univ.OctetString):
pass
id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
class SubjectKeyIdentifier(KeyIdentifier):
pass
id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
class AttributeValue(univ.Any):
pass
class AttributeType(univ.ObjectIdentifier):
pass
certificateAttributesMap = {}
class AttributeTypeAndValue(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('value', AttributeValue(),
openType=opentype.OpenType('type', certificateAttributesMap))
)
class Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', AttributeType()),
namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
)
class SubjectDirectoryAttributes(univ.SequenceOf):
componentType = Attribute()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class RelativeDistinguishedName(univ.SetOf):
componentType = AttributeTypeAndValue()
class RDNSequence(univ.SequenceOf):
componentType = RelativeDistinguishedName()
class Name(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('', RDNSequence())
)
class CertificateSerialNumber(univ.Integer):
pass
class AnotherName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType('value',
univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class GeneralName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName',
AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('rfc822Name',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('dNSName',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.NamedType('x400Address',
ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.NamedType('directoryName',
Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
namedtype.NamedType('ediPartyName',
EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
namedtype.NamedType('uniformResourceIdentifier',
char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
namedtype.NamedType('iPAddress', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
)
class GeneralNames(univ.SequenceOf):
componentType = GeneralName()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class AccessDescription(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
namedtype.NamedType('accessLocation', GeneralName())
)
class AuthorityInfoAccessSyntax(univ.SequenceOf):
componentType = AccessDescription()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class AuthorityKeyIdentifier(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class DistributionPointName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('fullName', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class DistributionPoint(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
)
class CRLDistPointsSyntax(univ.SequenceOf):
componentType = DistributionPoint()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class IssuingDistributionPoint(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
)
class GeneralSubtree(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('base', GeneralName()),
namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class GeneralSubtrees(univ.SequenceOf):
componentType = GeneralSubtree()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class NameConstraints(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class CertificateIssuer(GeneralNames):
pass
class SubjectAltName(GeneralNames):
pass
class IssuerAltName(GeneralNames):
pass
certificateExtensionsMap = {}
class Extension(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extnID', univ.ObjectIdentifier()),
namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
namedtype.NamedType('extnValue', univ.OctetString(),
openType=opentype.OpenType('extnID', certificateExtensionsMap))
)
class Extensions(univ.SequenceOf):
componentType = Extension()
sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
class SubjectPublicKeyInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('algorithm', AlgorithmIdentifier()),
namedtype.NamedType('subjectPublicKey', univ.BitString())
)
class UniqueIdentifier(univ.BitString):
pass
class Time(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('utcTime', useful.UTCTime()),
namedtype.NamedType('generalTime', useful.GeneralizedTime())
)
class Validity(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('notBefore', Time()),
namedtype.NamedType('notAfter', Time())
)
class Version(univ.Integer):
namedValues = namedval.NamedValues(
('v1', 0), ('v2', 1), ('v3', 2)
)
class TBSCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.DefaultedNamedType('version', Version('v1').subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('serialNumber', CertificateSerialNumber()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('validity', Validity()),
namedtype.NamedType('subject', Name()),
namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
namedtype.OptionalNamedType('extensions', Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
)
class Certificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificate', TBSCertificate()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signatureValue', univ.BitString())
)
# CRL structures
class RevokedCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('userCertificate', CertificateSerialNumber()),
namedtype.NamedType('revocationDate', Time()),
namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
)
class TBSCertList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('version', Version()),
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('thisUpdate', Time()),
namedtype.OptionalNamedType('nextUpdate', Time()),
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class CertificateList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertList', TBSCertList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
# map of AttributeType -> AttributeValue
_certificateAttributesMapUpdate = {
id_at_name: X520name(),
id_at_surname: X520name(),
id_at_givenName: X520name(),
id_at_initials: X520name(),
id_at_generationQualifier: X520name(),
id_at_commonName: X520CommonName(),
id_at_localityName: X520LocalityName(),
id_at_stateOrProvinceName: X520StateOrProvinceName(),
id_at_organizationName: X520OrganizationName(),
id_at_organizationalUnitName: X520OrganizationalUnitName(),
id_at_title: X520Title(),
id_at_dnQualifier: X520dnQualifier(),
id_at_countryName: X520countryName(),
emailAddress: Pkcs9email(),
}
certificateAttributesMap.update(_certificateAttributesMapUpdate)
# map of Certificate Extension OIDs to Extensions
_certificateExtensionsMapUpdate = {
id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
id_ce_keyUsage: KeyUsage(),
id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
# TODO
# id_ce_certificatePolicies: PolicyInformation(), # could be a sequence of concat'ed objects?
id_ce_policyMappings: PolicyMappings(),
id_ce_subjectAltName: SubjectAltName(),
id_ce_issuerAltName: IssuerAltName(),
id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
id_ce_basicConstraints: BasicConstraints(),
id_ce_nameConstraints: NameConstraints(),
id_ce_policyConstraints: PolicyConstraints(),
id_ce_extKeyUsage: ExtKeyUsageSyntax(),
id_ce_cRLDistributionPoints: CRLDistPointsSyntax(),
id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
id_ce_cRLNumber: univ.Integer(),
id_ce_deltaCRLIndicator: BaseCRLNumber(),
id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
id_ce_cRLReasons: CRLReason(),
id_ce_holdInstructionCode: univ.ObjectIdentifier(),
id_ce_invalidityDate: useful.GeneralizedTime(),
id_ce_certificateIssuer: GeneralNames(),
}
certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
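# Minimal usage sketch (illustrative, not part of the original module): the
# specs above can be handed to pyasn1's DER decoder to parse a certificate.
# The helper name and the selection of returned fields are examples only; the
# attribute/extension maps populated above are what a decoder consults when
# asked to resolve open types.
def _example_decode_certificate(der_bytes):
    from pyasn1.codec.der import decoder

    cert, rest = decoder.decode(der_bytes, asn1Spec=Certificate())
    if rest:
        raise ValueError('trailing data after certificate')

    tbs = cert['tbsCertificate']
    return {
        'serialNumber': int(tbs['serialNumber']),
        'notBefore': tbs['validity']['notBefore'],
        'signatureOID': str(cert['signatureAlgorithm']['algorithm']),
    }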
|
|
"""JSON Validator
Copyright 2009 Google Inc.
http://code.google.com/p/google-mobwrite/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = 'fraser@google.com (Neil Fraser)'
# States
ERROR = -1
GROUND = 0
STRING = 1
STRINGSLASH = 2
UNICODE1 = 3
UNICODE2 = 4
UNICODE3 = 5
UNICODE4 = 6
TRUE1 = 7
TRUE2 = 8
TRUE3 = 9
FALSE1 = 10
FALSE2 = 11
FALSE3 = 12
FALSE4 = 13
NULL1 = 14
NULL2 = 15
NULL3 = 16
NEGATIVE = 17
ZERO = 18
NUMBER = 19
DECIMALBAD = 20
DECIMALOK = 21
EXPONENT1 = 22
EXPONENT2 = 23
EXPONENT3 = 24
# Tokens
OBJECTSTART = 1
OBJECTEND = 2
ARRAYSTART = 3
ARRAYEND = 4
COLON = 5
COMMA = 6
STRVALUE = 7
VALUE = 8 # true, false, null, number
# Transformations
TRANSFORMATIONS = {}
def _add_rule(startState, characters, endState, token):
"""Add a rule to the transformations map.
Args:
startState: This rule only applies if the parser is in this state.
characters: This rule only applies if the current character is one of these.
    endState: When applied, this rule changes the state to this.
token: When applied, this rule adds this token to the stack.
"""
# None is treated as a wildcard character.
  if characters is None:
TRANSFORMATIONS[(startState, None)] = (endState, token)
else:
# Create a rule for every character.
for char in characters:
TRANSFORMATIONS[(startState, char)] = (endState, token)
_add_rule(GROUND, " \r\n", GROUND, None)
_add_rule(GROUND, "[", GROUND, ARRAYSTART)
_add_rule(GROUND, "]", GROUND, ARRAYEND)
_add_rule(GROUND, "{", GROUND, OBJECTSTART)
_add_rule(GROUND, "}", GROUND, OBJECTEND)
_add_rule(GROUND, ",", GROUND, COMMA)
_add_rule(GROUND, ":", GROUND, COLON)
_add_rule(GROUND, "\"", STRING, None)
_add_rule(STRING, "\"", GROUND, STRVALUE)
_add_rule(STRING, "\\", STRINGSLASH, None)
_add_rule(STRINGSLASH, "\"\\/bfnrt", STRING, None)
_add_rule(STRINGSLASH, "u", UNICODE1, None)
_add_rule(UNICODE1, "0123456789abcdefABCDEF", UNICODE2, None)
_add_rule(UNICODE2, "0123456789abcdefABCDEF", UNICODE3, None)
_add_rule(UNICODE3, "0123456789abcdefABCDEF", UNICODE4, None)
_add_rule(UNICODE4, "0123456789abcdefABCDEF", STRING, None)
_add_rule(STRING, "\b\f\n\r", ERROR, None)
_add_rule(STRING, None, STRING, None)
_add_rule(GROUND, "t", TRUE1, None)
_add_rule(TRUE1, "r", TRUE2, None)
_add_rule(TRUE2, "u", TRUE3, None)
_add_rule(TRUE3, "e", GROUND, VALUE)
_add_rule(GROUND, "f", FALSE1, None)
_add_rule(FALSE1, "a", FALSE2, None)
_add_rule(FALSE2, "l", FALSE3, None)
_add_rule(FALSE3, "s", FALSE4, None)
_add_rule(FALSE4, "e", GROUND, VALUE)
_add_rule(GROUND, "n", NULL1, None)
_add_rule(NULL1, "u", NULL2, None)
_add_rule(NULL2, "l", NULL3, None)
_add_rule(NULL3, "l", GROUND, VALUE)
_add_rule(GROUND, "-", NEGATIVE, None)
_add_rule(GROUND, "0", ZERO, VALUE)
_add_rule(GROUND, "123456789", NUMBER, VALUE)
_add_rule(NEGATIVE, "0", NUMBER, VALUE)
_add_rule(NEGATIVE, "123456789", NUMBER, VALUE)
_add_rule(NUMBER, "0123456789", NUMBER, None)
_add_rule(NUMBER, ".", DECIMALBAD, None)
_add_rule(ZERO, ".", DECIMALBAD, None)
_add_rule(DECIMALBAD, "0123456789", DECIMALOK, None)
_add_rule(DECIMALOK, "0123456789", DECIMALOK, None)
_add_rule(NUMBER, "eE", EXPONENT1, None)
_add_rule(ZERO, "eE", EXPONENT1, None)
_add_rule(DECIMALOK, "eE", EXPONENT1, None)
_add_rule(EXPONENT1, "+-", EXPONENT2, None)
_add_rule(EXPONENT1, "0123456789", EXPONENT3, None)
_add_rule(EXPONENT2, "0123456789", EXPONENT3, None)
_add_rule(EXPONENT3, "0123456789", EXPONENT3, None)
_add_rule(EXPONENT3, " \r\n", GROUND, None)
_add_rule(EXPONENT3, ",", GROUND, COMMA)
_add_rule(EXPONENT3, ":", GROUND, COLON)
_add_rule(EXPONENT3, "]", GROUND, ARRAYEND)
_add_rule(EXPONENT3, "}", GROUND, OBJECTEND)
_add_rule(DECIMALOK, " \r\n", GROUND, None)
_add_rule(DECIMALOK, ",", GROUND, COMMA)
_add_rule(DECIMALOK, ":", GROUND, COLON)
_add_rule(DECIMALOK, "]", GROUND, ARRAYEND)
_add_rule(DECIMALOK, "}", GROUND, OBJECTEND)
_add_rule(NUMBER, " \r\n", GROUND, None)
_add_rule(NUMBER, ",", GROUND, COMMA)
_add_rule(NUMBER, ":", GROUND, COLON)
_add_rule(NUMBER, "]", GROUND, ARRAYEND)
_add_rule(NUMBER, "}", GROUND, OBJECTEND)
_add_rule(ZERO, " \r\n", GROUND, None)
_add_rule(ZERO, ",", GROUND, COMMA)
_add_rule(ZERO, ":", GROUND, COLON)
_add_rule(ZERO, "]", GROUND, ARRAYEND)
_add_rule(ZERO, "}", GROUND, OBJECTEND)
# List of states which are acceptable to end in.
EXITSTATES = (GROUND, NUMBER, ZERO, EXPONENT3)
def is_valid(string):
"""Returns true if the string is valid syntax for a JSON array or object.
Args:
string: JSON string to check.
Returns:
True iff JSON string is valid.
"""
state = GROUND
tokens = []
for char in string:
# Transform from this state to the next state.
    transition = TRANSFORMATIONS.get((state, char))
    if transition is None:
      # No matching character, check for a wildcard match.
      transition = TRANSFORMATIONS.get((state, None))
      if transition is None:
        return False
    (state, token) = transition
    if token is not None:
      tokens.append(token)
  if state not in EXITSTATES:
# A half-defined value.
return False
if not tokens or (tokens[0] != ARRAYSTART and tokens[0] != OBJECTSTART):
# Root value must be array or object.
return False
if not _pop_value(tokens):
# Not a value.
return False
if tokens:
# Leftover tokens beyond first value.
return False
return True
def _pop_value(tokens):
"""Do the provided JSON tokens form a value? Starting from the end, pop
tokens off the list as they are used. Unused tokens remain on the list.
This function is recursive.
Args:
tokens: List of JSON tokens.
Returns:
True iff JSON value is found.
"""
if not tokens:
# Empty
return False
  # Work backwards since t.pop() is much more efficient than del t[0].
token = tokens.pop()
if token == VALUE or token == STRVALUE:
return True
if token == ARRAYEND:
has_value = False
while tokens:
if tokens[-1] == ARRAYSTART:
tokens.pop()
return True
if has_value:
if tokens[-1] != COMMA:
# Values not comma separated.
return False
tokens.pop()
if not _pop_value(tokens):
# Array contains non-value.
return False
has_value = True
# Ran out of tokens looking for "["
return False
if token == OBJECTEND:
has_value = False
while tokens:
if tokens[-1] == OBJECTSTART:
tokens.pop()
return True
if has_value:
if tokens[-1] != COMMA:
# Pairs not comma separated.
return False
tokens.pop()
if not _pop_value(tokens):
# Object contains non-value.
return False
has_value = True
if not tokens:
break
if tokens[-1] != COLON:
# Name:value not colon separated.
return False
tokens.pop()
if not tokens:
break
if tokens[-1] != STRVALUE:
# Object property not a string.
return False
tokens.pop()
# Ran out of tokens looking for "{"
return False
# Must be a comma or colon.
return False
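# Minimal usage sketch (illustrative, not part of the original module): the
# validator only accepts a top-level array or object, so bare scalars are
# rejected even though they are syntactically valid JSON values.
def _example_is_valid():
  assert is_valid('{"a": [1, 2.5e3, "x"], "b": null}')
  assert is_valid('[true, false, null]')
  assert not is_valid('42')        # root must be an array or object
  assert not is_valid('[1, 2,]')   # trailing comma
  assert not is_valid('{"a" 1}')   # missing colon between name and value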
|
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
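# Illustrative sketch only (not used by build-script itself): the hook passed
# as `apply_defaults` runs on every parse, because argparse implements
# parse_args() on top of parse_known_args().  The option name and default
# below are made up for the example.
def _example_apply_defaults_usage():
    def fill_defaults(namespace):
        if namespace.example_variant is None:
            namespace.example_variant = 'Debug'

    parser = _ApplyDefaultsArgumentParser(apply_defaults=fill_defaults)
    parser.add_argument('--example-variant')
    return parser.parse_args([]).example_variant  # -> 'Debug'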
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
    if args.cmark_build_variant is not None:
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
    # If no tests were specified, skip the Swift stdlib tests on all platforms.
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_android = False
args.test_swiftpm = False
args.test_swift_driver = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftformat = False
args.test_swiftevolve = False
args.test_toolchainbenchmarks = False
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
    # tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
# If building natively on an Android host, allow running the test suite
# without the NDK config.
if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
.host_target().name):
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
           help='also build for tvOS, but disallow tests that require a tvOS '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
           help='also build for watchOS, but disallow tests that require a '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma-'
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. These are prepended to the '
'default "-j8" argument when the CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semicolon-separated list of LLVM components to install')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Available targets: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', store_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], store_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], store_true('build_llbuild'),
help='build llbuild')
option(['--libcxx'], store_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], store_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], store_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftformat'], store_true('build_swiftformat'),
help='build swift-format')
option(['--swiftevolve'], store_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--swift-driver'], toggle_true('build_swift_driver'),
help='build swift-driver')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current master')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swift-driver'], toggle_true('install_swift_driver'),
help='install new Swift driver')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just-built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just-built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
store_true('install_playgroundsupport'),
help='install playground support')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], store_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
' SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` be treated as `-t=i` (see the illustrative sketch below).
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
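# Illustrative sketch (independent of this script) of the nargs='?' pitfall
# noted above: argparse attaches the trailing character to the short option
# instead of treating it as a separate flag.
#
#   import argparse
#   p = argparse.ArgumentParser()
#   p.add_argument('-t', nargs='?', const=True)
#   p.add_argument('-i', action='store_true')
#   p.parse_args(['-ti'])   # -> Namespace(i=False, t='i'); '-i' is consumed
#                           #    as the value of '-t' rather than as a flag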
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
'mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Has no effect if host testing '
'(--host-test) is not enabled')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
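# An illustrative invocation of the workflow described above (a sketch; any
# flag besides --tsan-libdispatch-test is an assumption, not a recipe):
#   ./swift/utils/build-script --release --tsan-libdispatch-test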
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='build external Swift Benchmark Suite')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing 32-bit iOS simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swift-driver', toggle_false('test_swift_driver'),
help='skip testing Swift driver')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftformat', toggle_false('test_swiftformat'),
help='skip testing swift-format')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing SwiftInspect')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers for libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64'],
default='armv7',
help='The Android target architecture when building for Android. '
'Currently only armv7 and aarch64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
option('--enable-experimental-concurrency', toggle_true,
default=True,
help='Enable experimental Swift concurrency model.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
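# Illustrative sketch (independent of this script) of the auto-expansion
# mentioned above: argparse abbreviation matching silently expands an
# undeclared flag to the only known option it prefixes.
#
#   import argparse
#   p = argparse.ArgumentParser()
#   p.add_argument('--skip-test-swiftevolve', action='store_true')
#   p.parse_args(['--skip-test-swift'])
#   # -> Namespace(skip_test_swiftevolve=True); the shorter spelling is
#   #    treated as an abbreviation of --skip-test-swiftevolve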
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB) incrementally, and optionally tests it
thereafter. Different build configurations are maintained in parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects; this is
especially important if you intend to use the -l, -L, --lldb, or --debug-lldb
options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift, and the Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \\
--test \\
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`.
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process and giving you
controls for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
# pylint: disable-msg=E1101,W0612
import operator
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range
from pandas.core.common import isnull
from pandas.tseries.offsets import BDay
import pandas.util.testing as tm
from pandas.compat import range
from pandas import compat
from pandas.tools.util import cartesian_product
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
from pandas.sparse.api import SparseSeries
from pandas.tests.series.test_misc_api import SharedWithSparse
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
index = np.arange(20)
arr[:2] = nan
arr[5:10] = nan
arr[-3:] = nan
return arr, index
def _test_data2():
# nan-based
arr = np.arange(15, dtype=float)
index = np.arange(15)
arr[7:12] = nan
arr[-1:] = nan
return arr, index
def _test_data1_zero():
# zero-based
arr, index = _test_data1()
arr[np.isnan(arr)] = 0
return arr, index
def _test_data2_zero():
# zero-based
arr, index = _test_data2()
arr[np.isnan(arr)] = 0
return arr, index
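# A minimal sketch of the two fixture flavours above (constructed from plain
# lists here rather than the fixture arrays): with the default NaN fill value
# only non-NaN entries are stored in sp_values, while with fill_value=0 only
# non-zero entries are stored.
#
#   pd.SparseSeries([np.nan, 1.0, 0.0, 2.0]).sp_values
#   # -> array([1., 0., 2.])
#   pd.SparseSeries([0.0, 1.0, 0.0, 2.0], fill_value=0).sp_values
#   # -> array([1., 2.])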
class TestSparseSeries(tm.TestCase, SharedWithSparse):
_multiprocess_can_split_ = True
def setUp(self):
arr, index = _test_data1()
date_index = bdate_range('1/1/2011', periods=len(index))
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
self.ts = self.bseries
self.btseries = SparseSeries(arr, index=date_index, kind='block')
self.iseries = SparseSeries(arr, index=index, kind='integer',
name='iseries')
arr, index = _test_data2()
self.bseries2 = SparseSeries(arr, index=index, kind='block')
self.iseries2 = SparseSeries(arr, index=index, kind='integer')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
self.ziseries = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
arr, index = _test_data2_zero()
self.zbseries2 = SparseSeries(arr, index=index, kind='block',
fill_value=0)
self.ziseries2 = SparseSeries(arr, index=index, kind='integer',
fill_value=0)
def test_constructor_dtype(self):
arr = SparseSeries([np.nan, 1, 2, np.nan])
self.assertEqual(arr.dtype, np.float64)
self.assertTrue(np.isnan(arr.fill_value))
arr = SparseSeries([np.nan, 1, 2, np.nan], fill_value=0)
self.assertEqual(arr.dtype, np.float64)
self.assertEqual(arr.fill_value, 0)
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64, fill_value=np.nan)
self.assertEqual(arr.dtype, np.int64)
self.assertTrue(np.isnan(arr.fill_value))
arr = SparseSeries([0, 1, 2, 4], dtype=np.int64)
self.assertEqual(arr.dtype, np.int64)
self.assertEqual(arr.fill_value, 0)
arr = SparseSeries([0, 1, 2, 4], fill_value=0, dtype=np.int64)
self.assertEqual(arr.dtype, np.int64)
self.assertEqual(arr.fill_value, 0)
def test_iteration_and_str(self):
[x for x in self.bseries]
str(self.bseries)
def test_TimeSeries_deprecation(self):
# deprecation TimeSeries, #10890
with tm.assert_produces_warning(FutureWarning):
pd.SparseTimeSeries(1, index=pd.date_range('20130101', periods=3))
def test_construct_DataFrame_with_sp_series(self):
# it works!
df = DataFrame({'col': self.bseries})
# printing & access
df.iloc[:1]
df['col']
df.dtypes
str(df)
tm.assert_sp_series_equal(df['col'], self.bseries, check_names=False)
result = df.iloc[:, 0]
tm.assert_sp_series_equal(result, self.bseries, check_names=False)
# blocking
expected = Series({'col': 'float64:sparse'})
result = df.ftypes
tm.assert_series_equal(expected, result)
def test_constructor_preserve_attr(self):
arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
self.assertEqual(arr.dtype, np.int64)
self.assertEqual(arr.fill_value, 0)
s = pd.SparseSeries(arr, name='x')
self.assertEqual(s.dtype, np.int64)
self.assertEqual(s.fill_value, 0)
def test_series_density(self):
# GH2803
ts = Series(np.random.randn(10))
ts[2:-2] = nan
sts = ts.to_sparse()
density = sts.density # don't die
self.assertEqual(density, 4 / 10.0)
def test_sparse_to_dense(self):
arr, index = _test_data1()
series = self.bseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='bseries'))
series = self.bseries.to_dense(sparse_only=True)
indexer = np.isfinite(arr)
exp = Series(arr[indexer], index=index[indexer], name='bseries')
tm.assert_series_equal(series, exp)
series = self.iseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='iseries'))
arr, index = _test_data1_zero()
series = self.zbseries.to_dense()
tm.assert_series_equal(series, Series(arr, name='zbseries'))
series = self.ziseries.to_dense()
tm.assert_series_equal(series, Series(arr))
def test_to_dense_fill_value(self):
s = pd.Series([1, np.nan, np.nan, 3, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([1, np.nan, 0, 3, 0])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s).to_dense()
tm.assert_series_equal(res, s)
s = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan])
res = SparseSeries(s, fill_value=0).to_dense()
tm.assert_series_equal(res, s)
def test_dense_to_sparse(self):
series = self.bseries.to_dense()
bseries = series.to_sparse(kind='block')
iseries = series.to_sparse(kind='integer')
tm.assert_sp_series_equal(bseries, self.bseries)
tm.assert_sp_series_equal(iseries, self.iseries, check_names=False)
self.assertEqual(iseries.name, self.bseries.name)
self.assertEqual(len(series), len(bseries))
self.assertEqual(len(series), len(iseries))
self.assertEqual(series.shape, bseries.shape)
self.assertEqual(series.shape, iseries.shape)
# non-NaN fill value
series = self.zbseries.to_dense()
zbseries = series.to_sparse(kind='block', fill_value=0)
ziseries = series.to_sparse(kind='integer', fill_value=0)
tm.assert_sp_series_equal(zbseries, self.zbseries)
tm.assert_sp_series_equal(ziseries, self.ziseries, check_names=False)
self.assertEqual(ziseries.name, self.zbseries.name)
self.assertEqual(len(series), len(zbseries))
self.assertEqual(len(series), len(ziseries))
self.assertEqual(series.shape, zbseries.shape)
self.assertEqual(series.shape, ziseries.shape)
def test_to_dense_preserve_name(self):
assert (self.bseries.name is not None)
result = self.bseries.to_dense()
self.assertEqual(result.name, self.bseries.name)
def test_constructor(self):
# test setup guys
self.assertTrue(np.isnan(self.bseries.fill_value))
tm.assertIsInstance(self.bseries.sp_index, BlockIndex)
self.assertTrue(np.isnan(self.iseries.fill_value))
tm.assertIsInstance(self.iseries.sp_index, IntIndex)
self.assertEqual(self.zbseries.fill_value, 0)
tm.assert_numpy_array_equal(self.zbseries.values.values,
self.bseries.to_dense().fillna(0).values)
# pass SparseSeries
def _check_const(sparse, name):
# use passed series name
result = SparseSeries(sparse)
tm.assert_sp_series_equal(result, sparse)
self.assertEqual(sparse.name, name)
self.assertEqual(result.name, name)
# use passed name
result = SparseSeries(sparse, name='x')
tm.assert_sp_series_equal(result, sparse, check_names=False)
self.assertEqual(result.name, 'x')
_check_const(self.bseries, 'bseries')
_check_const(self.iseries, 'iseries')
_check_const(self.zbseries, 'zbseries')
# Sparse time series works
date_index = bdate_range('1/1/2000', periods=len(self.bseries))
s5 = SparseSeries(self.bseries, index=date_index)
tm.assertIsInstance(s5, SparseSeries)
# pass Series
bseries2 = SparseSeries(self.bseries.to_dense())
tm.assert_numpy_array_equal(self.bseries.sp_values, bseries2.sp_values)
# pass dict?
# don't copy the data by default
values = np.ones(self.bseries.npoints)
sp = SparseSeries(values, sparse_index=self.bseries.sp_index)
sp.sp_values[:5] = 97
self.assertEqual(values[0], 97)
self.assertEqual(len(sp), 20)
self.assertEqual(sp.shape, (20, ))
# but can make it copy!
sp = SparseSeries(values, sparse_index=self.bseries.sp_index,
copy=True)
sp.sp_values[:5] = 100
self.assertEqual(values[0], 97)
self.assertEqual(len(sp), 20)
self.assertEqual(sp.shape, (20, ))
def test_constructor_scalar(self):
data = 5
sp = SparseSeries(data, np.arange(100))
sp = sp.reindex(np.arange(200))
self.assertTrue((sp.ix[:99] == data).all())
self.assertTrue(isnull(sp.ix[100:]).all())
data = np.nan
sp = SparseSeries(data, np.arange(100))
self.assertEqual(len(sp), 100)
self.assertEqual(sp.shape, (100, ))
def test_constructor_ndarray(self):
pass
def test_constructor_nonnan(self):
arr = [0, 0, 0, nan, nan]
sp_series = SparseSeries(arr, fill_value=0)
tm.assert_numpy_array_equal(sp_series.values.values, np.array(arr))
self.assertEqual(len(sp_series), 5)
self.assertEqual(sp_series.shape, (5, ))
# GH 9272
def test_constructor_empty(self):
sp = SparseSeries()
self.assertEqual(len(sp.index), 0)
self.assertEqual(sp.shape, (0, ))
def test_copy_astype(self):
cop = self.bseries.astype(np.float64)
self.assertIsNot(cop, self.bseries)
self.assertIs(cop.sp_index, self.bseries.sp_index)
self.assertEqual(cop.dtype, np.float64)
cop2 = self.iseries.copy()
tm.assert_sp_series_equal(cop, self.bseries)
tm.assert_sp_series_equal(cop2, self.iseries)
# test that data is copied
cop[:5] = 97
self.assertEqual(cop.sp_values[0], 97)
self.assertNotEqual(self.bseries.sp_values[0], 97)
# correct fill value
zbcop = self.zbseries.copy()
zicop = self.ziseries.copy()
tm.assert_sp_series_equal(zbcop, self.zbseries)
tm.assert_sp_series_equal(zicop, self.ziseries)
# no deep copy
view = self.bseries.copy(deep=False)
view.sp_values[:5] = 5
self.assertTrue((self.bseries.sp_values[:5] == 5).all())
def test_shape(self):
# GH 10452
self.assertEqual(self.bseries.shape, (20, ))
self.assertEqual(self.btseries.shape, (20, ))
self.assertEqual(self.iseries.shape, (20, ))
self.assertEqual(self.bseries2.shape, (15, ))
self.assertEqual(self.iseries2.shape, (15, ))
self.assertEqual(self.zbseries2.shape, (15, ))
self.assertEqual(self.ziseries2.shape, (15, ))
def test_astype(self):
with tm.assertRaises(ValueError):
self.bseries.astype(np.int64)
def test_astype_all(self):
orig = pd.Series(np.array([1, 2, 3]))
s = SparseSeries(orig)
types = [np.float64, np.float32, np.int64,
np.int32, np.int16, np.int8]
for typ in types:
res = s.astype(typ)
self.assertEqual(res.dtype, typ)
tm.assert_series_equal(res.to_dense(), orig.astype(typ))
def test_kind(self):
self.assertEqual(self.bseries.kind, 'block')
self.assertEqual(self.iseries.kind, 'integer')
def test_to_frame(self):
# GH 9850
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x')
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.SparseDataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_sp_frame_equal(s.to_frame(name='y'), exp)
s = pd.SparseSeries([1, 2, 0, nan, 4, nan, 0], name='x', fill_value=0)
exp = pd.SparseDataFrame({'x': [1, 2, 0, nan, 4, nan, 0]},
default_fill_value=0)
tm.assert_sp_frame_equal(s.to_frame(), exp)
exp = pd.DataFrame({'y': [1, 2, 0, nan, 4, nan, 0]})
tm.assert_frame_equal(s.to_frame(name='y').to_dense(), exp)
def test_pickle(self):
def _test_roundtrip(series):
unpickled = self.round_trip_pickle(series)
tm.assert_sp_series_equal(series, unpickled)
tm.assert_series_equal(series.to_dense(), unpickled.to_dense())
self._check_all(_test_roundtrip)
def _check_all(self, check_func):
check_func(self.bseries)
check_func(self.iseries)
check_func(self.zbseries)
check_func(self.ziseries)
def test_getitem(self):
def _check_getitem(sp, dense):
for idx, val in compat.iteritems(dense):
tm.assert_almost_equal(val, sp[idx])
for i in range(len(dense)):
tm.assert_almost_equal(sp[i], dense[i])
# j = np.float64(i)
# assert_almost_equal(sp[j], dense[j])
# API change 1/6/2012
# negative getitem works
# for i in xrange(len(dense)):
# assert_almost_equal(sp[-i], dense[-i])
_check_getitem(self.bseries, self.bseries.to_dense())
_check_getitem(self.btseries, self.btseries.to_dense())
_check_getitem(self.zbseries, self.zbseries.to_dense())
_check_getitem(self.iseries, self.iseries.to_dense())
_check_getitem(self.ziseries, self.ziseries.to_dense())
# exception handling
self.assertRaises(Exception, self.bseries.__getitem__,
len(self.bseries) + 1)
# index not contained
self.assertRaises(Exception, self.btseries.__getitem__,
self.btseries.index[-1] + BDay())
def test_get_get_value(self):
tm.assert_almost_equal(self.bseries.get(10), self.bseries[10])
self.assertIsNone(self.bseries.get(len(self.bseries) + 1))
dt = self.btseries.index[10]
result = self.btseries.get(dt)
expected = self.btseries.to_dense()[dt]
tm.assert_almost_equal(result, expected)
tm.assert_almost_equal(self.bseries.get_value(10), self.bseries[10])
def test_set_value(self):
idx = self.btseries.index[7]
self.btseries.set_value(idx, 0)
self.assertEqual(self.btseries[idx], 0)
self.iseries.set_value('foobar', 0)
self.assertEqual(self.iseries.index[-1], 'foobar')
self.assertEqual(self.iseries['foobar'], 0)
def test_getitem_slice(self):
idx = self.bseries.index
res = self.bseries[::2]
tm.assertIsInstance(res, SparseSeries)
expected = self.bseries.reindex(idx[::2])
tm.assert_sp_series_equal(res, expected)
res = self.bseries[:5]
tm.assertIsInstance(res, SparseSeries)
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:5]))
res = self.bseries[5:]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[5:]))
# negative indices
res = self.bseries[:-3]
tm.assert_sp_series_equal(res, self.bseries.reindex(idx[:-3]))
def test_take(self):
def _compare_with_dense(sp):
dense = sp.to_dense()
def _compare(idx):
dense_result = dense.take(idx).values
sparse_result = sp.take(idx)
self.assertIsInstance(sparse_result, SparseSeries)
tm.assert_almost_equal(dense_result,
sparse_result.values.values)
_compare([1., 2., 3., 4., 5., 0.])
_compare([7, 2, 9, 0, 4])
_compare([3, 6, 3, 4, 7])
self._check_all(_compare_with_dense)
self.assertRaises(Exception, self.bseries.take,
[0, len(self.bseries) + 1])
# Corner case
sp = SparseSeries(np.ones(10) * nan)
exp = pd.Series(np.repeat(nan, 5))
tm.assert_series_equal(sp.take([0, 1, 2, 3, 4]), exp)
def test_numpy_take(self):
sp = SparseSeries([1.0, 2.0, 3.0])
indices = [1, 2]
tm.assert_series_equal(np.take(sp, indices, axis=0).to_dense(),
np.take(sp.to_dense(), indices, axis=0))
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.take,
sp, indices, out=np.empty(sp.shape))
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.take,
sp, indices, mode='clip')
def test_setitem(self):
self.bseries[5] = 7.
self.assertEqual(self.bseries[5], 7.)
def test_setslice(self):
self.bseries[5:10] = 7.
tm.assert_series_equal(self.bseries[5:10].to_dense(),
Series(7., index=range(5, 10),
name=self.bseries.name))
def test_operators(self):
def _check_op(a, b, op):
sp_result = op(a, b)
adense = a.to_dense() if isinstance(a, SparseSeries) else a
bdense = b.to_dense() if isinstance(b, SparseSeries) else b
dense_result = op(adense, bdense)
tm.assert_almost_equal(sp_result.to_dense(), dense_result)
def check(a, b):
_check_op(a, b, operator.add)
_check_op(a, b, operator.sub)
_check_op(a, b, operator.truediv)
_check_op(a, b, operator.floordiv)
_check_op(a, b, operator.mul)
_check_op(a, b, lambda x, y: operator.add(y, x))
_check_op(a, b, lambda x, y: operator.sub(y, x))
_check_op(a, b, lambda x, y: operator.truediv(y, x))
_check_op(a, b, lambda x, y: operator.floordiv(y, x))
_check_op(a, b, lambda x, y: operator.mul(y, x))
# NaN ** 0 = 1 in C?
# _check_op(a, b, operator.pow)
# _check_op(a, b, lambda x, y: operator.pow(y, x))
check(self.bseries, self.bseries)
check(self.iseries, self.iseries)
check(self.bseries, self.iseries)
check(self.bseries, self.bseries2)
check(self.bseries, self.iseries2)
check(self.iseries, self.iseries2)
# scalar value
check(self.bseries, 5)
# zero-based
check(self.zbseries, self.zbseries * 2)
check(self.zbseries, self.zbseries2)
check(self.ziseries, self.ziseries2)
# with dense
result = self.bseries + self.bseries.to_dense()
tm.assert_sp_series_equal(result, self.bseries + self.bseries)
def test_binary_operators(self):
# skipping for now #####
import nose
raise nose.SkipTest("skipping sparse binary operators test")
def _check_inplace_op(iop, op):
tmp = self.bseries.copy()
expected = op(tmp, self.bseries)
iop(tmp, self.bseries)
tm.assert_sp_series_equal(tmp, expected)
inplace_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'pow']
for op in inplace_ops:
_check_inplace_op(getattr(operator, "i%s" % op),
getattr(operator, op))
def test_abs(self):
s = SparseSeries([1, 2, -3], name='x')
expected = SparseSeries([1, 2, 3], name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
self.assertEqual(result.name, 'x')
result = abs(s)
tm.assert_sp_series_equal(result, expected)
self.assertEqual(result.name, 'x')
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
self.assertEqual(result.name, 'x')
s = SparseSeries([1, -2, 2, -3], fill_value=-2, name='x')
expected = SparseSeries([1, 2, 3], sparse_index=s.sp_index,
fill_value=2, name='x')
result = s.abs()
tm.assert_sp_series_equal(result, expected)
self.assertEqual(result.name, 'x')
result = abs(s)
tm.assert_sp_series_equal(result, expected)
self.assertEqual(result.name, 'x')
result = np.abs(s)
tm.assert_sp_series_equal(result, expected)
self.assertEqual(result.name, 'x')
def test_reindex(self):
def _compare_with_series(sps, new_index):
spsre = sps.reindex(new_index)
series = sps.to_dense()
seriesre = series.reindex(new_index)
seriesre = seriesre.to_sparse(fill_value=sps.fill_value)
tm.assert_sp_series_equal(spsre, seriesre)
tm.assert_series_equal(spsre.to_dense(), seriesre.to_dense())
_compare_with_series(self.bseries, self.bseries.index[::2])
_compare_with_series(self.bseries, list(self.bseries.index[::2]))
_compare_with_series(self.bseries, self.bseries.index[:10])
_compare_with_series(self.bseries, self.bseries.index[5:])
_compare_with_series(self.zbseries, self.zbseries.index[::2])
_compare_with_series(self.zbseries, self.zbseries.index[:10])
_compare_with_series(self.zbseries, self.zbseries.index[5:])
# special cases
same_index = self.bseries.reindex(self.bseries.index)
tm.assert_sp_series_equal(self.bseries, same_index)
self.assertIsNot(same_index, self.bseries)
# corner cases
sp = SparseSeries([], index=[])
# TODO: sp_zero is not used anywhere...remove?
sp_zero = SparseSeries([], index=[], fill_value=0) # noqa
_compare_with_series(sp, np.arange(10))
# with copy=False
reindexed = self.bseries.reindex(self.bseries.index, copy=True)
reindexed.sp_values[:] = 1.
self.assertTrue((self.bseries.sp_values != 1.).all())
reindexed = self.bseries.reindex(self.bseries.index, copy=False)
reindexed.sp_values[:] = 1.
tm.assert_numpy_array_equal(self.bseries.sp_values, np.repeat(1., 10))
def test_sparse_reindex(self):
length = 10
def _check(values, index1, index2, fill_value):
first_series = SparseSeries(values, sparse_index=index1,
fill_value=fill_value)
reindexed = first_series.sparse_reindex(index2)
self.assertIs(reindexed.sp_index, index2)
int_indices1 = index1.to_int_index().indices
int_indices2 = index2.to_int_index().indices
expected = Series(values, index=int_indices1)
expected = expected.reindex(int_indices2).fillna(fill_value)
tm.assert_almost_equal(expected.values, reindexed.sp_values)
# make sure level argument asserts
# TODO: expected is not used anywhere...remove?
expected = expected.reindex(int_indices2).fillna(fill_value) # noqa
def _check_with_fill_value(values, first, second, fill_value=nan):
i_index1 = IntIndex(length, first)
i_index2 = IntIndex(length, second)
b_index1 = i_index1.to_block_index()
b_index2 = i_index2.to_block_index()
_check(values, i_index1, i_index2, fill_value)
_check(values, b_index1, b_index2, fill_value)
def _check_all(values, first, second):
_check_with_fill_value(values, first, second, fill_value=nan)
_check_with_fill_value(values, first, second, fill_value=0)
index1 = [2, 4, 5, 6, 8, 9]
values1 = np.arange(6.)
_check_all(values1, index1, [2, 4, 5])
_check_all(values1, index1, [2, 3, 4, 5, 6, 7, 8, 9])
_check_all(values1, index1, [0, 1])
_check_all(values1, index1, [0, 1, 7, 8, 9])
_check_all(values1, index1, [])
first_series = SparseSeries(values1,
sparse_index=IntIndex(length, index1),
fill_value=nan)
with tm.assertRaisesRegexp(TypeError,
'new index must be a SparseIndex'):
reindexed = first_series.sparse_reindex(0) # noqa
def test_repr(self):
# TODO: These aren't used
bsrepr = repr(self.bseries) # noqa
isrepr = repr(self.iseries) # noqa
def test_iter(self):
pass
def test_truncate(self):
pass
def test_fillna(self):
pass
def test_groupby(self):
pass
def test_reductions(self):
def _compare_with_dense(obj, op):
sparse_result = getattr(obj, op)()
series = obj.to_dense()
dense_result = getattr(series, op)()
self.assertEqual(sparse_result, dense_result)
to_compare = ['count', 'sum', 'mean', 'std', 'var', 'skew']
def _compare_all(obj):
for op in to_compare:
_compare_with_dense(obj, op)
_compare_all(self.bseries)
self.bseries.sp_values[5:10] = np.NaN
_compare_all(self.bseries)
_compare_all(self.zbseries)
self.zbseries.sp_values[5:10] = np.NaN
_compare_all(self.zbseries)
series = self.zbseries.copy()
series.fill_value = 2
_compare_all(series)
nonna = Series(np.random.randn(20)).to_sparse()
_compare_all(nonna)
nonna2 = Series(np.random.randn(20)).to_sparse(fill_value=0)
_compare_all(nonna2)
def test_dropna(self):
sp = SparseSeries([0, 0, 0, nan, nan, 5, 6], fill_value=0)
sp_valid = sp.valid()
expected = sp.to_dense().valid()
expected = expected[expected != 0]
exp_arr = pd.SparseArray(expected.values, fill_value=0, kind='block')
tm.assert_sp_array_equal(sp_valid.values, exp_arr)
self.assert_index_equal(sp_valid.index, expected.index)
self.assertEqual(len(sp_valid.sp_values), 2)
result = self.bseries.dropna()
expected = self.bseries.to_dense().dropna()
self.assertNotIsInstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_homogenize(self):
def _check_matches(indices, expected):
data = {}
for i, idx in enumerate(indices):
data[i] = SparseSeries(idx.to_int_index().indices,
sparse_index=idx, fill_value=np.nan)
# homogenized is only valid with NaN fill values
homogenized = spf.homogenize(data)
for k, v in compat.iteritems(homogenized):
assert (v.sp_index.equals(expected))
indices1 = [BlockIndex(10, [2], [7]), BlockIndex(10, [1, 6], [3, 4]),
BlockIndex(10, [0], [10])]
expected1 = BlockIndex(10, [2, 6], [2, 3])
_check_matches(indices1, expected1)
indices2 = [BlockIndex(10, [2], [7]), BlockIndex(10, [2], [7])]
expected2 = indices2[0]
_check_matches(indices2, expected2)
# must have NaN fill value
data = {'a': SparseSeries(np.arange(7), sparse_index=expected2,
fill_value=0)}
with tm.assertRaisesRegexp(TypeError, "NaN fill value"):
spf.homogenize(data)
def test_fill_value_corner(self):
cop = self.zbseries.copy()
cop.fill_value = 0
result = self.bseries / cop
self.assertTrue(np.isnan(result.fill_value))
cop2 = self.zbseries.copy()
cop2.fill_value = 1
result = cop2 / cop
# 1 / 0 is inf
self.assertTrue(np.isinf(result.fill_value))
def test_fill_value_when_combine_const(self):
# GH12723
s = SparseSeries([0, 1, np.nan, 3, 4, 5], index=np.arange(6))
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
self.assert_series_equal(res, exp)
def test_shift(self):
series = SparseSeries([nan, 1., 2., 3., nan, nan], index=np.arange(6))
shifted = series.shift(0)
self.assertIsNot(shifted, series)
tm.assert_sp_series_equal(shifted, series)
f = lambda s: s.shift(1)
_dense_series_compare(series, f)
f = lambda s: s.shift(-2)
_dense_series_compare(series, f)
series = SparseSeries([nan, 1., 2., 3., nan, nan],
index=bdate_range('1/1/2000', periods=6))
f = lambda s: s.shift(2, freq='B')
_dense_series_compare(series, f)
f = lambda s: s.shift(2, freq=BDay())
_dense_series_compare(series, f)
def test_shift_nan(self):
# GH 12908
orig = pd.Series([np.nan, 2, np.nan, 4, 0, np.nan, 0])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=0))
def test_shift_dtype(self):
# GH 12908
orig = pd.Series([1, 2, 3, 4], dtype=np.int64)
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.shift(0), orig.shift(0).to_sparse())
sparse = orig.to_sparse(fill_value=np.nan)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=np.nan))
# shift(1) or more span changes dtype to float64
tm.assert_sp_series_equal(sparse.shift(1), orig.shift(1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(2), orig.shift(2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(3), orig.shift(3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-1), orig.shift(-1).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-2), orig.shift(-2).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-3), orig.shift(-3).to_sparse())
tm.assert_sp_series_equal(sparse.shift(-4), orig.shift(-4).to_sparse())
def test_shift_dtype_fill_value(self):
# GH 12908
orig = pd.Series([1, 0, 0, 4], dtype=np.int64)
for v in [0, 1, np.nan]:
sparse = orig.to_sparse(fill_value=v)
tm.assert_sp_series_equal(sparse.shift(0),
orig.shift(0).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(1),
orig.shift(1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(2),
orig.shift(2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(3),
orig.shift(3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-1),
orig.shift(-1).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-2),
orig.shift(-2).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-3),
orig.shift(-3).to_sparse(fill_value=v))
tm.assert_sp_series_equal(sparse.shift(-4),
orig.shift(-4).to_sparse(fill_value=v))
def test_combine_first(self):
s = self.bseries
result = s[::2].combine_first(s)
result2 = s[::2].combine_first(s.to_dense())
expected = s[::2].to_dense().combine_first(s.to_dense())
expected = expected.to_sparse(fill_value=s.fill_value)
tm.assert_sp_series_equal(result, result2)
tm.assert_sp_series_equal(result, expected)
class TestSparseHandlingMultiIndexes(tm.TestCase):
def setUp(self):
miindex = pd.MultiIndex.from_product(
[["x", "y"], ["10", "20"]], names=['row-foo', 'row-bar'])
micol = pd.MultiIndex.from_product(
[['a', 'b', 'c'], ["1", "2"]], names=['col-foo', 'col-bar'])
dense_multiindex_frame = pd.DataFrame(
index=miindex, columns=micol).sortlevel().sortlevel(axis=1)
self.dense_multiindex_frame = dense_multiindex_frame.fillna(value=3.14)
def test_to_sparse_preserve_multiindex_names_columns(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
sparse_multiindex_frame = sparse_multiindex_frame.copy()
tm.assert_index_equal(sparse_multiindex_frame.columns,
self.dense_multiindex_frame.columns)
def test_round_trip_preserve_multiindex_names(self):
sparse_multiindex_frame = self.dense_multiindex_frame.to_sparse()
round_trip_multiindex_frame = sparse_multiindex_frame.to_dense()
tm.assert_frame_equal(self.dense_multiindex_frame,
round_trip_multiindex_frame,
check_column_type=True,
check_names=True)
class TestSparseSeriesScipyInteraction(tm.TestCase):
# Issue 8048: add SparseSeries coo methods
def setUp(self):
tm._skip_if_no_scipy()
import scipy.sparse
        # SparseSeries inputs used in tests; the tests rely on their order
self.sparse_series = []
s = pd.Series([3.0, nan, 1.0, 2.0, nan, nan])
s.index = pd.MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
self.sparse_series.append(s.to_sparse())
ss = self.sparse_series[0].copy()
ss.index.names = [3, 0, 1, 2]
self.sparse_series.append(ss)
ss = pd.Series([
nan
] * 12, index=cartesian_product((range(3), range(4)))).to_sparse()
for k, v in zip([(0, 0), (1, 2), (1, 3)], [3.0, 1.0, 2.0]):
ss[k] = v
self.sparse_series.append(ss)
# results used in tests
self.coo_matrices = []
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)))
self.coo_matrices.append(scipy.sparse.coo_matrix(
([3.0, 1.0, 2.0], ([0, 1, 1], [0, 0, 1])), shape=(3, 2)))
self.ils = [[(1, 2), (1, 1), (2, 1)], [(1, 1), (1, 2), (2, 1)],
[(1, 2, 'a'), (1, 1, 'b'), (2, 1, 'b')]]
self.jls = [[('a', 0), ('a', 1), ('b', 0), ('b', 1)], [0, 1]]
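    # A minimal sketch of the API exercised below (illustrative only; the
    # variable names A/rows/columns are ours, while ss and the level names
    # 'A'..'D' come from the fixtures above): SparseSeries.to_coo() partitions
    # the MultiIndex levels into row and column labels and returns the sparse
    # matrix together with those labels, e.g.
    #
    #   A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
    #                                column_levels=['C', 'D'],
    #                                sort_labels=False)
    #
    # where A is a scipy.sparse.coo_matrix and rows/columns correspond to the
    # expected ils/jls values asserted in the tests.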
def test_to_coo_text_names_integer_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1], 'column_levels': [2, 3]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_integer_row_levels_sort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': [0, 1],
'column_levels': [2, 3],
'sort_labels': True}
result = (self.coo_matrices[1], self.ils[1], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort_col_level_single(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B', 'C'],
'column_levels': ['D'],
'sort_labels': False}
result = (self.coo_matrices[2], self.ils[2], self.jls[1])
self._run_test(ss, kwargs, result)
def test_to_coo_integer_names_integer_row_levels_nosort(self):
ss = self.sparse_series[1]
kwargs = {'row_levels': [3, 0], 'column_levels': [1, 2]}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_text_names_text_row_levels_nosort(self):
ss = self.sparse_series[0]
kwargs = {'row_levels': ['A', 'B'], 'column_levels': ['C', 'D']}
result = (self.coo_matrices[0], self.ils[0], self.jls[0])
self._run_test(ss, kwargs, result)
def test_to_coo_bad_partition_nonnull_intersection(self):
ss = self.sparse_series[0]
self.assertRaises(ValueError, ss.to_coo, ['A', 'B', 'C'], ['C', 'D'])
def test_to_coo_bad_partition_small_union(self):
ss = self.sparse_series[0]
self.assertRaises(ValueError, ss.to_coo, ['A'], ['C', 'D'])
def test_to_coo_nlevels_less_than_two(self):
ss = self.sparse_series[0]
ss.index = np.arange(len(ss.index))
self.assertRaises(ValueError, ss.to_coo)
def test_to_coo_bad_ilevel(self):
ss = self.sparse_series[0]
self.assertRaises(KeyError, ss.to_coo, ['A', 'B'], ['C', 'D', 'E'])
def test_to_coo_duplicate_index_entries(self):
ss = pd.concat([self.sparse_series[0],
self.sparse_series[0]]).to_sparse()
self.assertRaises(ValueError, ss.to_coo, ['A', 'B'], ['C', 'D'])
def test_from_coo_dense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=True)
check = self.sparse_series[2]
tm.assert_sp_series_equal(ss, check)
def test_from_coo_nodense_index(self):
ss = SparseSeries.from_coo(self.coo_matrices[0], dense_index=False)
check = self.sparse_series[2]
check = check.dropna().to_sparse()
tm.assert_sp_series_equal(ss, check)
def test_from_coo_long_repr(self):
# GH 13114
# test it doesn't raise error. Formatting is tested in test_format
tm._skip_if_no_scipy()
import scipy.sparse
sparse = SparseSeries.from_coo(scipy.sparse.rand(350, 18))
repr(sparse)
def _run_test(self, ss, kwargs, check):
results = ss.to_coo(**kwargs)
self._check_results_to_coo(results, check)
        # for every test, also check the symmetry property (transpose) by
        # swapping row_levels and column_levels
d = kwargs.copy()
d['row_levels'] = kwargs['column_levels']
d['column_levels'] = kwargs['row_levels']
results = ss.to_coo(**d)
results = (results[0].T, results[2], results[1])
self._check_results_to_coo(results, check)
def _check_results_to_coo(self, results, check):
(A, il, jl) = results
(A_result, il_result, jl_result) = check
# convert to dense and compare
tm.assert_numpy_array_equal(A.todense(), A_result.todense())
        # or compare directly as the difference of the sparse matrices
        # (disabled: .max() fails on Python 2.6)
        # assert(abs(A - A_result).max() < 1e-12)
self.assertEqual(il, il_result)
self.assertEqual(jl, jl_result)
def test_concat(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y')
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
exp = pd.SparseDataFrame(exp)
tm.assert_sp_frame_equal(res, exp)
def test_concat_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_axis1_different_fill(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x')
sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
res = pd.concat([sparse1, sparse2], axis=1)
exp = pd.concat([pd.Series(val1, name='x'),
pd.Series(val2, name='y')], axis=1)
self.assertIsInstance(res, pd.SparseDataFrame)
tm.assert_frame_equal(res.to_dense(), exp)
def test_concat_different_kind(self):
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
sparse2 = pd.SparseSeries(val2, name='y', kind='block', fill_value=0)
res = pd.concat([sparse1, sparse2])
exp = pd.concat([pd.Series(val1), pd.Series(val2)])
exp = pd.SparseSeries(exp, kind='integer')
tm.assert_sp_series_equal(res, exp)
res = pd.concat([sparse2, sparse1])
exp = pd.concat([pd.Series(val2), pd.Series(val1)])
exp = pd.SparseSeries(exp, kind='block', fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_concat_sparse_dense(self):
# use first input's fill_value
val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
val2 = np.array([3, np.nan, 4, 0, 0])
for kind in ['integer', 'block']:
sparse = pd.SparseSeries(val1, name='x', kind=kind)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind)
tm.assert_sp_series_equal(res, exp)
sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
dense = pd.Series(val2, name='y')
res = pd.concat([sparse, dense])
exp = pd.concat([pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
res = pd.concat([dense, sparse, dense])
exp = pd.concat([dense, pd.Series(val1), dense])
exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_value_counts(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_dup(self):
vals = [1, 2, nan, 0, nan, 1, 2, nan, nan, 1, 2, 0, 1, 1]
# numeric op may cause sp_values to include the same value as
# fill_value
dense = pd.Series(vals, name='xx') / 0.
sparse = pd.SparseSeries(vals, name='xx') / 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
vals = [1, 2, 0, 0, 0, 1, 2, 0, 0, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx') * 0.
sparse = pd.SparseSeries(vals, name='xx') * 0.
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_value_counts_int(self):
vals = [1, 2, 0, 1, 2, 1, 2, 0, 1, 1]
dense = pd.Series(vals, name='xx')
# fill_value is np.nan, but should not be included in the result
sparse = pd.SparseSeries(vals, name='xx')
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
sparse = pd.SparseSeries(vals, name='xx', fill_value=0)
tm.assert_series_equal(sparse.value_counts(),
dense.value_counts())
tm.assert_series_equal(sparse.value_counts(dropna=False),
dense.value_counts(dropna=False))
def test_isnull(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isnull()
exp = pd.SparseSeries([True, True, False, False, True], name='xxx',
fill_value=True)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isnull()
tm.assertIsInstance(res, pd.SparseSeries)
exp = pd.Series([True, False, False, False, False], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def test_isnotnull(self):
# GH 8276
s = pd.SparseSeries([np.nan, np.nan, 1, 2, np.nan], name='xxx')
res = s.isnotnull()
exp = pd.SparseSeries([False, False, True, True, False], name='xxx',
fill_value=False)
tm.assert_sp_series_equal(res, exp)
# if fill_value is not nan, True can be included in sp_values
s = pd.SparseSeries([np.nan, 0., 1., 2., 0.], name='xxx',
fill_value=0.)
res = s.isnotnull()
tm.assertIsInstance(res, pd.SparseSeries)
exp = pd.Series([False, True, True, True, True], name='xxx')
tm.assert_series_equal(res.to_dense(), exp)
def _dense_series_compare(s, f):
result = f(s)
assert (isinstance(result, SparseSeries))
dense_result = f(s.to_dense())
tm.assert_series_equal(result.to_dense(), dense_result)
class TestSparseSeriesAnalytics(tm.TestCase):
def setUp(self):
arr, index = _test_data1()
self.bseries = SparseSeries(arr, index=index, kind='block',
name='bseries')
arr, index = _test_data1_zero()
self.zbseries = SparseSeries(arr, index=index, kind='block',
fill_value=0, name='zbseries')
def test_cumsum(self):
result = self.bseries.cumsum()
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
# TODO: gh-12855 - return a SparseSeries here
result = self.zbseries.cumsum()
expected = self.zbseries.to_dense().cumsum()
self.assertNotIsInstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
def test_numpy_cumsum(self):
result = np.cumsum(self.bseries)
expected = SparseSeries(self.bseries.to_dense().cumsum())
tm.assert_sp_series_equal(result, expected)
# TODO: gh-12855 - return a SparseSeries here
result = np.cumsum(self.zbseries)
expected = self.zbseries.to_dense().cumsum()
self.assertNotIsInstance(result, SparseSeries)
tm.assert_series_equal(result, expected)
msg = "the 'dtype' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
self.bseries, dtype=np.int64)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
self.zbseries, out=result)
def test_numpy_func_call(self):
# no exception should be raised even though
        # numpy passes in 'axis=None' or 'axis=-1'
funcs = ['sum', 'cumsum', 'var', 'mean',
'prod', 'cumprod', 'std', 'argsort',
'argmin', 'argmax', 'min', 'max']
for func in funcs:
for series in ('bseries', 'zbseries'):
getattr(np, func)(getattr(self, series))
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
|
# Bundles for JS/CSS Minification
PIPELINE_CSS = {
'common': {
'source_filenames': (
'normalize-css/normalize.css',
'sumo/less/main.less',
'sumo/less/search.less',
'mozilla-tabzilla/css/tabzilla.css',
),
'output_filename': 'build/common-min.css'
},
'community': {
'source_filenames': (
'sumo/less/wiki-content.less',
'community/less/community.less',
'community/less/select.less',
),
'output_filename': 'build/community-min.css'
},
'community-new': {
'source_filenames': (
'fontawesome/css/font-awesome.css',
'pikaday/css/pikaday.css',
'sumo/less/wiki-content.less',
'community/less/community-new.less',
),
'output_filename': 'build/community-new-min.css'
},
'mobile-common': {
'source_filenames': (
'fontawesome/css/font-awesome.css',
'normalize-css/normalize.css',
'sumo/less/mobile/main.less',
'sumo/less/mobile/search.less',
),
'output_filename': 'build/mobile-common-min.css'
},
'print': {
'source_filenames': (
'sumo/css/print.css',
),
'output_filename': 'build/print-min.css',
'extra_context': {
'media': 'print',
}
},
# TODO: remove dependency on jquery ui CSS and use our own
'jqueryui': {
'source_filenames': (
'sumo/css/jqueryui/jqueryui.css',
),
'output_filename': 'build/jqueryui-min.css'
},
'forums': {
'source_filenames': (
'sumo/less/forums.less',
'sumo/less/reportabuse.less',
),
'output_filename': 'build/forums-min.css'
},
'questions': {
'source_filenames': (
'sumo/less/questions.less',
'sumo/css/cannedresponses.css',
'sumo/less/reportabuse.less',
),
'output_filename': 'build/questions-min.css'
},
'questions.metrics': {
'source_filenames': (
'sumo/less/questions.metrics.less',
),
'output_filename': 'build/questions.metrics-min.css'
},
'questions.aaq.react': {
'source_filenames': (
'fontawesome/css/font-awesome.css',
'questions/less/questions.aaq.react.less',
),
'output_filename': 'build/questions.aaq.react-min.css'
},
'mobile-questions': {
'source_filenames': (
'sumo/less/mobile/questions.less',
),
'output_filename': 'build/mobile-questions-min.css'
},
'mobile-aaq': {
'source_filenames': (
'sumo/less/mobile/aaq.less',
),
'output_filename': 'build/mobile-aaq-min.css'
},
'rickshaw': {
'source_filenames': (
'sumo/css/jqueryui/jqueryui.css',
'sumo/css/rickshaw.css',
'sumo/less/rickshaw.sumo.less',
),
'output_filename': 'build/rickshaw-min.css'
},
'mobile-search': {
'source_filenames': (
'sumo/less/mobile/search.less',
),
'output_filename': 'build/mobile-search-min.css'
},
'wiki': {
'source_filenames': (
'sumo/css/users.autocomplete.css',
'sumo/css/users.list.css',
'sumo/less/wiki.less',
'sumo/less/wiki-content.less',
'sumo/css/screencast.css',
),
'output_filename': 'build/wiki-min.css'
},
'wiki-editor': {
'source_filenames': (
'codemirror/lib/codemirror.css',
'codemirror/addon/hint/show-hint.css',
),
        'output_filename': 'build/wiki-editor-min.css'
},
'mobile-wiki': {
'source_filenames': (
'sumo/less/mobile/wiki.less',
'sumo/less/wiki-content.less',
),
'output_filename': 'build/mobile-wiki-min.css'
},
'mobile-wiki-minimal': {
'source_filenames': (
'normalize-css/normalize.css',
'sumo/less/mobile/main.less',
'sumo/less/mobile/wiki.less',
'sumo/less/wiki-content.less',
),
'output_filename': 'build/mobile-wiki-minimal-min.css'
},
'home': {
'source_filenames': (
'sumo/less/home.less',
),
'output_filename': 'build/home-min.css'
},
'gallery': {
'source_filenames': (
'sumo/less/gallery.less',
),
'output_filename': 'build/gallery-min.css'
},
'ie': {
'source_filenames': (
'sumo/css/ie.css',
'sumo/css/ie8.css',
),
'output_filename': 'build/ie-min.css'
},
'ie8': {
'source_filenames': ( # IE 8 needs some specific help.
'sumo/css/ie8.css',
),
'output_filename': 'build/ie8-min.css'
},
'customercare': {
'source_filenames': (
'sumo/less/customercare.less',
),
'output_filename': 'build/customercare-min.css'
},
'users': {
'source_filenames': (
'sumo/less/users.less',
'sumo/less/reportabuse.less',
),
'output_filename': 'build/users-min.css'
},
'mobile-users': {
'source_filenames': (
'sumo/less/mobile/users.less',
),
'output_filename': 'build/mobile-users-min.css'
},
'monitor': {
'source_filenames': (
'sumo/css/monitor.css',
),
'output_filename': 'build/monitor-min.css'
},
'messages': {
'source_filenames': (
'sumo/css/users.autocomplete.css',
'sumo/less/messages.less',
),
'output_filename': 'build/messages-min.css'
},
'mobile-messages': {
'source_filenames': (
'sumo/less/mobile/messages.less',
),
'output_filename': 'build/mobile-messages-min.css'
},
'products': {
'source_filenames': (
'sumo/less/products.less',
),
'output_filename': 'build/products-min.css'
},
'mobile-products': {
'source_filenames': (
'sumo/less/mobile/products.less',
),
'output_filename': 'build/mobile-products-min.css'
},
'groups': {
'source_filenames': (
'sumo/css/users.autocomplete.css',
'sumo/css/users.list.css',
'sumo/css/groups.css',
'sumo/css/wiki_syntax.css',
),
'output_filename': 'build/groups-min.css'
},
'kpi.dashboard': {
'source_filenames': (
'sumo/less/kpi.dashboard.less',
),
'output_filename': 'build/kpi.dashboard-min.css'
},
'locale-switcher': {
'source_filenames': (
'sumo/less/locale-switcher.less',
),
'output_filename': 'build/locale-switcher-min.css'
},
'mobile-locale-switcher': {
'source_filenames': (
'sumo/less/mobile/locales.less',
),
'output_filename': 'build/mobile-locale-switcher-min.css'
},
'kbdashboards': {
'source_filenames': (
'sumo/less/kbdashboards.less',
),
'output_filename': 'build/kbdashboards-min.css'
},
'landings-get-involved': {
'source_filenames': (
'sumo/less/landings/get-involved.less',
),
'output_filename': 'build/landings-get-involved-min.css'
},
'mobile-landings-get-involved': {
'source_filenames': (
'sumo/less/mobile/landings/get-involved.less',
),
'output_filename': 'build/mobile-landings-get-involved-min.css'
},
'badges': {
'source_filenames': (
'sumo/less/badges.less',
),
'output_filename': 'build/badges-min.css'
}
}
PIPELINE_JS = {
'common': {
'source_filenames': (
'sumo/js/i18n.js',
'underscore/underscore.js',
'moment/moment.js',
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.placeholder.js',
'sumo/js/templates/macros.js',
'sumo/js/templates/search-results-list.js',
'sumo/js/templates/search-results.js',
'nunjucks/browser/nunjucks-slim.js',
'sumo/js/nunjucks.js',
'sumo/js/cached_xhr.js',
'sumo/js/search_utils.js',
'sumo/js/browserdetect.js',
'sumo/js/libs/uitour.js',
'sumo/js/kbox.js',
'sumo/js/main.js',
'sumo/js/format.js',
'modernizr/modernizr.js',
'sumo/js/geoip-locale.js',
'mailcheck/src/mailcheck.js',
'sumo/js/ui.js',
'sumo/js/analytics.js',
'sumo/js/surveygizmo.js',
'sumo/js/instant_search.js',
'sumo/js/legacy_login_toggle.js',
'sumo/js/responsive-nav-toggle.js',
'sumo/js/profile-avatars.js'
),
'output_filename': 'build/common-min.js'
},
'common.fx.download': {
'source_filenames': (
'sumo/js/show-fx-download.js',
),
'output_filename': 'build/show-fx-download.js'
},
'community': {
'source_filenames': (
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'community/js/community.js',
'community/js/select.js',
),
'output_filename': 'build/community-min.js'
},
'community-new-questions': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
'react/react.min.js',
# 'react/react.js',
'pikaday/pikaday.js',
'community/js/community-questions.browserify.js',
),
'output_filename': 'build/community-questions-min.js'
},
'community-new-l10n': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
'react/react.min.js',
# 'react/react.js',
'pikaday/pikaday.js',
'community/js/community-l10n.browserify.js',
),
'output_filename': 'build/community-l10n-min.js'
},
'community.metrics': {
'source_filenames': (
'kpi/js/kpi.browserify.js',
),
'output_filename': 'build/kpi.dashboard-min.js'
},
'mobile-common': {
'source_filenames': (
'sumo/js/templates/mobile-search-results.js',
'moment/moment.js',
'sumo/js/i18n.js',
'underscore/underscore.js',
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'modernizr/modernizr.js',
'nunjucks/browser/nunjucks-slim.js',
'sumo/js/nunjucks.js',
'sumo/js/browserdetect.js',
'sumo/js/cached_xhr.js',
'sumo/js/search_utils.js',
'sumo/js/aaq.js',
'sumo/js/mobile/ui.js',
'sumo/js/analytics.js',
'sumo/js/instant_search.js',
'sumo/js/mobile/instant_search.js',
),
'output_filename': 'build/mobile-common-min.js'
},
'ie6-8': {
'source_filenames': (
'nwmatcher/src/nwmatcher.js',
'sumo/js/libs/selectivizr-1.0.2.js',
),
'output_filename': 'build/ie6-8-min.js'
},
'jqueryui': {
'source_filenames': (
'jquery-ui/ui/jquery.ui.core.js',
'jquery-ui/ui/jquery.ui.widget.js',
'jquery-ui/ui/jquery.ui.mouse.js',
'jquery-ui/ui/jquery.ui.position.js',
'jquery-ui/ui/jquery.ui.sortable.js',
'jquery-ui/ui/jquery.ui.accordion.js',
'jquery-ui/ui/jquery.ui.autocomplete.js',
'jquery-ui/ui/jquery.ui.datepicker.js',
'jquery-ui/ui/jquery.ui.menu.js',
'jquery-ui/ui/jquery.ui.slider.js',
'jquery-ui/ui/jquery.ui.tabs.js',
),
'output_filename': 'build/jqueryui-min.js'
},
'questions': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/ajaxvote.js',
'sumo/js/ajaxpreview.js',
'sumo/js/remote.js',
'sumo/js/aaq.js',
'sumo/js/questions.js',
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/tags.filter.js',
'sumo/js/tags.js',
'sumo/js/reportabuse.js',
'sumo/js/questions.metrics.js',
'sumo/js/libs/jquery.ajaxupload.js',
'sumo/js/upload.js',
),
'output_filename': 'build/questions-min.js'
},
'questions.metrics': {
'source_filenames': (
'sumo/js/questions.metrics-dashboard.js',
),
'output_filename': 'build/questions.metrics-min.js'
},
'questions.aaq.react': {
'source_filenames': (
# This uses the minified version because it is optimized to leave
# out lots of debug stuff, so it is significantly smaller than
# just minifying react.js.
# TODO: Figure out how to include the full sized version in dev,
# because it produces much nicer error messages.
'react/react.min.js',
# 'react/react.js',
'flux/dist/Flux.js',
'underscore/underscore.js',
'questions/js/aaq.browserify.js',
),
'output_filename': 'build/questions.aaq.react-min.js',
},
'mobile-questions': {
'source_filenames': (
'sumo/js/mobile/questions.js',
'sumo/js/questions.metrics.js',
),
'output_filename': 'build/mobile-questions-min.js'
},
'mobile-aaq': {
'source_filenames': (
'sumo/js/aaq.js',
'sumo/js/mobile/aaq.js',
),
'output_filename': 'build/mobile-aaq-min.js'
},
'products': {
'source_filenames': (
'sumo/js/compare_versions.js',
'sumo/js/products.js',
),
'output_filename': 'build/products-min.js'
},
'mobile-products': {
'source_filenames': (
'sumo/js/templates/mobile-product-search-results.js',
'nunjucks/browser/nunjucks-slim.js',
'sumo/js/nunjucks.js',
'moment/moment.js',
'sumo/js/cached_xhr.js',
'sumo/js/search_utils.js',
'sumo/js/instant_search.js',
'sumo/js/mobile/products.js',
),
'output_filename': 'build/mobile-products-min.js'
},
'search': {
'source_filenames': (
'sumo/js/search.js',
),
'output_filename': 'build/search-min.js'
},
'forums': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/ajaxpreview.js',
'sumo/js/forums.js',
'sumo/js/reportabuse.js',
),
'output_filename': 'build/forums-min.js'
},
'gallery': {
'source_filenames': (
'sumo/js/libs/jquery.ajaxupload.js',
'sumo/js/gallery.js',
),
'output_filename': 'build/gallery-min.js'
},
'wiki': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/libs/django/urlify.js',
'sumo/js/libs/django/prepopulate.js',
'sumo/js/libs/jquery.lazyload.js',
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
'sumo/js/screencast.js',
'sumo/js/showfor.js',
'sumo/js/ajaxvote.js',
'sumo/js/ajaxpreview.js',
'sumo/js/wiki.js',
'sumo/js/tags.js',
'sumo/js/dashboards.js',
'sumo/js/editable.js',
'sumo/js/wiki.metrics.js',
'sumo/js/templates/wiki-related-doc.js',
'sumo/js/templates/wiki-search-results.js',
'sumo/js/wiki_search.js',
),
'output_filename': 'build/wiki-min.js'
},
'rickshaw': {
'source_filenames': (
'd3/d3.js',
'sumo/js/libs/d3.layout.min.js',
'sumo/js/libs/rickshaw.js',
'sumo/js/rickshaw_utils.js',
),
'output_filename': 'build/rickshaw-min.js'
},
'mobile-wiki': {
'source_filenames': (
'underscore/underscore.js',
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.lazyload.js',
'sumo/js/browserdetect.js',
'sumo/js/showfor.js',
'sumo/js/ajaxform.js',
'sumo/js/mobile/wiki.js',
'sumo/js/wiki.metrics.js',
),
'output_filename': 'build/mobile-wiki-min.js'
},
'mobile-wiki-minimal': {
'source_filenames': (
'sumo/js/i18n.js',
'underscore/underscore.js',
'jquery/jquery.min.js',
'jquery/jquery-migrate.js',
'modernizr/modernizr.js',
'sumo/js/browserdetect.js',
'sumo/js/mobile/ui.js',
'sumo/js/analytics.js',
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.lazyload.js',
'sumo/js/showfor.js',
'sumo/js/ajaxform.js',
'sumo/js/mobile/wiki.js',
'sumo/js/wiki.metrics.js',
),
'output_filename': 'build/mobile-wiki-minimal-min.js'
},
'wiki.history': {
'source_filenames': (
'sumo/js/historycharts.js',
),
'output_filename': 'build/wiki.history-min.js'
},
'wiki.diff': {
'source_filenames': (
'sumo/js/libs/diff_match_patch_uncompressed.js',
'sumo/js/diff.js',
),
'output_filename': 'build/wiki.diff-min.js'
},
'wiki.editor': {
'source_filenames': (
'codemirror/lib/codemirror.js',
'codemirror/addon/mode/simple.js',
'codemirror/addon/hint/show-hint.js',
'sumo/js/codemirror.sumo-hint.js',
'sumo/js/codemirror.sumo-mode.js',
),
'output_filename': 'build/wiki.editor-min.js'
},
'wiki.dashboard': {
'source_filenames': (
'sumo/js/wiki.dashboard.js',
),
'output_filename': 'build/wiki.dashboard-min.js'
},
'customercare': {
'source_filenames': (
'sumo/js/libs/jquery.cookie.js',
'sumo/js/libs/jquery.bullseye-1.0.min.js',
'sumo/js/libs/twitter-text.js',
'sumo/js/customercare.js',
'sumo/js/users.js',
),
'output_filename': 'build/customercare-min.js'
},
'users': {
'source_filenames': (
'sumo/js/users.js',
'sumo/js/reportabuse.js',
),
'output_filename': 'build/users-min.js'
},
'messages': {
'source_filenames': (
'sumo/js/markup.js',
'sumo/js/libs/jquery.autoresize.js',
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
'sumo/js/ajaxpreview.js',
'sumo/js/messages.js',
),
'output_filename': 'build/messages-min.js'
},
'mobile-messages': {
'source_filenames': (
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
),
'output_filename': 'build/mobile-messages-min.js'
},
'groups': {
'source_filenames': (
'sumo/js/libs/jquery.tokeninput.js',
'sumo/js/users.autocomplete.js',
'sumo/js/markup.js',
'sumo/js/groups.js',
'sumo/js/editable.js',
),
'output_filename': 'build/groups-min.js'
},
'kpi.dashboard': {
'source_filenames': (
'd3/d3.js',
'kpi/js/kpi.browserify.js',
),
'output_filename': 'build/kpi.dashboard-min.js'
},
'experiment_fxa_cta_topbar': {
'source_filenames': (
'sumo/js/libs/mozilla-dnt-helper.js',
'sumo/js/libs/mozilla-cookie-helper.js',
'sumo/js/libs/mozilla-traffic-cop.js',
'sumo/js/experiment-fxa-cta-topbar.js',
),
'output_filename': 'build/experiment-fxa-cta-topbar-min.js'
},
'gtm-snippet': {
'source_filenames': (
'sumo/js/dnt-helper.js',
'sumo/js/gtm-snippet.js',
),
'output_filename': 'build/gtm-snippet-min.js'
}
}
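# Usage sketch (not part of the settings themselves): assuming the standard
# django-pipeline template tags are available, a template pulls in a bundle
# defined above by its dictionary key, e.g.
#
#   {% load pipeline %}
#   {% stylesheet 'common' %}
#   {% javascript 'common' %}
#
# Whether this project uses these tags directly or wraps them in its own
# template helpers is an assumption here.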
|
|
import unittest
from io import BytesIO, BufferedReader
from urllib3.response import HTTPResponse
from urllib3.exceptions import DecodeError
from base64 import b64decode
# A known random (i.e., not-too-compressible) payload generated with:
# "".join(random.choice(string.printable) for i in xrange(512))
# .encode("zlib").encode("base64")
# Randomness in tests == bad, and fixing a seed may not be sufficient.
ZLIB_PAYLOAD = b64decode(b"""\
eJwFweuaoQAAANDfineQhiKLUiaiCzvuTEmNNlJGiL5QhnGpZ99z8luQfe1AHoMioB+QSWHQu/L+
lzd7W5CipqYmeVTBjdgSATdg4l4Z2zhikbuF+EKn69Q0DTpdmNJz8S33odfJoVEexw/l2SS9nFdi
pis7KOwXzfSqarSo9uJYgbDGrs1VNnQpT9f8zAorhYCEZronZQF9DuDFfNK3Hecc+WHLnZLQptwk
nufw8S9I43sEwxsT71BiqedHo0QeIrFE01F/4atVFXuJs2yxIOak3bvtXjUKAA6OKnQJ/nNvDGKZ
Khe5TF36JbnKVjdcL1EUNpwrWVfQpFYJ/WWm2b74qNeSZeQv5/xBhRdOmKTJFYgO96PwrHBlsnLn
a3l0LwJsloWpMbzByU5WLbRE6X5INFqjQOtIwYz5BAlhkn+kVqJvWM5vBlfrwP42ifonM5yF4ciJ
auHVks62997mNGOsM7WXNG3P98dBHPo2NhbTvHleL0BI5dus2JY81MUOnK3SGWLH8HeWPa1t5KcW
S5moAj5HexY/g/F8TctpxwsvyZp38dXeLDjSQvEQIkF7XR3YXbeZgKk3V34KGCPOAeeuQDIgyVhV
nP4HF2uWHA==""")
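# A rough Python 3 equivalent of the generation snippet above, shown only as
# a sketch -- it will NOT reproduce the exact payload, since the data is
# random and the tests rely on the hard-coded bytes:
#
#   import base64, random, string, zlib
#   raw = "".join(random.choice(string.printable) for _ in range(512))
#   print(base64.b64encode(zlib.compress(raw.encode("ascii"))).decode("ascii"))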
class TestLegacyResponse(unittest.TestCase):
def test_getheaders(self):
headers = {'host': 'example.com'}
r = HTTPResponse(headers=headers)
self.assertEqual(r.getheaders(), headers)
def test_getheader(self):
headers = {'host': 'example.com'}
r = HTTPResponse(headers=headers)
self.assertEqual(r.getheader('host'), 'example.com')
class TestResponse(unittest.TestCase):
def test_cache_content(self):
r = HTTPResponse('foo')
self.assertEqual(r.data, 'foo')
self.assertEqual(r._body, 'foo')
def test_default(self):
r = HTTPResponse()
self.assertEqual(r.data, None)
def test_none(self):
r = HTTPResponse(None)
self.assertEqual(r.data, None)
def test_preload(self):
fp = BytesIO(b'foo')
r = HTTPResponse(fp, preload_content=True)
self.assertEqual(fp.tell(), len(b'foo'))
self.assertEqual(r.data, b'foo')
def test_no_preload(self):
fp = BytesIO(b'foo')
r = HTTPResponse(fp, preload_content=False)
self.assertEqual(fp.tell(), 0)
self.assertEqual(r.data, b'foo')
self.assertEqual(fp.tell(), len(b'foo'))
def test_decode_bad_data(self):
fp = BytesIO(b'\x00' * 10)
self.assertRaises(DecodeError, HTTPResponse, fp, headers={
'content-encoding': 'deflate'
})
def test_decode_deflate(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'deflate'})
self.assertEqual(r.data, b'foo')
    def test_decode_deflate_case_insensitive(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'DeFlAtE'})
self.assertEqual(r.data, b'foo')
def test_chunked_decoding_deflate(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
self.assertEqual(r.read(3), b'')
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
def test_chunked_decoding_deflate2(self):
import zlib
        # negative wbits: raw deflate stream, no zlib header/trailer
        compress = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
data = compress.compress(b'foo')
data += compress.flush()
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
self.assertEqual(r.read(1), b'')
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
def test_chunked_decoding_gzip(self):
import zlib
        # wbits offset by 16: wrap the deflate data in a gzip container
        compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
data = compress.compress(b'foo')
data += compress.flush()
fp = BytesIO(data)
r = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
preload_content=False)
self.assertEqual(r.read(11), b'')
self.assertEqual(r.read(1), b'f')
self.assertEqual(r.read(2), b'oo')
def test_io(self):
import socket
try:
from http.client import HTTPResponse as OldHTTPResponse
        except ImportError:
from httplib import HTTPResponse as OldHTTPResponse
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
self.assertEqual(resp.closed, False)
self.assertEqual(resp.readable(), True)
self.assertEqual(resp.writable(), False)
self.assertRaises(IOError, resp.fileno)
resp.close()
self.assertEqual(resp.closed, True)
# Try closing with an `httplib.HTTPResponse`, because it has an
# `isclosed` method.
hlr = OldHTTPResponse(socket.socket())
resp2 = HTTPResponse(hlr, preload_content=False)
self.assertEqual(resp2.closed, False)
resp2.close()
self.assertEqual(resp2.closed, True)
        # Also try when only data is present.
resp3 = HTTPResponse('foodata')
self.assertRaises(IOError, resp3.fileno)
resp3._fp = 2
# A corner case where _fp is present but doesn't have `closed`,
# `isclosed`, or `fileno`. Unlikely, but possible.
self.assertEqual(resp3.closed, True)
self.assertRaises(IOError, resp3.fileno)
def test_io_bufferedreader(self):
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
br = BufferedReader(resp)
self.assertEqual(br.read(), b'foo')
br.close()
self.assertEqual(resp.closed, True)
def test_streaming(self):
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
stream = resp.stream(2, decode_content=False)
self.assertEqual(next(stream), b'fo')
self.assertEqual(next(stream), b'o')
self.assertRaises(StopIteration, next, stream)
def test_streaming_tell(self):
fp = BytesIO(b'foo')
resp = HTTPResponse(fp, preload_content=False)
stream = resp.stream(2, decode_content=False)
position = 0
position += len(next(stream))
self.assertEqual(2, position)
self.assertEqual(position, resp.tell())
position += len(next(stream))
self.assertEqual(3, position)
self.assertEqual(position, resp.tell())
self.assertRaises(StopIteration, next, stream)
def test_gzipped_streaming(self):
import zlib
compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
data = compress.compress(b'foo')
data += compress.flush()
fp = BytesIO(data)
resp = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
preload_content=False)
stream = resp.stream(2)
self.assertEqual(next(stream), b'f')
self.assertEqual(next(stream), b'oo')
self.assertRaises(StopIteration, next, stream)
def test_gzipped_streaming_tell(self):
import zlib
compress = zlib.compressobj(6, zlib.DEFLATED, 16 + zlib.MAX_WBITS)
uncompressed_data = b'foo'
data = compress.compress(uncompressed_data)
data += compress.flush()
fp = BytesIO(data)
resp = HTTPResponse(fp, headers={'content-encoding': 'gzip'},
preload_content=False)
stream = resp.stream()
# Read everything
payload = next(stream)
self.assertEqual(payload, uncompressed_data)
self.assertEqual(len(data), resp.tell())
self.assertRaises(StopIteration, next, stream)
def test_deflate_streaming_tell_intermediate_point(self):
# Ensure that ``tell()`` returns the correct number of bytes when
# part-way through streaming compressed content.
import zlib
NUMBER_OF_READS = 10
class MockCompressedDataReading(BytesIO):
"""
            A BytesIO-like reader returning ``payload`` in ``NUMBER_OF_READS``
calls to ``read``.
"""
def __init__(self, payload, payload_part_size):
self.payloads = [
payload[i*payload_part_size:(i+1)*payload_part_size]
for i in range(NUMBER_OF_READS+1)]
assert b"".join(self.payloads) == payload
def read(self, _):
# Amount is unused.
if len(self.payloads) > 0:
return self.payloads.pop(0)
return b""
uncompressed_data = zlib.decompress(ZLIB_PAYLOAD)
payload_part_size = len(ZLIB_PAYLOAD) // NUMBER_OF_READS
fp = MockCompressedDataReading(ZLIB_PAYLOAD, payload_part_size)
resp = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
stream = resp.stream()
parts_positions = [(part, resp.tell()) for part in stream]
end_of_stream = resp.tell()
self.assertRaises(StopIteration, next, stream)
parts, positions = zip(*parts_positions)
# Check that the payload is equal to the uncompressed data
payload = b"".join(parts)
self.assertEqual(uncompressed_data, payload)
# Check that the positions in the stream are correct
expected = [(i+1)*payload_part_size for i in range(NUMBER_OF_READS)]
self.assertEqual(expected, list(positions))
# Check that the end of the stream is in the correct place
self.assertEqual(len(ZLIB_PAYLOAD), end_of_stream)
def test_deflate_streaming(self):
import zlib
data = zlib.compress(b'foo')
fp = BytesIO(data)
resp = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
stream = resp.stream(2)
self.assertEqual(next(stream), b'f')
self.assertEqual(next(stream), b'oo')
self.assertRaises(StopIteration, next, stream)
def test_deflate2_streaming(self):
import zlib
compress = zlib.compressobj(6, zlib.DEFLATED, -zlib.MAX_WBITS)
data = compress.compress(b'foo')
data += compress.flush()
fp = BytesIO(data)
resp = HTTPResponse(fp, headers={'content-encoding': 'deflate'},
preload_content=False)
stream = resp.stream(2)
self.assertEqual(next(stream), b'f')
self.assertEqual(next(stream), b'oo')
self.assertRaises(StopIteration, next, stream)
def test_empty_stream(self):
fp = BytesIO(b'')
resp = HTTPResponse(fp, preload_content=False)
stream = resp.stream(2, decode_content=False)
self.assertRaises(StopIteration, next, stream)
def test_mock_httpresponse_stream(self):
        # Mock out an HTTP Request that does enough to make it through
        # urllib3's read() and close() calls, and also exhausts an underlying file
# object.
        class MockHTTPRequest(object):
            # class-level default for the wrapped file object; set per-instance below
            fp = None
def read(self, amt):
data = self.fp.read(amt)
if not data:
self.fp = None
return data
def close(self):
self.fp = None
bio = BytesIO(b'foo')
fp = MockHTTPRequest()
fp.fp = bio
resp = HTTPResponse(fp, preload_content=False)
stream = resp.stream(2)
self.assertEqual(next(stream), b'fo')
self.assertEqual(next(stream), b'o')
self.assertRaises(StopIteration, next, stream)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2007 Twisted Matrix Laboratories.
# See LICENSE for details
"""
This module tests twisted.conch.ssh.connection.
"""
import struct
from twisted.conch import error
from twisted.conch.ssh import channel, common, connection
from twisted.trial import unittest
from twisted.conch.test import test_userauth
class TestChannel(channel.SSHChannel):
"""
A mocked-up version of twisted.conch.ssh.channel.SSHChannel.
@ivar gotOpen: True if channelOpen has been called.
@type gotOpen: C{bool}
@ivar specificData: the specific channel open data passed to channelOpen.
@type specificData: C{str}
@ivar openFailureReason: the reason passed to openFailed.
    @type openFailureReason: C{error.ConchError}
@ivar inBuffer: a C{list} of strings received by the channel.
@type inBuffer: C{list}
    @ivar extBuffer: a C{list} of 2-tuples (type, extended data) received by
the channel.
@type extBuffer: C{list}
@ivar numberRequests: the number of requests that have been made to this
channel.
@type numberRequests: C{int}
@ivar gotEOF: True if the other side sent EOF.
@type gotEOF: C{bool}
@ivar gotOneClose: True if the other side closed the connection.
@type gotOneClose: C{bool}
@ivar gotClosed: True if the channel is closed.
@type gotClosed: C{bool}
"""
name = "TestChannel"
gotOpen = False
def logPrefix(self):
return "TestChannel %i" % self.id
def channelOpen(self, specificData):
"""
The channel is open. Set up the instance variables.
"""
self.gotOpen = True
self.specificData = specificData
self.inBuffer = []
self.extBuffer = []
self.numberRequests = 0
self.gotEOF = False
self.gotOneClose = False
self.gotClosed = False
def openFailed(self, reason):
"""
Opening the channel failed. Store the reason why.
"""
self.openFailureReason = reason
def request_test(self, data):
"""
A test request. Return True if data is 'data'.
@type data: C{str}
"""
self.numberRequests += 1
return data == 'data'
def dataReceived(self, data):
"""
Data was received. Store it in the buffer.
"""
self.inBuffer.append(data)
def extReceived(self, code, data):
"""
Extended data was received. Store it in the buffer.
"""
self.extBuffer.append((code, data))
def eofReceived(self):
"""
EOF was received. Remember it.
"""
self.gotEOF = True
def closeReceived(self):
"""
Close was received. Remember it.
"""
self.gotOneClose = True
def closed(self):
"""
        The channel is closed. Remember it.
"""
self.gotClosed = True
class TestAvatar:
"""
A mocked-up version of twisted.conch.avatar.ConchUser
"""
def lookupChannel(self, channelType, windowSize, maxPacket, data):
"""
The server wants us to return a channel. If the requested channel is
our TestChannel, return it, otherwise return None.
"""
if channelType == TestChannel.name:
return TestChannel(remoteWindow=windowSize,
remoteMaxPacket=maxPacket,
data=data, avatar=self)
def gotGlobalRequest(self, requestType, data):
"""
The client has made a global request. If the global request is
'TestGlobal', return True. If the global request is 'TestData',
return True and the request-specific data we received. Otherwise,
return False.
"""
if requestType == 'TestGlobal':
return True
elif requestType == 'TestData':
return True, data
else:
return False
class TestConnection(connection.SSHConnection):
"""
A subclass of SSHConnection for testing.
@ivar channel: the current channel.
    @type channel: C{TestChannel}
"""
def logPrefix(self):
return "TestConnection"
def global_TestGlobal(self, data):
"""
The other side made the 'TestGlobal' global request. Return True.
"""
return True
def global_Test_Data(self, data):
"""
The other side made the 'Test-Data' global request. Return True and
the data we received.
"""
return True, data
def channel_TestChannel(self, windowSize, maxPacket, data):
"""
The other side is requesting the TestChannel. Create a C{TestChannel}
instance, store it, and return it.
"""
self.channel = TestChannel(remoteWindow=windowSize,
remoteMaxPacket=maxPacket, data=data)
return self.channel
def channel_ErrorChannel(self, windowSize, maxPacket, data):
"""
The other side is requesting the ErrorChannel. Raise an exception.
"""
raise AssertionError('no such thing')
class ConnectionTestCase(unittest.TestCase):
if test_userauth.transport is None:
skip = "Cannot run without PyCrypto"
def setUp(self):
self.transport = test_userauth.FakeTransport(None)
self.transport.avatar = TestAvatar()
self.conn = TestConnection()
self.conn.transport = self.transport
self.conn.serviceStarted()
def _openChannel(self, channel):
"""
Open the channel with the default connection.
"""
self.conn.openChannel(channel)
self.transport.packets = self.transport.packets[:-1]
self.conn.ssh_CHANNEL_OPEN_CONFIRMATION(struct.pack('>2L',
channel.id, 255) + '\x00\x02\x00\x00\x00\x00\x80\x00')
def tearDown(self):
self.conn.serviceStopped()
def test_linkAvatar(self):
"""
Test that the connection links itself to the avatar in the
transport.
"""
self.assertIdentical(self.transport.avatar.conn, self.conn)
def test_serviceStopped(self):
"""
Test that serviceStopped() closes any open channels.
"""
channel1 = TestChannel()
channel2 = TestChannel()
self.conn.openChannel(channel1)
self.conn.openChannel(channel2)
self.conn.ssh_CHANNEL_OPEN_CONFIRMATION('\x00\x00\x00\x00' * 4)
self.assertTrue(channel1.gotOpen)
self.assertFalse(channel2.gotOpen)
self.conn.serviceStopped()
self.assertTrue(channel1.gotClosed)
def test_GLOBAL_REQUEST(self):
"""
Test that global request packets are dispatched to the global_*
methods and the return values are translated into success or failure
messages.
"""
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestGlobal') + '\xff')
self.assertEquals(self.transport.packets,
[(connection.MSG_REQUEST_SUCCESS, '')])
self.transport.packets = []
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestData') + '\xff' +
'test data')
self.assertEquals(self.transport.packets,
[(connection.MSG_REQUEST_SUCCESS, 'test data')])
self.transport.packets = []
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestBad') + '\xff')
self.assertEquals(self.transport.packets,
[(connection.MSG_REQUEST_FAILURE, '')])
self.transport.packets = []
self.conn.ssh_GLOBAL_REQUEST(common.NS('TestGlobal') + '\x00')
self.assertEquals(self.transport.packets, [])
def test_REQUEST_SUCCESS(self):
"""
Test that global request success packets cause the Deferred to be
called back.
"""
d = self.conn.sendGlobalRequest('request', 'data', True)
self.conn.ssh_REQUEST_SUCCESS('data')
def check(data):
self.assertEquals(data, 'data')
d.addCallback(check)
d.addErrback(self.fail)
return d
def test_REQUEST_FAILURE(self):
"""
Test that global request failure packets cause the Deferred to be
erred back.
"""
d = self.conn.sendGlobalRequest('request', 'data', True)
self.conn.ssh_REQUEST_FAILURE('data')
def check(f):
self.assertEquals(f.value.data, 'data')
d.addCallback(self.fail)
d.addErrback(check)
return d
def test_CHANNEL_OPEN(self):
"""
Test that open channel packets cause a channel to be created and
opened or a failure message to be returned.
"""
del self.transport.avatar
self.conn.ssh_CHANNEL_OPEN(common.NS('TestChannel') +
'\x00\x00\x00\x01' * 4)
self.assertTrue(self.conn.channel.gotOpen)
self.assertEquals(self.conn.channel.conn, self.conn)
self.assertEquals(self.conn.channel.data, '\x00\x00\x00\x01')
self.assertEquals(self.conn.channel.specificData, '\x00\x00\x00\x01')
self.assertEquals(self.conn.channel.remoteWindowLeft, 1)
self.assertEquals(self.conn.channel.remoteMaxPacket, 1)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN_CONFIRMATION,
'\x00\x00\x00\x01\x00\x00\x00\x00\x00\x02\x00\x00'
'\x00\x00\x80\x00')])
self.transport.packets = []
self.conn.ssh_CHANNEL_OPEN(common.NS('BadChannel') +
'\x00\x00\x00\x02' * 4)
self.flushLoggedErrors()
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN_FAILURE,
'\x00\x00\x00\x02\x00\x00\x00\x03' + common.NS(
'unknown channel') + common.NS(''))])
self.transport.packets = []
self.conn.ssh_CHANNEL_OPEN(common.NS('ErrorChannel') +
'\x00\x00\x00\x02' * 4)
self.flushLoggedErrors()
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN_FAILURE,
'\x00\x00\x00\x02\x00\x00\x00\x02' + common.NS(
'unknown failure') + common.NS(''))])
def test_CHANNEL_OPEN_CONFIRMATION(self):
"""
Test that channel open confirmation packets cause the channel to be
notified that it's open.
"""
channel = TestChannel()
self.conn.openChannel(channel)
self.conn.ssh_CHANNEL_OPEN_CONFIRMATION('\x00\x00\x00\x00'*5)
self.assertEquals(channel.remoteWindowLeft, 0)
self.assertEquals(channel.remoteMaxPacket, 0)
self.assertEquals(channel.specificData, '\x00\x00\x00\x00')
self.assertEquals(self.conn.channelsToRemoteChannel[channel],
0)
self.assertEquals(self.conn.localToRemoteChannel[0], 0)
def test_CHANNEL_OPEN_FAILURE(self):
"""
Test that channel open failure packets cause the channel to be
notified that its opening failed.
"""
channel = TestChannel()
self.conn.openChannel(channel)
self.conn.ssh_CHANNEL_OPEN_FAILURE('\x00\x00\x00\x00\x00\x00\x00'
'\x01' + common.NS('failure!'))
self.assertEquals(channel.openFailureReason.args, ('failure!', 1))
self.assertEquals(self.conn.channels.get(channel), None)
def test_CHANNEL_WINDOW_ADJUST(self):
"""
Test that channel window adjust messages add bytes to the channel
window.
"""
channel = TestChannel()
self._openChannel(channel)
oldWindowSize = channel.remoteWindowLeft
self.conn.ssh_CHANNEL_WINDOW_ADJUST('\x00\x00\x00\x00\x00\x00\x00'
'\x01')
self.assertEquals(channel.remoteWindowLeft, oldWindowSize + 1)
def test_CHANNEL_DATA(self):
"""
Test that channel data messages are passed up to the channel, or
cause the channel to be closed if the data is too large.
"""
channel = TestChannel(localWindow=6, localMaxPacket=5)
self._openChannel(channel)
self.conn.ssh_CHANNEL_DATA('\x00\x00\x00\x00' + common.NS('data'))
self.assertEquals(channel.inBuffer, ['data'])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_WINDOW_ADJUST, '\x00\x00\x00\xff'
'\x00\x00\x00\x04')])
self.transport.packets = []
longData = 'a' * (channel.localWindowLeft + 1)
self.conn.ssh_CHANNEL_DATA('\x00\x00\x00\x00' + common.NS(longData))
self.assertEquals(channel.inBuffer, ['data'])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
channel = TestChannel()
self._openChannel(channel)
bigData = 'a' * (channel.localMaxPacket + 1)
self.transport.packets = []
self.conn.ssh_CHANNEL_DATA('\x00\x00\x00\x01' + common.NS(bigData))
self.assertEquals(channel.inBuffer, [])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
def test_CHANNEL_EXTENDED_DATA(self):
"""
Test that channel extended data messages are passed up to the channel,
or cause the channel to be closed if they're too big.
"""
channel = TestChannel(localWindow=6, localMaxPacket=5)
self._openChannel(channel)
self.conn.ssh_CHANNEL_EXTENDED_DATA('\x00\x00\x00\x00\x00\x00\x00'
'\x00' + common.NS('data'))
self.assertEquals(channel.extBuffer, [(0, 'data')])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_WINDOW_ADJUST, '\x00\x00\x00\xff'
'\x00\x00\x00\x04')])
self.transport.packets = []
longData = 'a' * (channel.localWindowLeft + 1)
self.conn.ssh_CHANNEL_EXTENDED_DATA('\x00\x00\x00\x00\x00\x00\x00'
'\x00' + common.NS(longData))
self.assertEquals(channel.extBuffer, [(0, 'data')])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
channel = TestChannel()
self._openChannel(channel)
bigData = 'a' * (channel.localMaxPacket + 1)
self.transport.packets = []
self.conn.ssh_CHANNEL_EXTENDED_DATA('\x00\x00\x00\x01\x00\x00\x00'
'\x00' + common.NS(bigData))
self.assertEquals(channel.extBuffer, [])
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
def test_CHANNEL_EOF(self):
"""
Test that channel eof messages are passed up to the channel.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.ssh_CHANNEL_EOF('\x00\x00\x00\x00')
self.assertTrue(channel.gotEOF)
def test_CHANNEL_CLOSE(self):
"""
Test that channel close messages are passed up to the channel. Also,
test that channel.close() is called if both sides are closed when this
message is received.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendClose(channel)
self.conn.ssh_CHANNEL_CLOSE('\x00\x00\x00\x00')
self.assertTrue(channel.gotOneClose)
self.assertTrue(channel.gotClosed)
def test_CHANNEL_REQUEST_success(self):
"""
Test that channel requests that succeed send MSG_CHANNEL_SUCCESS.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.ssh_CHANNEL_REQUEST('\x00\x00\x00\x00' + common.NS('test')
+ '\x00')
self.assertEquals(channel.numberRequests, 1)
d = self.conn.ssh_CHANNEL_REQUEST('\x00\x00\x00\x00' + common.NS(
'test') + '\xff' + 'data')
def check(result):
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_SUCCESS, '\x00\x00\x00\xff')])
d.addCallback(check)
return d
def test_CHANNEL_REQUEST_failure(self):
"""
Test that channel requests that fail send MSG_CHANNEL_FAILURE.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.ssh_CHANNEL_REQUEST('\x00\x00\x00\x00' + common.NS(
'test') + '\xff')
def check(result):
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_FAILURE, '\x00\x00\x00\xff'
)])
d.addCallback(self.fail)
d.addErrback(check)
return d
def test_CHANNEL_REQUEST_SUCCESS(self):
"""
Test that channel request success messages cause the Deferred to be
called back.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.sendRequest(channel, 'test', 'data', True)
self.conn.ssh_CHANNEL_SUCCESS('\x00\x00\x00\x00')
def check(result):
self.assertTrue(result)
return d
def test_CHANNEL_REQUEST_FAILURE(self):
"""
Test that channel request failure messages cause the Deferred to be
erred back.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.sendRequest(channel, 'test', '', True)
self.conn.ssh_CHANNEL_FAILURE('\x00\x00\x00\x00')
def check(result):
self.assertEquals(result.value.value, 'channel request failed')
d.addCallback(self.fail)
d.addErrback(check)
return d
def test_sendGlobalRequest(self):
"""
Test that global request messages are sent in the right format.
"""
d = self.conn.sendGlobalRequest('wantReply', 'data', True)
self.conn.sendGlobalRequest('noReply', '', False)
self.assertEquals(self.transport.packets,
[(connection.MSG_GLOBAL_REQUEST, common.NS('wantReply') +
'\xffdata'),
(connection.MSG_GLOBAL_REQUEST, common.NS('noReply') +
'\x00')])
self.assertEquals(self.conn.deferreds, {'global':[d]})
def test_openChannel(self):
"""
Test that open channel messages are sent in the right format.
"""
channel = TestChannel()
self.conn.openChannel(channel, 'aaaa')
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_OPEN, common.NS('TestChannel') +
'\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x80\x00aaaa')])
self.assertEquals(channel.id, 0)
self.assertEquals(self.conn.localChannelID, 1)
def test_sendRequest(self):
"""
Test that channel request messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
d = self.conn.sendRequest(channel, 'test', 'test', True)
self.conn.sendRequest(channel, 'test2', '', False)
channel.localClosed = True # emulate sending a close message
self.conn.sendRequest(channel, 'test3', '', True)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_REQUEST, '\x00\x00\x00\xff' +
common.NS('test') + '\x01test'),
(connection.MSG_CHANNEL_REQUEST, '\x00\x00\x00\xff' +
common.NS('test2') + '\x00')])
self.assertEquals(self.conn.deferreds, {0:[d]})
def test_adjustWindow(self):
"""
Test that channel window adjust messages cause bytes to be added
to the window.
"""
channel = TestChannel(localWindow=5)
self._openChannel(channel)
channel.localWindowLeft = 0
self.conn.adjustWindow(channel, 1)
self.assertEquals(channel.localWindowLeft, 1)
channel.localClosed = True
self.conn.adjustWindow(channel, 2)
self.assertEquals(channel.localWindowLeft, 1)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_WINDOW_ADJUST, '\x00\x00\x00\xff'
'\x00\x00\x00\x01')])
def test_sendData(self):
"""
Test that channel data messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendData(channel, 'a')
channel.localClosed = True
self.conn.sendData(channel, 'b')
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_DATA, '\x00\x00\x00\xff' +
common.NS('a'))])
def test_sendExtendedData(self):
"""
Test that channel extended data messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendExtendedData(channel, 1, 'test')
channel.localClosed = True
self.conn.sendExtendedData(channel, 2, 'test2')
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_EXTENDED_DATA, '\x00\x00\x00\xff' +
'\x00\x00\x00\x01' + common.NS('test'))])
def test_sendEOF(self):
"""
Test that channel EOF messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendEOF(channel)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_EOF, '\x00\x00\x00\xff')])
channel.localClosed = True
self.conn.sendEOF(channel)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_EOF, '\x00\x00\x00\xff')])
def test_sendClose(self):
"""
Test that channel close messages are sent in the right format.
"""
channel = TestChannel()
self._openChannel(channel)
self.conn.sendClose(channel)
self.assertTrue(channel.localClosed)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
self.conn.sendClose(channel)
self.assertEquals(self.transport.packets,
[(connection.MSG_CHANNEL_CLOSE, '\x00\x00\x00\xff')])
channel2 = TestChannel()
self._openChannel(channel2)
channel2.remoteClosed = True
self.conn.sendClose(channel2)
self.assertTrue(channel2.gotClosed)
def test_getChannelWithAvatar(self):
"""
Test that getChannel dispatches to the avatar when an avatar is
present. Correct functioning without the avatar is verified in
test_CHANNEL_OPEN.
"""
channel = self.conn.getChannel('TestChannel', 50, 30, 'data')
self.assertEquals(channel.data, 'data')
self.assertEquals(channel.remoteWindowLeft, 50)
self.assertEquals(channel.remoteMaxPacket, 30)
self.assertRaises(error.ConchError, self.conn.getChannel,
'BadChannel', 50, 30, 'data')
def test_gotGlobalRequestWithoutAvatar(self):
"""
        Test that gotGlobalRequest dispatches to global_* methods without an avatar.
"""
del self.transport.avatar
self.assertTrue(self.conn.gotGlobalRequest('TestGlobal', 'data'))
self.assertEquals(self.conn.gotGlobalRequest('Test-Data', 'data'),
(True, 'data'))
self.assertFalse(self.conn.gotGlobalRequest('BadGlobal', 'data'))
|
|
import os
import re
from django import forms
from django.conf import settings
from django.forms import ModelForm
from django.forms.models import modelformset_factory
from django.template import Context, Template, TemplateSyntaxError
import happyforms
from piston.models import Consumer
from product_details import product_details
from tower import ugettext_lazy as _lazy
from quieter_formset.formset import BaseModelFormSet
import amo
from addons.models import Addon
from amo.urlresolvers import reverse
from applications.models import Application, AppVersion
from bandwagon.models import Collection, FeaturedCollection, MonthlyPick
from files.models import File
from zadmin.models import ValidationJob
class BulkValidationForm(happyforms.ModelForm):
application = forms.ChoiceField(
label=_lazy(u'Application'),
choices=[(a.id, a.pretty) for a in amo.APPS_ALL.values()])
curr_max_version = forms.ChoiceField(
label=_lazy(u'Current Max. Version'),
choices=[('', _lazy(u'Select an application first'))])
target_version = forms.ChoiceField(
label=_lazy(u'Target Version'),
choices=[('', _lazy(u'Select an application first'))])
finish_email = forms.CharField(required=False,
label=_lazy(u'Email when finished'))
class Meta:
model = ValidationJob
fields = ('application', 'curr_max_version', 'target_version',
'finish_email')
def __init__(self, *args, **kw):
kw.setdefault('initial', {})
kw['initial']['finish_email'] = settings.FLIGTAR
super(BulkValidationForm, self).__init__(*args, **kw)
w = self.fields['application'].widget
# Get the URL after the urlconf has loaded.
w.attrs['data-url'] = reverse('zadmin.application_versions_json')
def version_choices_for_app_id(self, app_id):
versions = AppVersion.objects.filter(application__id=app_id)
return [(v.id, v.version) for v in versions]
def clean_application(self):
app_id = int(self.cleaned_data['application'])
app = Application.objects.get(pk=app_id)
self.cleaned_data['application'] = app
choices = self.version_choices_for_app_id(app_id)
self.fields['target_version'].choices = choices
self.fields['curr_max_version'].choices = choices
return self.cleaned_data['application']
def _clean_appversion(self, field):
return AppVersion.objects.get(pk=int(field))
def clean_curr_max_version(self):
return self._clean_appversion(self.cleaned_data['curr_max_version'])
def clean_target_version(self):
return self._clean_appversion(self.cleaned_data['target_version'])
path = os.path.join(settings.ROOT, 'apps/zadmin/templates/zadmin')
texts = {
'success': open('%s/%s' % (path, 'success.txt')).read(),
'failure': open('%s/%s' % (path, 'failure.txt')).read(),
}
varname = re.compile(r'{{\s*([a-zA-Z0-9_]+)\s*}}')
class NotifyForm(happyforms.Form):
subject = forms.CharField(widget=forms.TextInput, required=True)
preview_only = forms.BooleanField(initial=True, required=False,
label=_lazy(u'Log emails instead of sending'))
text = forms.CharField(widget=forms.Textarea, required=True)
variables = ['{{ADDON_NAME}}', '{{ADDON_VERSION}}', '{{APPLICATION}}',
'{{COMPAT_LINK}}', '{{RESULT_LINKS}}', '{{VERSION}}']
variable_names = [varname.match(v).group(1) for v in variables]
def __init__(self, *args, **kw):
kw.setdefault('initial', {})
if 'text' in kw:
kw['initial']['text'] = texts[kw.pop('text')]
kw['initial']['subject'] = ('{{ADDON_NAME}} {{ADDON_VERSION}} '
'compatibility with '
'{{APPLICATION}} {{VERSION}}')
super(NotifyForm, self).__init__(*args, **kw)
def check_template(self, data):
try:
Template(data).render(Context({}))
except TemplateSyntaxError, err:
raise forms.ValidationError(err)
for name in varname.findall(data):
if name not in self.variable_names:
raise forms.ValidationError(
u'Variable {{%s}} is not a valid variable' % name)
return data
def clean_text(self):
return self.check_template(self.cleaned_data['text'])
def clean_subject(self):
return self.check_template(self.cleaned_data['subject'])
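# Illustrative note (not part of the original module; `form` is a hypothetical
# NotifyForm instance): check_template() only accepts the whitelisted {{ ... }}
# variables listed on NotifyForm, e.g.
#   form.check_template(u'{{ADDON_NAME}} is affected')  # returns the text unchanged
#   form.check_template(u'{{BOGUS}} is affected')        # raises ValidationError
# and any Django TemplateSyntaxError in the text is converted to a ValidationError.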
class FeaturedCollectionForm(happyforms.ModelForm):
LOCALES = (('', u'(Default Locale)'),) + tuple(
(i, product_details.languages[i]['native'])
for i in settings.AMO_LANGUAGES)
application = forms.ModelChoiceField(Application.objects.all())
collection = forms.CharField(widget=forms.HiddenInput)
locale = forms.ChoiceField(choices=LOCALES, required=False)
class Meta:
model = FeaturedCollection
fields = ('application', 'locale')
def clean_collection(self):
application = self.cleaned_data.get('application', None)
collection = self.cleaned_data.get('collection', None)
if not Collection.objects.filter(id=collection,
application=application).exists():
raise forms.ValidationError(
u'Invalid collection for this application.')
return collection
def save(self, commit=False):
collection = self.cleaned_data['collection']
f = super(FeaturedCollectionForm, self).save(commit=commit)
f.collection = Collection.objects.get(id=collection)
f.save()
return f
class BaseFeaturedCollectionFormSet(BaseModelFormSet):
def __init__(self, *args, **kw):
super(BaseFeaturedCollectionFormSet, self).__init__(*args, **kw)
for form in self.initial_forms:
try:
form.initial['collection'] = (FeaturedCollection.objects
.get(id=form.instance.id).collection.id)
except (FeaturedCollection.DoesNotExist, Collection.DoesNotExist):
form.initial['collection'] = None
FeaturedCollectionFormSet = modelformset_factory(FeaturedCollection,
form=FeaturedCollectionForm, formset=BaseFeaturedCollectionFormSet,
can_delete=True, extra=0)
class OAuthConsumerForm(happyforms.ModelForm):
class Meta:
model = Consumer
fields = ['name', 'description', 'status']
class MonthlyPickForm(happyforms.ModelForm):
class Meta:
model = MonthlyPick
widgets = {
'addon': forms.TextInput(),
'blurb': forms.Textarea(attrs={'cols': 20, 'rows': 2})
}
MonthlyPickFormSet = modelformset_factory(MonthlyPick, form=MonthlyPickForm,
can_delete=True, extra=0)
class AddonStatusForm(ModelForm):
class Meta:
model = Addon
fields = ('status', 'highest_status')
class FileStatusForm(ModelForm):
class Meta:
model = File
fields = ('status',)
FileFormSet = modelformset_factory(File, form=FileStatusForm,
formset=BaseModelFormSet, extra=0)
|
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import wx
from atom.api import Typed
from enaml.widgets.scroll_area import ProxyScrollArea
from .wx_container import WxContainer
from .wx_frame import WxFrame
from .wx_single_widget_sizer import wxSingleWidgetSizer
# The 'always_on' scroll policy is not supported on wx, because it
# requires setting a window style flag which does not dynamically
# toggle in a reliable fashion. Since we only support 'off' or 'auto'
# it's easiest to use this mapping to convert straight from policy
# values into a respective scroll rate. A rate of zero causes wx not
# to show the scroll bar. A positive rate indicates to scroll that many
# pixels per event. We set the rate to 1 to have smooth scrolling. Wx
# doesn't make a distinction between scroll events caused by the mouse
# or scrollbar and those caused by clicking the scroll buttons (a la Qt),
# and thus this rate applies the same to all of those events. Since we
# expect that clicking on a scroll button happens much more infrequently
# than scrolling by dragging the scroll bar, we opt for a lower rate
# in order to get smooth drag scrolling and sacrifice some usability
# on the scroll buttons.
SCROLLBAR_MAP = {
'as_needed': 1,
'always_off': 0,
'always_on': 1,
}
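# Illustrative note (not in the original source): the proxy below passes these
# rates straight to wx. For horizontal_policy='as_needed' and
# vertical_policy='always_off' it effectively calls
#   widget.SetScrollRate(SCROLLBAR_MAP['as_needed'], SCROLLBAR_MAP['always_off'])
# i.e. SetScrollRate(1, 0): the horizontal bar appears on demand while the
# vertical bar is suppressed entirely.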
class wxScrollAreaSizer(wxSingleWidgetSizer):
""" A wxSingleWidgetSizer subclass which makes adjusts the min
size to account for a 2 pixel error in Wx.
"""
def CalcMin(self):
""" Returns the minimum size for the area owned by the sizer.
Returns
-------
result : wxSize
The wx size representing the minimum area required by the
sizer.
"""
        # The effective min size computation is correct, but the wx
        # scrolled window interprets it with an error of 2px. That
        # is, we need to make wx think that the min size is 2px smaller
        # than it actually is so that the scroll bars show and hide at
        # the appropriate sizes.
res = super(wxScrollAreaSizer, self).CalcMin()
if res.IsFullySpecified():
res.width -= 2
res.height -= 2
return res
class wxScrollArea(wx.ScrolledWindow):
""" A custom wx.ScrolledWindow which is suits Enaml's use case.
"""
#: The internal best size. The same as QAbstractScrollArea.
_best_size = wx.Size(256, 192)
def __init__(self, *args, **kwargs):
""" Initialize a wxScrollArea.
Parameters
----------
*args, **kwargs
The positional and keyword arguments needed to initialize
a wxScrolledWindow.
"""
super(wxScrollArea, self).__init__(*args, **kwargs)
self._scroll_widget = None
self.SetSizer(wxScrollAreaSizer())
def GetBestSize(self):
""" An overridden parent class method which returns a sensible
best size.
The default wx implementation returns a best size of (16, 16)
on Windows; far too small to be useful. So, we just adopt the
size hint of (256, 192) used in Qt's QAbstractScrollArea.
"""
return self._best_size
def GetScrollWidget(self):
""" Get the scroll widget for this scroll area.
Returns
-------
results : wxWindow
The wxWindow being scrolled by this scroll area.
"""
return self._scroll_widget
def SetScrollWidget(self, widget):
""" Set the scroll widget for this scroll area.
Parameters
----------
widget : wxWindow
The wxWindow which should be scrolled by this area.
"""
self._scroll_widget = widget
self.GetSizer().Add(widget)
class WxScrollArea(WxFrame, ProxyScrollArea):
""" A Wx implementation of an Enaml ScrollArea.
"""
#: A reference to the widget created by the proxy.
widget = Typed(wxScrollArea)
def create_widget(self):
""" Create the underlying wxScrolledWindow widget.
"""
style = wx.HSCROLL | wx.VSCROLL | wx.BORDER_SIMPLE
self.widget = wxScrollArea(self.parent_widget(), style=style)
def init_widget(self):
""" Initialize the underlying widget.
"""
super(WxScrollArea, self).init_widget()
d = self.declaration
self.set_horizontal_policy(d.horizontal_policy)
self.set_vertical_policy(d.vertical_policy)
self.set_widget_resizable(d.widget_resizable)
def init_layout(self):
""" Handle the layout initialization for the scroll area.
"""
super(WxScrollArea, self).init_layout()
self.widget.SetScrollWidget(self.scroll_widget())
#--------------------------------------------------------------------------
# Utility Methods
#--------------------------------------------------------------------------
def scroll_widget(self):
""" Find and return the scroll widget child for this widget.
"""
w = self.declaration.scroll_widget()
if w is not None:
return w.proxy.widget or None
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def child_added(self, child):
""" Handle the child added event for a WxScrollArea.
"""
super(WxScrollArea, self).child_added(child)
if isinstance(child, WxContainer):
self.widget.SetScrollWidget(self.scroll_widget())
def child_removed(self, child):
""" Handle the child removed event for a WxScrollArea.
"""
super(WxScrollArea, self).child_removed(child)
if isinstance(child, WxContainer):
self.widget.SetScrollWidget(self.scroll_widget())
#--------------------------------------------------------------------------
# Overrides
#--------------------------------------------------------------------------
def replace_constraints(self, old_cns, new_cns):
""" A reimplemented WxConstraintsWidget layout method.
Constraints layout may not cross the boundary of a ScrollArea,
        so this method is a no-op which stops the layout propagation.
"""
pass
#--------------------------------------------------------------------------
# ProxyScrollArea API
#--------------------------------------------------------------------------
def set_horizontal_policy(self, policy):
""" Set the horizontal scrollbar policy of the widget.
"""
horiz = SCROLLBAR_MAP[policy]
vert = SCROLLBAR_MAP[self.declaration.vertical_policy]
self.widget.SetScrollRate(horiz, vert)
def set_vertical_policy(self, policy):
""" Set the vertical scrollbar policy of the widget.
"""
horiz = SCROLLBAR_MAP[self.declaration.horizontal_policy]
vert = SCROLLBAR_MAP[policy]
self.widget.SetScrollRate(horiz, vert)
def set_widget_resizable(self, resizable):
""" Set whether or not the scroll widget is resizable.
This is not supported on Wx.
"""
pass
|
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is adapted from PyTorch Lightning.
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/
# pl_examples/domain_templates/semantic_segmentation.py
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from argparse import ArgumentParser, Namespace
import numpy as np
import torch
import torch.nn.functional as F
from bigdl.nano.pytorch.vision.transforms import transforms
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from typing import Union
import pytorch_lightning as pl
from pl_examples import cli_lightning_logo
from pl_examples.domain_templates.unet import UNet
from pytorch_lightning.utilities.argparse import from_argparse_args
from bigdl.nano.pytorch.trainer import Trainer
DEFAULT_VOID_LABELS = (0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1)
DEFAULT_VALID_LABELS = (7, 8, 11, 12, 13, 17, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33)
def _create_synth_kitti_dataset(path_dir: str, image_dims: tuple = (1024, 512)):
"""
    Create a synthetic dataset with random images,
    just to simulate that the dataset has already been downloaded.
"""
path_dir_images = os.path.join(path_dir, KITTI.IMAGE_PATH)
path_dir_masks = os.path.join(path_dir, KITTI.MASK_PATH)
for p_dir in (path_dir_images, path_dir_masks):
os.makedirs(p_dir, exist_ok=True)
for i in range(3):
path_img = os.path.join(path_dir_images, f"dummy_kitti_{i}.png")
Image.new("RGB", image_dims).save(path_img)
path_mask = os.path.join(path_dir_masks, f"dummy_kitti_{i}.png")
Image.new("L", image_dims).save(path_mask)
class KITTI(Dataset):
"""
Class for KITTI Semantic Segmentation Benchmark dataset
Dataset link - http://www.cvlibs.net/datasets/kitti/eval_semseg.php?benchmark=semantics2015
Manually download at https://s3.eu-central-1.amazonaws.com/avg-kitti/data_semantics.zip
There are 34 classes in the given labels. However, not all of them are useful for training
(like railings on highways, road dividers, etc.).
So, these useless classes (the pixel values of these classes) are stored in the `void_labels`.
The useful classes are stored in the `valid_labels`.
The `encode_segmap` function sets all pixels with any of the `void_labels` to `ignore_index`
(250 by default). It also sets all of the valid pixels to the appropriate value between 0 and
`len(valid_labels)` (since that is the number of valid classes), so it can be used properly by
the loss function when comparing with the output.
The `get_filenames` function retrieves the filenames of all images in the given `path` and
saves the absolute path in a list.
    In the `__getitem__` function, images and masks are resized to the given `img_size`, masks are
    encoded using `encode_segmap`, and the given `transform` (if any) is applied to the image only
    (the mask does not usually require transforms, but they can be implemented in a similar way).
>>> from pl_examples import _DATASETS_PATH
>>> dataset_path = os.path.join(_DATASETS_PATH, "Kitti")
>>> _create_synth_kitti_dataset(dataset_path, image_dims=(1024, 512))
>>> KITTI(dataset_path, 'train') # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<...semantic_segmentation.KITTI object at ...>
"""
IMAGE_PATH = os.path.join("training", "image_2")
MASK_PATH = os.path.join("training", "semantic")
def __init__(
self,
data_path: str,
split: str,
img_size: tuple = (1242, 376),
void_labels: tuple = DEFAULT_VOID_LABELS,
valid_labels: tuple = DEFAULT_VALID_LABELS,
transform=None,
):
self.img_size = img_size
self.void_labels = void_labels
self.valid_labels = valid_labels
self.ignore_index = 250
self.class_map = dict(
zip(self.valid_labels, range(len(self.valid_labels))))
self.transform = transform
self.split = split
self.data_path = data_path
self.img_path = os.path.join(self.data_path, self.IMAGE_PATH)
self.mask_path = os.path.join(self.data_path, self.MASK_PATH)
self.img_list = self.get_filenames(self.img_path)
self.mask_list = self.get_filenames(self.mask_path)
# Split between train and valid set (80/20)
random_inst = random.Random(12345) # for repeatability
n_items = len(self.img_list)
idxs = random_inst.sample(range(n_items), n_items // 5)
if self.split == "train":
idxs = [idx for idx in range(n_items) if idx not in idxs]
self.img_list = [self.img_list[i] for i in idxs]
self.mask_list = [self.mask_list[i] for i in idxs]
def __len__(self):
return len(self.img_list)
def __getitem__(self, idx):
img = Image.open(self.img_list[idx])
img = img.resize(self.img_size)
img = np.array(img)
mask = Image.open(self.mask_list[idx]).convert("L")
mask = mask.resize(self.img_size)
mask = np.array(mask)
mask = self.encode_segmap(mask)
if self.transform:
img = self.transform(img)
return img, mask
def encode_segmap(self, mask):
"""
Sets void classes to zero so they won't be considered for training
"""
for voidc in self.void_labels:
mask[mask == voidc] = self.ignore_index
for validc in self.valid_labels:
mask[mask == validc] = self.class_map[validc]
# remove extra idxs from updated dataset
mask[mask > 18] = self.ignore_index
return mask
def get_filenames(self, path):
"""
Returns a list of absolute paths to images inside given `path`
"""
files_list = []
for filename in os.listdir(path):
files_list.append(os.path.join(path, filename))
return files_list
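# A minimal sketch (not part of the original example; the helper name is
# hypothetical) showing what `KITTI.encode_segmap` does to raw label ids:
# void ids collapse to `ignore_index` (250) and valid ids become contiguous
# class indices, as described in the class docstring above.
def _demo_encode_segmap():
    import tempfile
    tmp_dir = tempfile.mkdtemp(prefix="kitti_demo_")
    _create_synth_kitti_dataset(tmp_dir, image_dims=(64, 32))
    dataset = KITTI(tmp_dir, split="train", img_size=(64, 32))
    raw = np.array([[7, 0], [26, 3]], dtype=np.uint8)  # 7 and 26 are valid ids, 0 and 3 are void
    encoded = dataset.encode_segmap(raw.copy())
    # valid ids are remapped to contiguous indices: 7 -> 0, 26 -> 13
    assert encoded[0, 0] == dataset.class_map[7]
    assert encoded[1, 0] == dataset.class_map[26]
    # void ids (and anything else above 18) end up as ignore_index == 250
    assert encoded[0, 1] == dataset.ignore_index
    assert encoded[1, 1] == dataset.ignore_index
    return encoded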
class SegModel(pl.LightningModule):
"""
Semantic Segmentation Module
This is a basic semantic segmentation module implemented with Lightning.
    It uses CrossEntropyLoss as the default loss function, which may be replaced
    with other loss functions as required.
    It is specific to the KITTI dataset, i.e. the dataloaders are for KITTI and
    the Normalize transform uses the mean and standard deviation of this dataset.
    It uses a UNet model as an example.
Adam optimizer is used along with Cosine Annealing learning rate scheduler.
>>> from pl_examples import _DATASETS_PATH
>>> dataset_path = os.path.join(_DATASETS_PATH, "Kitti")
>>> _create_synth_kitti_dataset(dataset_path, image_dims=(1024, 512))
>>> SegModel(dataset_path) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
SegModel(
(net): UNet(
(layers): ModuleList(
(0): DoubleConv(...)
(1): Down(...)
(2): Down(...)
(3): Up(...)
(4): Up(...)
(5): Conv2d(64, 19, kernel_size=(1, 1), stride=(1, 1))
)
)
)
"""
def __init__(
self,
data_path: str,
batch_size: int = 4,
lr: float = 1e-3,
num_layers: int = 3,
features_start: int = 64,
bilinear: bool = False,
*args,
**kwargs
):
super().__init__(**kwargs)
self.data_path = data_path
self.batch_size = batch_size
self.lr = lr
self.num_layers = num_layers
self.features_start = features_start
self.bilinear = bilinear
self.net = UNet(
num_classes=19, num_layers=self.num_layers,
features_start=self.features_start, bilinear=self.bilinear
)
self.transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(
mean=[0.35675976, 0.37380189, 0.3764753],
std=[0.32064945, 0.32098866, 0.32325324]
),
]
)
self.trainset = KITTI(self.data_path, split="train",
transform=self.transform)
self.validset = KITTI(self.data_path, split="valid",
transform=self.transform)
def forward(self, x):
return self.net(x)
def training_step(self, batch, batch_nb):
img, mask = batch
img = img.float()
mask = mask.long()
out = self(img)
loss = F.cross_entropy(out, mask, ignore_index=250)
log_dict = {"train_loss": loss.detach()}
return {"loss": loss, "log": log_dict, "progress_bar": log_dict}
def validation_step(self, batch, batch_idx):
img, mask = batch
img = img.float()
mask = mask.long()
out = self(img)
loss_val = F.cross_entropy(out, mask, ignore_index=250)
return {"val_loss": loss_val}
def validation_epoch_end(self, outputs):
loss_val = torch.stack([x["val_loss"] for x in outputs]).mean()
log_dict = {"val_loss": loss_val.detach()}
return {"log": log_dict, "val_loss": log_dict["val_loss"], "progress_bar": log_dict}
def configure_optimizers(self):
opt = torch.optim.Adam(self.net.parameters(), lr=self.lr)
sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=10)
return [opt], [sch]
def train_dataloader(self):
return DataLoader(self.trainset, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.validset, batch_size=self.batch_size, shuffle=False)
@staticmethod
def add_model_specific_args(parent_parser): # pragma: no-cover
parser = parent_parser.add_argument_group("SegModel")
parser.add_argument("--data_path", type=str,
help="path where dataset is stored")
parser.add_argument("--batch_size", type=int,
default=16, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.001,
help="adam: learning rate")
parser.add_argument("--num_layers", type=int,
default=5, help="number of layers on u-net")
parser.add_argument("--features_start", type=float,
default=64, help="number of features in first layer")
parser.add_argument(
"--bilinear", action="store_true", default=False,
help="whether to use bilinear interpolation or transposed"
)
return parent_parser
@classmethod
def from_argparse_args(cls, args: Union[Namespace, ArgumentParser], **kwargs):
return from_argparse_args(cls, args, **kwargs)
def main(hparams: Namespace):
# ------------------------
# 1 INIT LIGHTNING MODEL
# ------------------------
model = SegModel.from_argparse_args(hparams)
# ------------------------
# 2 INIT TRAINER
# ------------------------
trainer = Trainer.from_argparse_args(hparams)
# ------------------------
# 3 START TRAINING
# ------------------------
trainer.fit(model)
if __name__ == "__main__":
cli_lightning_logo()
parser = ArgumentParser(add_help=False)
parser = SegModel.add_model_specific_args(parser)
parser.add_argument("--num_processes", type=int, default=1,
help="The number of processes in distributed training.")
parser.add_argument('--use_ipex', action='store_true', default=False,
help='use intel pytorch extension')
hparams = parser.parse_args()
main(hparams)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import os
import hashlib
import psutil
import urllib.request
import tarfile
from getpass import getuser
from os import getenv, mkdir
from os.path import expanduser, dirname, isdir, isfile, islink, \
join, lexists, normpath, realpath, relpath
from subprocess import check_call, check_output, CalledProcessError, STDOUT
from stat import ST_MODE
from tempfile import NamedTemporaryFile
DEVNULL = open(os.devnull, 'w')
ENTRYFILE = "__init__.py"
GITCONFIG_PATH = join(getenv("HOME"), ".gitconfig")
WORKDIR = realpath(join(dirname(__file__)))
def add_git_config_entry(params):
hash_before = checksum(realpath(GITCONFIG_PATH))
cmd = ["git", "config", "--global"] + params
check_call(cmd)
hash_after = checksum(realpath(GITCONFIG_PATH))
msg = "Added \"{}\" to git config".format(" ".join(params))
if hash_before != hash_after:
changed_print(msg)
else:
ok_print(msg)
def add_user_to_group(user, group):
msg = "current user is in group {}".format(group)
sudo_user = getenv("SUDO_USER", "")
if sudo_user:
cmd = "sudo -u {} groups".format(sudo_user,)
curr_user_groups = check_output(
cmd,
shell=True).decode("utf-8").strip().split()
else:
curr_user_groups = check_output(
["groups"],
universal_newlines=True).strip().split()
if group in curr_user_groups:
ok_print(msg)
else:
launch_silent(
["sudo", "gpasswd", "-a", user, group])
changed_print(msg + ". Please relogin to take effect.")
def changed_print(msg):
print("\033[93mChanged\033[0m {}".format(msg))
def change_java(new_version):
msg = "{} set to default".format(new_version)
java_version = check_output(
["sudo", "archlinux-java", "get"],
universal_newlines=True).strip()
if java_version == new_version:
ok_print(msg)
else:
launch_silent(["sudo", "archlinux-java", "set", new_version])
changed_print(msg)
def checksum(file, algorithm="md5"):
if not lexists(file):
return 0
if algorithm == "md5":
hash = hashlib.md5()
elif algorithm == "sha256":
hash = hashlib.sha256()
content = open(file, "rb").read()
hash.update(content)
return hash.hexdigest()
def chmod(path, mode):
msg = "{} mode changed to {}".format(path, mode)
if get_mode(path) == mode:
ok_print(msg)
else:
launch_silent(["sudo", "chmod", mode, path])
changed_print(msg)
def chsh(user, shell):
msg = "{}'s shell set to {}".format(user, shell)
with open("/etc/passwd", "r") as passwd_file:
for line in passwd_file.readlines():
elements = line.strip().split(":")
if elements[0] == user:
                if elements[-1] == shell:
ok_print(msg)
else:
launch_silent(["sudo", "chsh", "-s", shell, user])
changed_print(msg)
def clone(url, path):
path = realpath(path)
sudo_user = getenv("SUDO_USER", "")
msg = "repo {} is cloned and up-to-date into {}".format(url, path)
if lexists(join(path, ".git")) and repo_origin(path) == url:
cmd = ["git", "-C", path, "pull", "--rebase"]
else:
cmd = ["git", "clone", url, path]
if sudo_user:
cmd = ["sudo", "-u", sudo_user] + cmd
launch_silent(cmd)
ok_print(msg)
def create_dir(dst):
norm_dst = normpath(dst)
msg = "{} created".format(norm_dst)
if isdir(norm_dst):
ok_print(msg)
return
elif isfile(norm_dst):
os.remove(norm_dst)
mkdir_p(norm_dst)
changed_print(msg)
def daemon_reload():
try:
launch_silent(["systemctl", "daemon-reload"])
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
            raise
return True
def enable_service(service):
msg = "{} is enabled".format(service)
if service_enabled(service):
ok_print(msg)
else:
launch_silent(["sudo", "systemctl", "enable", service])
changed_print(msg)
def error_print(msg):
print("\033[91mERROR\033[0m {}".format(msg))
def file_lines(path):
with open(path, "r") as content:
for line in content.readlines():
line = line.strip()
if line and not line.startswith("#"):
yield line
def filter_temp(path):
return not path.startswith((".", "_"))
def get_cmd(line):
array = line.split()
cmd = array[0]
params = " ".join(array[1:])
return [cmd, params]
def get_mode(path):
return oct(os.lstat(path)[ST_MODE])[-3:]
def git_crypt_install():
if not isfile("/usr/local/bin/git-crypt"):
git_crypt_src = join(WORKDIR, "..", "identity", "git-crypt")
launch_silent(["make"], cwd=git_crypt_src)
launch_silent(["sudo", "make", "install"], cwd=git_crypt_src)
changed_print("git-crypt installed")
else:
ok_print("git-crypt installed")
def git_crypt_unlock(repo_path, key_path):
file_type = check_output(
["file", "-b", "--mime-type", join(repo_path, "ssh_config")],
universal_newlines=True).strip()
if file_type == "application/octet-stream":
check_call(["git-crypt", "unlock", key_path], cwd=repo_path)
changed_print("Private repo {} unlocked".format(repo_path))
else:
ok_print("Private repo {} unlocked".format(repo_path))
def info_print(msg):
print("\033[94m{}\033[0m".format(msg))
def install_file(src, dst):
norm_src = normpath(src)
norm_dst = normpath(dst)
msg = "{} -> {}".format(norm_src, norm_dst)
if isfile(norm_dst):
if realpath(norm_dst) == realpath(norm_src):
ok_print(msg)
return
else:
os.remove(norm_dst)
# Dangling link
elif islink(norm_dst):
os.remove(norm_dst)
os.symlink(norm_src, norm_dst)
changed_print(msg)
def install_remote_archive(url, reference_checksum, path):
msg = "{} archive installed in {}".format(url, path)
if isdir(join(path, "google-cloud-sdk")):
ok_print(msg)
return
with NamedTemporaryFile() as temp_file:
with urllib.request.urlopen(url) as download_url:
temp_file.write(download_url.read())
file_checksum = checksum(temp_file.name, "sha256")
if not reference_checksum == file_checksum:
            raise Exception(
                """Checksum mismatch!!!
                Downloaded file checksum: {}
                Reference checksum: {}""".format(
                    file_checksum,
                    reference_checksum,
                )
            )
with tarfile.open(temp_file.name, "r:gz") as archive:
archive.extractall(path)
sudo_user = getenv("SUDO_USER", "")
launch_silent(["chown", "-R", sudo_user, path])
changed_print(msg)
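# Illustrative usage (the URL, digest and target path below are hypothetical):
#   install_remote_archive(
#       "https://example.com/google-cloud-sdk.tar.gz",
#       "<expected sha256 hex digest>",
#       expanduser("~/opt"))
# The archive is only downloaded when <path>/google-cloud-sdk is missing, and the
# download is verified against the sha256 digest before being extracted into path.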
def install(package):
msg = "{} installed".format(package)
if is_installed(package):
ok_print(msg)
else:
changed_print(msg)
sudo_user = getenv("SUDO_USER", "")
cmd = "yaourt -S --noconfirm {}".format(package)
        # Handles demotion since yaourt refuses to be launched as root
if sudo_user:
cmd = "sudo -u {} {}".format(sudo_user, cmd)
launch_silent(cmd, shell=True)
else:
launch_silent(cmd)
def install_tree(src,
dst,
hidden=False,
file_filter=filter_temp,
dir_filter=filter_temp):
create_dir(dst)
for root, dirs, files in os.walk(src):
dst_path = join(dst, relpath(root, src))
for name in [file for file in files if file_filter(file)]:
dst_name = ".{}".format(name) if hidden else name
install_file(join(root, name), join(dst_path, dst_name))
for name in [dir for dir in dirs if dir_filter(dir)]:
dst_name = ".{}".format(name) if hidden else name
create_dir(join(dst_path, dst_name))
def is_installed(package):
try:
launch_silent(["pacman", "-Qs", "^{}$".format(package)])
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
            raise
return True
def is_laptop():
return psutil.sensors_battery() is not None
def launch_silent(cmd, cwd=None, shell=False):
check_call(cmd, stdout=DEVNULL, stderr=STDOUT, cwd=cwd, shell=shell)
def line_in_file(path, line):
path = realpath(expanduser(path))
msg = "\"{}\" in file \"{}\"".format(line, path)
with open(path, 'a+') as f:
f.seek(0)
for current_line in f.readlines():
if current_line.strip() == line:
ok_print(msg)
return
f.write(line + "\n")
changed_print(msg)
def mkdir_p(path):
curr_path = "/"
for dir in path.split("/"):
curr_path = join(curr_path, dir)
if not lexists(curr_path):
mkdir(curr_path)
def modprobe(module):
msg = "{} module loaded".format(module)
changed_print(msg)
launch_silent(["sudo", "modprobe", module])
def ok_print(msg):
print("\033[92mOK\033[0m {}".format(msg))
def pacman_refresh():
launch_silent(["sudo", "pacman", "-Syy"])
changed_print("Pacman database sync'ed")
launch_silent(["sudo", "pacman-key", "--refresh-keys"])
changed_print("Pacman keys refreshed")
def remove_file(target):
norm_target = normpath(target)
msg = "{} removed".format(norm_target)
if not lexists(norm_target):
ok_print(msg)
elif isdir(norm_target):
error_print(
"{} can't remove since it is not a file.".format(norm_target))
else:
os.remove(norm_target)
changed_print(msg)
def remove_matching_files(src, dst, hidden=False, file_filter=filter_temp):
for root, dirs, files in os.walk(src):
dst_path = join(dst, relpath(root, src))
for name in [file for file in files if file_filter(file)]:
dst_name = ".{}".format(name) if hidden else name
remove_file(join(dst_path, dst_name))
# Do not touch dirs, since it is too dangerous
def remove(package):
msg = "{} removed".format(package)
if is_installed(package):
changed_print(msg)
launch_silent(["yaourt", "-R", "--noconfirm", package])
else:
ok_print(msg)
def remove_tree(target):
msg = "{} removed".format(target)
if lexists(target):
shutil.rmtree(target)
changed_print(msg)
else:
ok_print(msg)
def repo_origin(path):
return check_output(
["git", "-C", path, "remote", "get-url", "origin"],
universal_newlines=True
).strip()
def service_enabled(service):
try:
launch_silent(["systemctl", "is-enabled", service])
except CalledProcessError as e:
if e.returncode == 1:
return False
else:
            raise
return True
def stop_process(name):
msg = "{} stopped".format(name)
try:
stdout = check_output(["pgrep", "-u", getuser(), name])
    except CalledProcessError:
ok_print(msg)
return
pid = stdout.splitlines()[0]
check_call(["kill", pid])
changed_print(msg)
def vim_cmd(cmd):
try:
sudo_user = getenv("SUDO_USER", "")
if sudo_user:
vim_cmd = "sudo -u {} vim {}".format(sudo_user, cmd)
else:
vim_cmd = "vim {}".format(cmd)
check_call(vim_cmd, shell=True)
changed_print("vim {} succeeded".format(cmd))
    except CalledProcessError:
        error_print("vim {} failed".format(cmd))
|
|
"""Python part of the warnings subsystem."""
import sys
__all__ = ["warn", "warn_explicit", "showwarning",
"formatwarning", "filterwarnings", "simplefilter",
"resetwarnings", "catch_warnings"]
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Hook to write a warning to a file; replace if you like."""
msg = WarningMessage(message, category, filename, lineno, file, line)
_showwarnmsg_impl(msg)
def formatwarning(message, category, filename, lineno, line=None):
"""Function to format a warning the standard way."""
msg = WarningMessage(message, category, filename, lineno, None, line)
return _formatwarnmsg_impl(msg)
def _showwarnmsg_impl(msg):
file = msg.file
if file is None:
file = sys.stderr
if file is None:
# sys.stderr is None when run with pythonw.exe:
# warnings get lost
return
text = _formatwarnmsg(msg)
try:
file.write(text)
except OSError:
# the file (probably stderr) is invalid - this warning gets lost.
pass
def _formatwarnmsg_impl(msg):
s = ("%s:%s: %s: %s\n"
% (msg.filename, msg.lineno, msg.category.__name__,
msg.message))
if msg.line is None:
try:
import linecache
line = linecache.getline(msg.filename, msg.lineno)
except Exception:
# When a warning is logged during Python shutdown, linecache
# and the import machinery don't work anymore
line = None
linecache = None
else:
line = msg.line
if line:
line = line.strip()
s += " %s\n" % line
if msg.source is not None:
try:
import tracemalloc
tb = tracemalloc.get_object_traceback(msg.source)
except Exception:
# When a warning is logged during Python shutdown, tracemalloc
# and the import machinery don't work anymore
tb = None
if tb is not None:
s += 'Object allocated at (most recent call first):\n'
for frame in tb:
s += (' File "%s", lineno %s\n'
% (frame.filename, frame.lineno))
try:
if linecache is not None:
line = linecache.getline(frame.filename, frame.lineno)
else:
line = None
except Exception:
line = None
if line:
line = line.strip()
s += ' %s\n' % line
return s
# Keep a reference to check if the function was replaced
_showwarning = showwarning
def _showwarnmsg(msg):
"""Hook to write a warning to a file; replace if you like."""
showwarning = globals().get('showwarning', _showwarning)
if showwarning is not _showwarning:
# warnings.showwarning() was replaced
if not callable(showwarning):
raise TypeError("warnings.showwarning() must be set to a "
"function or method")
showwarning(msg.message, msg.category, msg.filename, msg.lineno,
msg.file, msg.line)
return
_showwarnmsg_impl(msg)
# Keep a reference to check if the function was replaced
_formatwarning = formatwarning
def _formatwarnmsg(msg):
"""Function to format a warning the standard way."""
formatwarning = globals().get('formatwarning', _formatwarning)
if formatwarning is not _formatwarning:
# warnings.formatwarning() was replaced
return formatwarning(msg.message, msg.category,
msg.filename, msg.lineno, line=msg.line)
return _formatwarnmsg_impl(msg)
def filterwarnings(action, message="", category=Warning, module="", lineno=0,
append=False):
"""Insert an entry into the list of warnings filters (at the front).
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'message' -- a regex that the warning message must match
'category' -- a class that the warning must be a subclass of
'module' -- a regex that the module name must match
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
import re
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(message, str), "message must be a string"
assert isinstance(category, type), "category must be a class"
assert issubclass(category, Warning), "category must be a Warning subclass"
assert isinstance(module, str), "module must be a string"
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
_add_filter(action, re.compile(message, re.I), category,
re.compile(module), lineno, append=append)
def simplefilter(action, category=Warning, lineno=0, append=False):
"""Insert a simple entry into the list of warnings filters (at the front).
A simple filter matches all modules and messages.
'action' -- one of "error", "ignore", "always", "default", "module",
or "once"
'category' -- a class that the warning must be a subclass of
'lineno' -- an integer line number, 0 matches all warnings
'append' -- if true, append to the list of filters
"""
assert action in ("error", "ignore", "always", "default", "module",
"once"), "invalid action: %r" % (action,)
assert isinstance(lineno, int) and lineno >= 0, \
"lineno must be an int >= 0"
_add_filter(action, None, category, None, lineno, append=append)
def _add_filter(*item, append):
# Remove possible duplicate filters, so new one will be placed
# in correct place. If append=True and duplicate exists, do nothing.
if not append:
try:
filters.remove(item)
except ValueError:
pass
filters.insert(0, item)
else:
if item not in filters:
filters.append(item)
_filters_mutated()
def resetwarnings():
"""Clear the list of warning filters, so that no filters are active."""
filters[:] = []
_filters_mutated()
class _OptionError(Exception):
"""Exception used by option processing helpers."""
pass
# Helper to process -W options passed via sys.warnoptions
def _processoptions(args):
for arg in args:
try:
_setoption(arg)
except _OptionError as msg:
print("Invalid -W option ignored:", msg, file=sys.stderr)
# Helper for _processoptions()
def _setoption(arg):
import re
parts = arg.split(':')
if len(parts) > 5:
raise _OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append('')
action, message, category, module, lineno = [s.strip()
for s in parts]
action = _getaction(action)
message = re.escape(message)
category = _getcategory(category)
module = re.escape(module)
if module:
module = module + '$'
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise _OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
filterwarnings(action, message, category, module, lineno)
# Helper for _setoption()
def _getaction(action):
if not action:
return "default"
if action == "all": return "always" # Alias
for a in ('default', 'always', 'ignore', 'module', 'once', 'error'):
if a.startswith(action):
return a
raise _OptionError("invalid action: %r" % (action,))
# Helper for _setoption()
def _getcategory(category):
import re
if not category:
return Warning
if re.match("^[a-zA-Z0-9_]+$", category):
try:
cat = eval(category)
except NameError:
raise _OptionError("unknown warning category: %r" % (category,))
else:
i = category.rfind(".")
module = category[:i]
klass = category[i+1:]
try:
m = __import__(module, None, None, [klass])
except ImportError:
raise _OptionError("invalid module name: %r" % (module,))
try:
cat = getattr(m, klass)
except AttributeError:
raise _OptionError("unknown warning category: %r" % (category,))
if not issubclass(cat, Warning):
raise _OptionError("invalid warning category: %r" % (category,))
return cat
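# A small illustrative helper (not part of the standard module; the name and the
# sample option string are made up): shows how the pieces of a -W style option
# such as "error::DeprecationWarning:mymodule:10" are interpreted by the helpers
# above, without installing a filter.
def _demo_parse_w_option(arg="error::DeprecationWarning:mymodule:10"):
    parts = arg.split(':')
    while len(parts) < 5:
        parts.append('')
    action, message, category, module, lineno = [s.strip() for s in parts]
    return (_getaction(action),            # 'error'
            message,                       # '' -> matches any message
            _getcategory(category),        # the DeprecationWarning class
            module,                        # 'mymodule'
            int(lineno) if lineno else 0)  # 10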
def _is_internal_frame(frame):
"""Signal whether the frame is an internal CPython implementation detail."""
filename = frame.f_code.co_filename
return 'importlib' in filename and '_bootstrap' in filename
def _next_external_frame(frame):
"""Find the next frame that doesn't involve CPython internals."""
frame = frame.f_back
while frame is not None and _is_internal_frame(frame):
frame = frame.f_back
return frame
# Code typically replaced by _warnings
def warn(message, category=None, stacklevel=1, source=None):
"""Issue a warning, or maybe ignore it or raise an exception."""
# Check if message is already a Warning object
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
if not (isinstance(category, type) and issubclass(category, Warning)):
raise TypeError("category must be a Warning subclass, "
"not '{:s}'".format(type(category).__name__))
# Get context information
try:
if stacklevel <= 1 or _is_internal_frame(sys._getframe(1)):
# If frame is too small to care or if the warning originated in
# internal code, then do not try to hide any frames.
frame = sys._getframe(stacklevel)
else:
frame = sys._getframe(1)
# Look for one frame less since the above line starts us off.
for x in range(stacklevel-1):
frame = _next_external_frame(frame)
if frame is None:
raise ValueError
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = frame.f_globals
lineno = frame.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc"):
filename = filename[:-1]
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
registry = globals.setdefault("__warningregistry__", {})
warn_explicit(message, category, filename, lineno, module, registry,
globals, source)
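# Illustrative sketch (not part of this module; `deprecated_api` is a hypothetical
# name): a typical use of `stacklevel` so that the warning is attributed to the
# caller of the deprecated function rather than to the function itself.
def _demo_stacklevel():
    def deprecated_api():
        warn("deprecated_api() is deprecated", DeprecationWarning, stacklevel=2)
    deprecated_api()  # the recorded filename/lineno point at this call line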
def warn_explicit(message, category, filename, lineno,
module=None, registry=None, module_globals=None,
source=None):
lineno = int(lineno)
if module is None:
module = filename or "<unknown>"
if module[-3:].lower() == ".py":
module = module[:-3] # XXX What about leading pathname?
if registry is None:
registry = {}
if registry.get('version', 0) != _filters_version:
registry.clear()
registry['version'] = _filters_version
if isinstance(message, Warning):
text = str(message)
category = message.__class__
else:
text = message
message = category(message)
key = (text, category, lineno)
# Quick test for common case
if registry.get(key):
return
# Search the filters
for item in filters:
action, msg, cat, mod, ln = item
if ((msg is None or msg.match(text)) and
issubclass(category, cat) and
(mod is None or mod.match(module)) and
(ln == 0 or lineno == ln)):
break
else:
action = defaultaction
# Early exit actions
if action == "ignore":
registry[key] = 1
return
# Prime the linecache for formatting, in case the
# "file" is actually in a zipfile or something.
import linecache
linecache.getlines(filename, module_globals)
if action == "error":
raise message
# Other actions
if action == "once":
registry[key] = 1
oncekey = (text, category)
if onceregistry.get(oncekey):
return
onceregistry[oncekey] = 1
elif action == "always":
pass
elif action == "module":
registry[key] = 1
altkey = (text, category, 0)
if registry.get(altkey):
return
registry[altkey] = 1
elif action == "default":
registry[key] = 1
else:
# Unrecognized actions are errors
raise RuntimeError(
"Unrecognized action (%r) in warnings.filters:\n %s" %
(action, item))
# Print message and context
msg = WarningMessage(message, category, filename, lineno, source)
_showwarnmsg(msg)
class WarningMessage(object):
_WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
"line", "source")
def __init__(self, message, category, filename, lineno, file=None,
line=None, source=None):
local_values = locals()
for attr in self._WARNING_DETAILS:
setattr(self, attr, local_values[attr])
self._category_name = category.__name__ if category else None
def __str__(self):
return ("{message : %r, category : %r, filename : %r, lineno : %s, "
"line : %r}" % (self.message, self._category_name,
self.filename, self.lineno, self.line))
class catch_warnings(object):
"""A context manager that copies and restores the warnings filter upon
exiting the context.
The 'record' argument specifies whether warnings should be captured by a
custom implementation of warnings.showwarning() and be appended to a list
returned by the context manager. Otherwise None is returned by the context
manager. The objects appended to the list are arguments whose attributes
mirror the arguments to showwarning().
The 'module' argument is to specify an alternative module to the module
named 'warnings' and imported under that name. This argument is only useful
when testing the warnings module itself.
"""
def __init__(self, *, record=False, module=None):
"""Specify whether to record warnings and if an alternative module
should be used other than sys.modules['warnings'].
For compatibility with Python 3.0, please consider all arguments to be
keyword-only.
"""
self._record = record
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._module._filters_mutated()
self._showwarning = self._module.showwarning
self._showwarnmsg = self._module._showwarnmsg
if self._record:
log = []
            def showwarnmsg(msg):
                log.append(msg)
            self._module._showwarnmsg = showwarnmsg
return log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module._filters_mutated()
self._module.showwarning = self._showwarning
self._module._showwarnmsg = self._showwarnmsg
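# A minimal usage sketch (not part of this module; the helper name is
# hypothetical): recording warnings inside a `catch_warnings` block, as described
# in the class docstring above.
def _demo_catch_warnings():
    with catch_warnings(record=True) as caught:
        simplefilter("always")
        warn("something happened", UserWarning)
    # `caught` is a list of WarningMessage objects; the previous filters are
    # restored when the block exits.
    return [(w.category.__name__, str(w.message)) for w in caught]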
# filters contains a sequence of filter 5-tuples
# The components of the 5-tuple are:
# - an action: error, ignore, always, default, module, or once
# - a compiled regex that must match the warning message
# - a class representing the warning category
# - a compiled regex that must match the module that is being warned
# - a line number for the line being warned about, or 0 to mean any line
# If either of the compiled regexes is None, it matches anything.
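# For example (illustrative, derived from _add_filter above):
#   simplefilter("ignore", DeprecationWarning) prepends
#       ("ignore", None, DeprecationWarning, None, 0)
#   filterwarnings("error", message="unclosed", module="socket") prepends
#       ("error", re.compile("unclosed", re.I), Warning, re.compile("socket"), 0)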
_warnings_defaults = False
try:
from _warnings import (filters, _defaultaction, _onceregistry,
warn, warn_explicit, _filters_mutated)
defaultaction = _defaultaction
onceregistry = _onceregistry
_warnings_defaults = True
except ImportError:
filters = []
defaultaction = "default"
onceregistry = {}
_filters_version = 1
def _filters_mutated():
global _filters_version
_filters_version += 1
# Module initialization
_processoptions(sys.warnoptions)
if not _warnings_defaults:
silence = [ImportWarning, PendingDeprecationWarning]
silence.append(DeprecationWarning)
for cls in silence:
simplefilter("ignore", category=cls)
bytes_warning = sys.flags.bytes_warning
if bytes_warning > 1:
bytes_action = "error"
elif bytes_warning:
bytes_action = "default"
else:
bytes_action = "ignore"
simplefilter(bytes_action, category=BytesWarning, append=1)
# resource usage warnings are enabled by default in pydebug mode
if hasattr(sys, 'gettotalrefcount'):
resource_action = "always"
else:
resource_action = "ignore"
simplefilter(resource_action, category=ResourceWarning, append=1)
del _warnings_defaults
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._monitor_management_client_enums import *
class AzureMonitorMetricsDestination(msrest.serialization.Model):
"""Azure Monitor Metrics destination.
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(AzureMonitorMetricsDestination, self).__init__(**kwargs)
self.name = name
class DataCollectionRule(msrest.serialization.Model):
"""Definition of what monitoring data to collect and where that data should be sent.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDataSources
:param destinations: The specification of destinations.
:type destinations:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDestinations
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2019_11_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'immutable_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'data_sources': {'key': 'dataSources', 'type': 'DataCollectionRuleDataSources'},
'destinations': {'key': 'destinations', 'type': 'DataCollectionRuleDestinations'},
'data_flows': {'key': 'dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_sources: Optional["DataCollectionRuleDataSources"] = None,
destinations: Optional["DataCollectionRuleDestinations"] = None,
data_flows: Optional[List["DataFlow"]] = None,
**kwargs
):
super(DataCollectionRule, self).__init__(**kwargs)
self.description = description
self.immutable_id = None
self.data_sources = data_sources
self.destinations = destinations
self.data_flows = data_flows
self.provisioning_state = None
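# Illustrative sketch (not part of the generated code): these are plain msrest
# models, so a rule skeleton can be constructed directly, e.g.
#   rule = DataCollectionRule(description="collect perf counters")
#   body = rule.serialize()  # roughly {'description': 'collect perf counters'}
# Read-only fields such as immutable_id and provisioning_state are populated by
# the service and ignored when sending a request.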
class DataCollectionRuleAssociation(msrest.serialization.Model):
"""Definition of association of a data collection rule with a monitored Azure resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'data_collection_rule_id': {'key': 'dataCollectionRuleId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_rule_id: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociation, self).__init__(**kwargs)
self.description = description
self.data_collection_rule_id = data_collection_rule_id
self.provisioning_state = None
class DataCollectionRuleAssociationProxyOnlyResource(msrest.serialization.Model):
"""Definition of generic ARM proxy resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'data_collection_rule_id': {'key': 'properties.dataCollectionRuleId', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_rule_id: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.etag = None
self.description = description
self.data_collection_rule_id = data_collection_rule_id
self.provisioning_state = None
class DataCollectionRuleAssociationProxyOnlyResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value:
list[~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleAssociationProxyOnlyResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionRuleAssociationProxyOnlyResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DataCollectionRuleAssociationProxyOnlyResource"],
next_link: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DataCollectionRuleAssociationProxyOnlyResourceProperties(DataCollectionRuleAssociation):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the association.
:type description: str
:param data_collection_rule_id: The resource ID of the data collection rule that is to be
associated.
:type data_collection_rule_id: str
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleAssociationProvisioningState
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'data_collection_rule_id': {'key': 'dataCollectionRuleId', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_collection_rule_id: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleAssociationProxyOnlyResourceProperties, self).__init__(description=description, data_collection_rule_id=data_collection_rule_id, **kwargs)
class DataSourcesSpec(msrest.serialization.Model):
"""Specification of data sources that will be collected.
:param performance_counters: The list of performance counter data source configurations.
:type performance_counters:
list[~$(python-base-namespace).v2019_11_01_preview.models.PerfCounterDataSource]
:param windows_event_logs: The list of Windows Event Log data source configurations.
:type windows_event_logs:
list[~$(python-base-namespace).v2019_11_01_preview.models.WindowsEventLogDataSource]
:param syslog: The list of Syslog data source configurations.
:type syslog: list[~$(python-base-namespace).v2019_11_01_preview.models.SyslogDataSource]
:param extensions: The list of Azure VM extension data source configurations.
:type extensions:
list[~$(python-base-namespace).v2019_11_01_preview.models.ExtensionDataSource]
"""
_attribute_map = {
'performance_counters': {'key': 'performanceCounters', 'type': '[PerfCounterDataSource]'},
'windows_event_logs': {'key': 'windowsEventLogs', 'type': '[WindowsEventLogDataSource]'},
'syslog': {'key': 'syslog', 'type': '[SyslogDataSource]'},
'extensions': {'key': 'extensions', 'type': '[ExtensionDataSource]'},
}
def __init__(
self,
*,
performance_counters: Optional[List["PerfCounterDataSource"]] = None,
windows_event_logs: Optional[List["WindowsEventLogDataSource"]] = None,
syslog: Optional[List["SyslogDataSource"]] = None,
extensions: Optional[List["ExtensionDataSource"]] = None,
**kwargs
):
super(DataSourcesSpec, self).__init__(**kwargs)
self.performance_counters = performance_counters
self.windows_event_logs = windows_event_logs
self.syslog = syslog
self.extensions = extensions
class DataCollectionRuleDataSources(DataSourcesSpec):
"""The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls to the provisioned endpoint.
:param performance_counters: The list of performance counter data source configurations.
:type performance_counters:
list[~$(python-base-namespace).v2019_11_01_preview.models.PerfCounterDataSource]
:param windows_event_logs: The list of Windows Event Log data source configurations.
:type windows_event_logs:
list[~$(python-base-namespace).v2019_11_01_preview.models.WindowsEventLogDataSource]
:param syslog: The list of Syslog data source configurations.
:type syslog: list[~$(python-base-namespace).v2019_11_01_preview.models.SyslogDataSource]
:param extensions: The list of Azure VM extension data source configurations.
:type extensions:
list[~$(python-base-namespace).v2019_11_01_preview.models.ExtensionDataSource]
"""
_attribute_map = {
'performance_counters': {'key': 'performanceCounters', 'type': '[PerfCounterDataSource]'},
'windows_event_logs': {'key': 'windowsEventLogs', 'type': '[WindowsEventLogDataSource]'},
'syslog': {'key': 'syslog', 'type': '[SyslogDataSource]'},
'extensions': {'key': 'extensions', 'type': '[ExtensionDataSource]'},
}
def __init__(
self,
*,
performance_counters: Optional[List["PerfCounterDataSource"]] = None,
windows_event_logs: Optional[List["WindowsEventLogDataSource"]] = None,
syslog: Optional[List["SyslogDataSource"]] = None,
extensions: Optional[List["ExtensionDataSource"]] = None,
**kwargs
):
super(DataCollectionRuleDataSources, self).__init__(performance_counters=performance_counters, windows_event_logs=windows_event_logs, syslog=syslog, extensions=extensions, **kwargs)
class DestinationsSpec(msrest.serialization.Model):
"""Specification of destinations that can be used in data flows.
:param log_analytics: List of Log Analytics destinations.
:type log_analytics:
list[~$(python-base-namespace).v2019_11_01_preview.models.LogAnalyticsDestination]
:param azure_monitor_metrics: Azure Monitor Metrics destination.
:type azure_monitor_metrics:
~$(python-base-namespace).v2019_11_01_preview.models.DestinationsSpecAzureMonitorMetrics
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': '[LogAnalyticsDestination]'},
'azure_monitor_metrics': {'key': 'azureMonitorMetrics', 'type': 'DestinationsSpecAzureMonitorMetrics'},
}
def __init__(
self,
*,
log_analytics: Optional[List["LogAnalyticsDestination"]] = None,
azure_monitor_metrics: Optional["DestinationsSpecAzureMonitorMetrics"] = None,
**kwargs
):
super(DestinationsSpec, self).__init__(**kwargs)
self.log_analytics = log_analytics
self.azure_monitor_metrics = azure_monitor_metrics
class DataCollectionRuleDestinations(DestinationsSpec):
"""The specification of destinations.
:param log_analytics: List of Log Analytics destinations.
:type log_analytics:
list[~$(python-base-namespace).v2019_11_01_preview.models.LogAnalyticsDestination]
:param azure_monitor_metrics: Azure Monitor Metrics destination.
:type azure_monitor_metrics:
~$(python-base-namespace).v2019_11_01_preview.models.DestinationsSpecAzureMonitorMetrics
"""
_attribute_map = {
'log_analytics': {'key': 'logAnalytics', 'type': '[LogAnalyticsDestination]'},
'azure_monitor_metrics': {'key': 'azureMonitorMetrics', 'type': 'DestinationsSpecAzureMonitorMetrics'},
}
def __init__(
self,
*,
log_analytics: Optional[List["LogAnalyticsDestination"]] = None,
azure_monitor_metrics: Optional["DestinationsSpecAzureMonitorMetrics"] = None,
**kwargs
):
super(DataCollectionRuleDestinations, self).__init__(log_analytics=log_analytics, azure_monitor_metrics=azure_monitor_metrics, **kwargs)
class DataCollectionRuleResource(msrest.serialization.Model):
"""Definition of ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param location: Required. The geo-location where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param kind: The kind of the resource. Possible values include: "Linux", "Windows".
:type kind: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleResourceKind
:ivar id: Fully qualified ID of the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: Resource entity tag (ETag).
:vartype etag: str
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDataSources
:param destinations: The specification of destinations.
:type destinations:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDestinations
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2019_11_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'location': {'required': True},
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
'immutable_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'kind': {'key': 'kind', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'immutable_id': {'key': 'properties.immutableId', 'type': 'str'},
'data_sources': {'key': 'properties.dataSources', 'type': 'DataCollectionRuleDataSources'},
'destinations': {'key': 'properties.destinations', 'type': 'DataCollectionRuleDestinations'},
'data_flows': {'key': 'properties.dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
kind: Optional[Union[str, "KnownDataCollectionRuleResourceKind"]] = None,
description: Optional[str] = None,
data_sources: Optional["DataCollectionRuleDataSources"] = None,
destinations: Optional["DataCollectionRuleDestinations"] = None,
data_flows: Optional[List["DataFlow"]] = None,
**kwargs
):
super(DataCollectionRuleResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
self.kind = kind
self.id = None
self.name = None
self.type = None
self.etag = None
self.description = description
self.immutable_id = None
self.data_sources = data_sources
self.destinations = destinations
self.data_flows = data_flows
self.provisioning_state = None
class DataCollectionRuleResourceListResult(msrest.serialization.Model):
"""A pageable list of resources.
All required parameters must be populated in order to send to Azure.
:param value: Required. A list of resources.
:type value:
list[~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleResource]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataCollectionRuleResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: List["DataCollectionRuleResource"],
next_link: Optional[str] = None,
**kwargs
):
super(DataCollectionRuleResourceListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DataCollectionRuleResourceProperties(DataCollectionRule):
"""Resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
:param description: Description of the data collection rule.
:type description: str
:ivar immutable_id: The immutable ID of this data collection rule. This property is READ-ONLY.
:vartype immutable_id: str
:param data_sources: The specification of data sources.
This property is optional and can be omitted if the rule is meant to be used via direct calls
to the provisioned endpoint.
:type data_sources:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDataSources
:param destinations: The specification of destinations.
:type destinations:
~$(python-base-namespace).v2019_11_01_preview.models.DataCollectionRuleDestinations
:param data_flows: The specification of data flows.
:type data_flows: list[~$(python-base-namespace).v2019_11_01_preview.models.DataFlow]
:ivar provisioning_state: The resource provisioning state. Possible values include: "Creating",
"Updating", "Deleting", "Succeeded", "Failed".
:vartype provisioning_state: str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataCollectionRuleProvisioningState
"""
_validation = {
'immutable_id': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'immutable_id': {'key': 'immutableId', 'type': 'str'},
'data_sources': {'key': 'dataSources', 'type': 'DataCollectionRuleDataSources'},
'destinations': {'key': 'destinations', 'type': 'DataCollectionRuleDestinations'},
'data_flows': {'key': 'dataFlows', 'type': '[DataFlow]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
data_sources: Optional["DataCollectionRuleDataSources"] = None,
destinations: Optional["DataCollectionRuleDestinations"] = None,
data_flows: Optional[List["DataFlow"]] = None,
**kwargs
):
super(DataCollectionRuleResourceProperties, self).__init__(description=description, data_sources=data_sources, destinations=destinations, data_flows=data_flows, **kwargs)
class DataFlow(msrest.serialization.Model):
"""Definition of which streams are sent to which destinations.
:param streams: List of streams for this data flow.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownDataFlowStreams]
:param destinations: List of destinations for this data flow.
:type destinations: list[str]
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'destinations': {'key': 'destinations', 'type': '[str]'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownDataFlowStreams"]]] = None,
destinations: Optional[List[str]] = None,
**kwargs
):
super(DataFlow, self).__init__(**kwargs)
self.streams = streams
self.destinations = destinations
class DestinationsSpecAzureMonitorMetrics(AzureMonitorMetricsDestination):
"""Azure Monitor Metrics destination.
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(DestinationsSpecAzureMonitorMetrics, self).__init__(name=name, **kwargs)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~$(python-base-namespace).v2019_11_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~$(python-base-namespace).v2019_11_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~$(python-base-namespace).v2019_11_01_preview.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class ExtensionDataSource(msrest.serialization.Model):
"""Definition of which data will be collected from a separate VM extension that integrates with the Azure Monitor Agent.
    Collected from either Windows or Linux machines, depending on which extension is defined.
All required parameters must be populated in order to send to Azure.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownExtensionDataSourceStreams]
:param extension_name: Required. The name of the VM extension.
:type extension_name: str
    :param extension_settings: The extension settings. The format is specific to the particular
    extension.
:type extension_settings: any
:param input_data_sources: The list of data sources this extension needs data from.
:type input_data_sources: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'extension_name': {'required': True},
}
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'extension_name': {'key': 'extensionName', 'type': 'str'},
'extension_settings': {'key': 'extensionSettings', 'type': 'object'},
'input_data_sources': {'key': 'inputDataSources', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
extension_name: str,
streams: Optional[List[Union[str, "KnownExtensionDataSourceStreams"]]] = None,
extension_settings: Optional[Any] = None,
input_data_sources: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(ExtensionDataSource, self).__init__(**kwargs)
self.streams = streams
self.extension_name = extension_name
self.extension_settings = extension_settings
self.input_data_sources = input_data_sources
self.name = name
class LogAnalyticsDestination(msrest.serialization.Model):
"""Log Analytics destination.
Variables are only populated by the server, and will be ignored when sending a request.
:param workspace_resource_id: The resource ID of the Log Analytics workspace.
:type workspace_resource_id: str
:ivar workspace_id: The Customer ID of the Log Analytics workspace.
:vartype workspace_id: str
:param name: A friendly name for the destination.
This name should be unique across all destinations (regardless of type) within the data
collection rule.
:type name: str
"""
_validation = {
'workspace_id': {'readonly': True},
}
_attribute_map = {
'workspace_resource_id': {'key': 'workspaceResourceId', 'type': 'str'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
workspace_resource_id: Optional[str] = None,
name: Optional[str] = None,
**kwargs
):
super(LogAnalyticsDestination, self).__init__(**kwargs)
self.workspace_resource_id = workspace_resource_id
self.workspace_id = None
self.name = name
class PerfCounterDataSource(msrest.serialization.Model):
"""Definition of which performance counters will be collected and how they will be collected by this data collection rule.
Collected from both Windows and Linux machines where the counter is present.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownPerfCounterDataSourceStreams]
:param sampling_frequency_in_seconds: The number of seconds between consecutive counter
measurements (samples).
:type sampling_frequency_in_seconds: int
:param counter_specifiers: A list of specifier names of the performance counters you want to
collect.
Use a wildcard (*) to collect a counter for all instances.
To get a list of performance counters on Windows, run the command 'typeperf'.
:type counter_specifiers: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'sampling_frequency_in_seconds': {'key': 'samplingFrequencyInSeconds', 'type': 'int'},
'counter_specifiers': {'key': 'counterSpecifiers', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownPerfCounterDataSourceStreams"]]] = None,
sampling_frequency_in_seconds: Optional[int] = None,
counter_specifiers: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(PerfCounterDataSource, self).__init__(**kwargs)
self.streams = streams
self.sampling_frequency_in_seconds = sampling_frequency_in_seconds
self.counter_specifiers = counter_specifiers
self.name = name
class ResourceForUpdate(msrest.serialization.Model):
"""Definition of ARM tracked top level resource properties for update operation.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(ResourceForUpdate, self).__init__(**kwargs)
self.tags = tags
class SyslogDataSource(msrest.serialization.Model):
"""Definition of which syslog data will be collected and how it will be collected.
Only collected from Linux machines.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownSyslogDataSourceStreams]
:param facility_names: The list of facility names.
:type facility_names: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownSyslogDataSourceFacilityNames]
:param log_levels: The log levels to collect.
:type log_levels: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownSyslogDataSourceLogLevels]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'facility_names': {'key': 'facilityNames', 'type': '[str]'},
'log_levels': {'key': 'logLevels', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownSyslogDataSourceStreams"]]] = None,
facility_names: Optional[List[Union[str, "KnownSyslogDataSourceFacilityNames"]]] = None,
log_levels: Optional[List[Union[str, "KnownSyslogDataSourceLogLevels"]]] = None,
name: Optional[str] = None,
**kwargs
):
super(SyslogDataSource, self).__init__(**kwargs)
self.streams = streams
self.facility_names = facility_names
self.log_levels = log_levels
self.name = name
class WindowsEventLogDataSource(msrest.serialization.Model):
"""Definition of which Windows Event Log events will be collected and how they will be collected.
Only collected from Windows machines.
:param streams: List of streams that this data source will be sent to.
A stream indicates what schema will be used for this data and usually what table in Log
Analytics the data will be sent to.
:type streams: list[str or
~$(python-base-namespace).v2019_11_01_preview.models.KnownWindowsEventLogDataSourceStreams]
:param x_path_queries: A list of Windows Event Log queries in XPATH format.
:type x_path_queries: list[str]
:param name: A friendly name for the data source.
This name should be unique across all data sources (regardless of type) within the data
collection rule.
:type name: str
"""
_attribute_map = {
'streams': {'key': 'streams', 'type': '[str]'},
'x_path_queries': {'key': 'xPathQueries', 'type': '[str]'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
streams: Optional[List[Union[str, "KnownWindowsEventLogDataSourceStreams"]]] = None,
x_path_queries: Optional[List[str]] = None,
name: Optional[str] = None,
**kwargs
):
super(WindowsEventLogDataSource, self).__init__(**kwargs)
self.streams = streams
self.x_path_queries = x_path_queries
self.name = name
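# The sketch below shows how the models above compose into a complete data
# collection rule: data sources emit named streams, destinations get friendly
# names, and DataFlow entries wire streams to destinations. It is a minimal
# illustration only; the stream name, counter specifier, location and workspace
# resource ID are assumed placeholder values, not taken from this module.
if __name__ == "__main__":
    _perf = PerfCounterDataSource(
        name="cpuCounters",
        streams=["Microsoft-Perf"],  # assumed stream name, for illustration only
        sampling_frequency_in_seconds=60,
        counter_specifiers=["\\Processor(_Total)\\% Processor Time"],
    )
    _destinations = DataCollectionRuleDestinations(
        log_analytics=[
            LogAnalyticsDestination(
                workspace_resource_id="/subscriptions/.../workspaces/example",  # placeholder
                name="centralWorkspace",
            )
        ],
    )
    _rule = DataCollectionRuleResource(
        location="eastus",  # placeholder region
        description="Example rule assembled from the models in this module.",
        data_sources=DataCollectionRuleDataSources(performance_counters=[_perf]),
        destinations=_destinations,
        data_flows=[DataFlow(streams=["Microsoft-Perf"], destinations=["centralWorkspace"])],
    )
    # serialize() is inherited from msrest.serialization.Model and returns the
    # dict that would be sent as the request body.
    print(_rule.serialize())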
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
import logging
import pdb
import random
import sys
from graphwalker import codeloader
from graphwalker import halting
# some ghetto enums
COST, PATH = 0, 1
# approximation of infinity(tm)
inf = 2 ** 31
log = logging.getLogger(__name__)
class Planner(object):
randcls = random.Random
def __init__(self, *al, **kw):
self.al, self.kw = al, kw
self.rng = self.randcls(self.kw.get('seed'))
def _setup(self, g, stop, start, context):
for ident, vert in g.V.items():
if vert.name == start:
self.vert = vert
break
else:
if start in g.V:
self.vert = g.V[start]
else:
raise RuntimeError("Could not find start vertex")
        stop.add(self.vert)
self.g, self.plan, self.stop = g, [], stop
return self.g.V, self.g.E, self.plan, self.vert
def forced_plan(self, plan=None):
"""Enter forced steps from Start source vertex.
The start node is normally a source, but sometimes the source is a
larger string of single-edge vertices. This enters them into the plan.
"""
plan = plan if plan is not None else self.plan
I, O = self.g.vert_degrees()
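        # vert_degrees() returns in/out degree maps keyed by vertex id. As long
        # as the current vertex has no incoming edges and exactly one way out,
        # the next step is forced: take it, drop the vertex and re-check.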
while len(self.vert.outgoing) == 1 and I[self.vert.id] == 0:
self.g.del_vert(self.vert)
self.vert = self.step(self.vert, self.vert.outgoing[0], plan)
I, O = self.g.vert_degrees()
def visit(self, it, plan=None):
plan = self.plan if plan is None else plan
self.stop.add(it)
plan.append(it)
def step(self, vert, edge, plan=None):
plan = self.plan if plan is None else plan
assert edge.src == vert.id, 'Edge not from this vertex'
dest = self.g.V[edge.tgt]
self.visit(edge, plan)
self.visit(dest, plan)
return dest
class EvenRandom(Planner):
def __call__(self, g, stop, start, context):
"""Walk through the graph by random edges until done."""
self._setup(g, stop, start, context)
return iter(self)
def choose_edge(self, edges):
return self.rng.choice(edges)
def __iter__(self):
while not self.stop:
edge = self.choose_edge(self.vert.outgoing)
self.stop.add(edge)
yield edge
self.vert = self.g.V[edge.tgt]
self.stop.add(self.vert)
yield self.vert
class Random(EvenRandom):
"""Walk through the graph by random edges until done."""
def choose_edge(self, edges):
naive, weighted = [], []
for e in edges:
if e.weight is None:
naive.append(e)
elif e.weight[-1:] == '%':
weighted.append((e, float(e.weight[:-1]) / 100))
else:
weighted.append((e, float(e.weight)))
if not weighted:
return self.rng.choice(edges)
total_given_probability = sum(w for e, w in weighted)
remaining = 1.0 - total_given_probability
if total_given_probability > 1.001:
log.warn("Probalities supplied exceed unity")
if len(naive) > 0:
if remaining <= 0:
log.warn("Unweighted edges get zero probability")
else:
weighted.extend((e, remaining / len(naive)) for e in naive)
else:
if remaining >= 0.01:
log.warn("Weighted edges sum to less than unity")
x, X = 0, self.rng.uniform(0.0, sum(w for e, w in weighted))
for e, w in weighted:
x += w
if x >= X:
return e
return e
class Euler(Planner):
"""Walk through the graph by ordered edges until done."""
def __call__(self, g, stop, start, context):
self._setup(g, stop, start, context)
self.g = self.g.copy()
self.forced_plan()
self.g.eulerize()
self.stop = halting.Never().start(g, None)
vert = self.vert
seen = set()
def loop(vert):
subplan = []
begin = vert
while len(seen) < len(self.g.E):
for edge in vert.outgoing:
if edge.id not in seen:
seen.add(edge.id)
vert = self.step(vert, edge, subplan)
break
else:
assert vert.id == begin.id, "Graph is not Eulerian"
break
return subplan
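        # Hierholzer-style construction: walk one loop from the start vertex,
        # then repeatedly splice further loops into the plan at the first
        # already-visited vertex that still has unvisited outgoing edges.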
plan = loop(vert) # initial plan
while len(seen) < len(self.g.E):
j = 0
for i in range(len(plan)):
if self.g.V.get(plan[i].id) is not plan[i]:
continue
vert = plan[i]
for edge in vert.outgoing:
if edge.id not in seen:
# splice new loop into plan
plan[i + 1: i + 1] = loop(vert)
j += 1
break
else:
assert j, "Graph is not connected"
self.plan, plan = [], self.plan + plan
for step in plan:
if stop:
break
self.visit(step)
return self.plan
class Goto(Planner):
"""Plan direct path to goal state(s), repeating [repeat] times."""
def __init__(self, *al, **kw):
self.al, self.kw = al, kw
self.goals = self.al
self.repeat = kw.pop('repeat', 1)
def __call__(self, g, stop, start, context):
self._setup(g, stop, start, context)
self.d = d = self.g.all_pairs_shortest_path()
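        # all_pairs_shortest_path() maps (source id, target id) pairs to
        # (cost, path) tuples (see the COST/PATH indices above); min() below
        # picks the cheapest path to any vertex matching the current goal.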
for i in xrange(self.repeat or inf):
for goal in self.goals:
if self.g.is_stuck(self.vert) or self.stop:
break
if goal == 'random':
goal = self.rng.choice(self.g.V.keys())
try:
cost, path = min(d[(self.vert.id, v.id)]
for v in self.g.V.values()
if ((v.name == goal or
v.id == goal) and
v is not self.vert))
plan = path
except ValueError:
continue
for item in plan:
edge = [e for e in self.vert.outgoing
if e.tgt == item][0]
self.vert = self.step(self.vert, edge)
return self.plan
class Interactive(Planner):
"""Planner that yields steps (or not) from user interaction.
    The protocol between choose and the plan iterator is deliberately kept
    simple, so that the choose method is easy to replace.
"""
raw_input = raw_input
out = sys.stderr
debugger = pdb.Pdb('\t', sys.stdin, sys.stderr)
help = """\
0-n: Traverse edge
h(elp) This message
d(ebug) Enter Pdb
    g(oto) Use Goto planner to go to some vertex
f(orce) Forcibly insert some words into the plan
    j(ump) Forcibly set the vertex where the planner believes it is
q(uit) End the interactive session
"""
def choose(self, planner, alts):
while True:
for i in range(len(alts)):
print >>self.out, i, alts[i]
try:
self.out.write('> ')
return self.raw_input()
except KeyboardInterrupt:
return None
except EOFError:
return None
except Exception as e:
print >>self.out, 'huh? %r' % e
choose_fn = choose
def __init__(self, *al, **kw):
self.al, self.kw = al, kw
def goto(self, goals):
stop = halting.Never().start(self.g, self.context)
return Goto(*goals)(self.g, stop, self.vert.id, self.context)
def choose_vert(self, name):
if name in self.g.V:
return self.g.V[name]
candidates = [v for v in self.g.V.values() if v.name == name]
if len(candidates) == 1:
return candidates[0]
else:
i = self.choose_fn(self, self.format_vert_list(candidates))
return candidates[int(i)]
def __call__(self, g, stop, start, context):
self._setup(g, stop, start, context)
self.context = context
return iter(self)
def format_vert_list(self, verts):
V = self.g.V
alts = []
for v in verts:
outs = set(['[%s]\t--(%s)-->%s' % (e.id, e.name, V[e.tgt].name)
for e in v.outgoing])
alts.append(v.name + '\n ' + '\n '.join(outs))
return alts
def format_edge_list(self, edges):
V = self.g.V
return ["[%s]\t%s--(%s)-->%s" %
(e.id, V[e.src].name, e.name, V[e.tgt].name)
for e in edges]
def __iter__(self):
while True:
edges = self.vert.outgoing
if not edges:
raise StopIteration()
where = "== Currently at: %s [%s]" % (self.vert.name, self.vert.id)
print >>self.out, where
if self.stop:
print >>self.out, "According to end conditions, we're done"
i = self.choose_fn(self, self.format_edge_list(edges))
self.out.flush()
if i in ('q', None): # quit
raise StopIteration()
elif i == '':
print >>self.out, 'huh?'
continue
elif i[0] == 'd': # debugger
self.debugger.set_trace()
elif i[0] == 'f': # force
for s in i.split()[1:]:
yield (s, s, ())
elif i[0] == 'g': # goto
for name in i.split()[1:]:
for step in self.goto([self.choose_vert(name).id]):
yield step
self.vert = step
elif i[0] == 'j': # jump
while True:
try:
self.vert = self.choose_vert(i.split()[-1])
break
except Exception as e:
print >>self.out, 'huh? %r' % e
elif i[0] in 'h?':
print >>self.out, self.help
elif i.strip().isdigit():
index = int(i.strip())
if index >= len(edges):
print >>self.out, 'huh?'
else:
edge = edges[index]
self.vert = self.step(self.vert, edge)
yield edge
yield self.vert
class MasterPlan(Planner):
def __init__(self, plans):
self.plans = plans
self.i = 0
def __call__(self, g, stop, start, context):
self.step = (None, start)
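        # self.step remembers the last step yielded by the planner currently
        # running; `self.step[0] or self.step[1]` below turns that into the
        # start argument for the next planner, and the (None, start) seed makes
        # the very first planner begin at the requested start.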
while self.i < len(self.plans):
planner = self.plans[self.i]
start = self.step[0] or self.step[1]
for step in planner(g, stop, start, context):
self.step = step
yield step
self.i += 1
def build(specs):
"""Import, construct and aggregate requested reporters."""
planners = []
for spec in specs:
planners.append(
codeloader.construct(
spec,
default_module=__name__,
call_by_default=True))
if len(planners) == 1:
return planners[0]
else:
return MasterPlan(planners)
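# All public planners defined in this module: concrete Planner subclasses that
# carry a docstring (Planner itself and undocumented helpers are excluded).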
planners = [cls
for cls in locals().values()
if type(cls) is type
and issubclass(cls, Planner)
and cls.__doc__
and cls is not Planner]
|
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""An OrderedSet is a set that remembers its insertion order, and a FrozenOrderedSet is one that is
also immutable.
Based on the library `ordered-set` developed by Robyn Speer and released under the MIT license:
https://github.com/LuminosoInsight/ordered-set.
The library `ordered-set` is itself based on a recipe originally posted to ActiveState
Recipes by Raymond Hettiger and released under the MIT license:
http://code.activestate.com/recipes/576694/.
"""
from __future__ import annotations
import itertools
from typing import AbstractSet, Any, Hashable, Iterable, Iterator, MutableSet, Set, TypeVar, cast
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
_TAbstractOrderedSet = TypeVar("_TAbstractOrderedSet", bound="_AbstractOrderedSet")
class _AbstractOrderedSet(AbstractSet[T]):
"""Common functionality shared between OrderedSet and FrozenOrderedSet."""
def __init__(self, iterable: Iterable[T] | None = None) -> None:
# Using a dictionary, rather than using the recipe's original `self |= iterable`, results
# in a ~20% performance increase for the constructor.
#
# NB: Dictionaries are ordered in Python 3.6+. While this was not formalized until Python
# 3.7, Python 3.6 uses this behavior; Pants requires CPython 3.6+ to run, so this
# assumption is safe for us to rely on.
self._items: dict[T, None] = {v: None for v in iterable or ()}
def __len__(self) -> int:
"""Returns the number of unique elements in the set."""
return len(self._items)
def __copy__(self: _TAbstractOrderedSet) -> _TAbstractOrderedSet:
"""Return a shallow copy of this object."""
return self.__class__(self)
def __contains__(self, key: Any) -> bool:
"""Test if the item is in this ordered set."""
return key in self._items
def __iter__(self) -> Iterator[T]:
return iter(self._items)
def __reversed__(self) -> Iterator[T]:
return reversed(tuple(self._items.keys()))
def __repr__(self) -> str:
name = self.__class__.__name__
if not self:
return f"{name}()"
return f"{name}({list(self)!r})"
def __eq__(self, other: Any) -> bool:
"""Returns True if other is the same type with the same elements and same order."""
if not isinstance(other, self.__class__):
return NotImplemented
return all(x == y for x, y in itertools.zip_longest(self._items, other._items))
def __or__(self: _TAbstractOrderedSet, other: Iterable[T]) -> _TAbstractOrderedSet: # type: ignore[override]
return self.union(other)
def union(self: _TAbstractOrderedSet, *others: Iterable[T]) -> _TAbstractOrderedSet:
"""Combines all unique items.
Each item's order is defined by its first appearance.
"""
# Differences with AbstractSet: our set union forces "other" to have the same type. That
# is, while AbstractSet allows {1, 2, 3} | {(True, False)} resulting in
# set[int | tuple[bool, bool]], the analogous for descendants of _TAbstractOrderedSet is
# not allowed.
#
# GOTCHA: given _TAbstractOrderedSet[S]:
# if T is a subclass of S => _TAbstractOrderedSet[S] => *appears* to perform
# unification but it doesn't
# if S is a subclass of T => type error (while AbstractSet would resolve to
# AbstractSet[T])
merged_iterables = itertools.chain([cast(Iterable[T], self)], others)
return self.__class__(itertools.chain.from_iterable(merged_iterables))
def __and__(self: _TAbstractOrderedSet, other: Iterable[T]) -> _TAbstractOrderedSet:
# The parent class's implementation of this is backwards.
return self.intersection(other)
def intersection(self: _TAbstractOrderedSet, *others: Iterable[T]) -> _TAbstractOrderedSet:
"""Returns elements in common between all sets.
Order is defined only by the first set.
"""
cls = self.__class__
if not others:
return cls(self)
common = set.intersection(*(set(other) for other in others))
return cls(item for item in self if item in common)
def difference(self: _TAbstractOrderedSet, *others: Iterable[T]) -> _TAbstractOrderedSet:
"""Returns all elements that are in this set but not the others."""
cls = self.__class__
if not others:
return cls(self)
other = set.union(*(set(other) for other in others))
return cls(item for item in self if item not in other)
def issubset(self, other: Iterable[T]) -> bool:
"""Report whether another set contains this set."""
try:
# Fast check for obvious cases
if len(self) > len(other): # type: ignore[arg-type]
return False
except TypeError:
pass
return all(item in other for item in self)
def issuperset(self, other: Iterable[T]) -> bool:
"""Report whether this set contains another set."""
try:
# Fast check for obvious cases
if len(self) < len(other): # type: ignore[arg-type]
return False
except TypeError:
pass
return all(item in self for item in other)
def __xor__(self: _TAbstractOrderedSet, other: Iterable[T]) -> _TAbstractOrderedSet: # type: ignore[override]
return self.symmetric_difference(other)
def symmetric_difference(
self: _TAbstractOrderedSet, other: Iterable[T]
) -> _TAbstractOrderedSet:
"""Return the symmetric difference of this OrderedSet and another set as a new OrderedSet.
That is, the new set will contain all elements that are in exactly one of the sets.
Their order will be preserved, with elements from `self` preceding elements from `other`.
"""
cls = self.__class__
diff1 = cls(self).difference(other)
diff2 = cls(other).difference(self)
return diff1.union(diff2)
class OrderedSet(_AbstractOrderedSet[T], MutableSet[T]):
"""A mutable set that retains its order.
This is not safe to use with the V2 engine.
"""
def add(self, key: T) -> None:
"""Add `key` as an item to this OrderedSet."""
self._items[key] = None
def update(self, iterable: Iterable[T]) -> None:
"""Update the set with the given iterable sequence."""
for item in iterable:
self.add(item)
def discard(self, key: T) -> None:
"""Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item.
"""
self._items.pop(key, None)
def clear(self) -> None:
"""Remove all items from this OrderedSet."""
self._items.clear()
def difference_update(self, *others: Iterable[T]) -> None:
"""Update this OrderedSet to remove items from one or more other sets."""
items_to_remove: set[T] = set()
for other in others:
items_as_set = set(other)
items_to_remove |= items_as_set
self._items = {item: None for item in self._items.keys() if item not in items_to_remove}
def intersection_update(self, other: Iterable[T]) -> None:
"""Update this OrderedSet to keep only items in another set, preserving their order in this
set."""
other = set(other)
self._items = {item: None for item in self._items.keys() if item in other}
def symmetric_difference_update(self, other: Iterable[T]) -> None:
"""Update this OrderedSet to remove items from another set, then add items from the other
set that were not present in this set."""
items_to_add = [item for item in other if item not in self]
items_to_remove = cast(Set[T], set(other))
self._items = {item: None for item in self._items.keys() if item not in items_to_remove}
for item in items_to_add:
self._items[item] = None
class FrozenOrderedSet(_AbstractOrderedSet[T_co], Hashable):
"""A frozen (i.e. immutable) set that retains its order.
This is safe to use with the V2 engine.
"""
def __init__(self, iterable: Iterable[T_co] | None = None) -> None:
super().__init__(iterable)
self._hash: int | None = None
def __hash__(self) -> int:
if self._hash is None:
self._hash = 0
for item in self._items.keys():
self._hash ^= hash(item)
return self._hash
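# A small self-check of the semantics defined above, assuming this module is
# run directly: duplicates collapse to their first occurrence, iteration order
# is insertion order, and only FrozenOrderedSet is hashable.
if __name__ == "__main__":
    mutable = OrderedSet([3, 1, 3, 2])
    assert list(mutable) == [3, 1, 2]
    mutable.add(1)  # already present, so the order does not change
    assert list(mutable) == [3, 1, 2]
    frozen = FrozenOrderedSet("abca")
    assert list(frozen) == ["a", "b", "c"]
    assert hash(frozen) == hash(FrozenOrderedSet("abc"))  # same elements, same hash
    assert frozen | FrozenOrderedSet("d") == FrozenOrderedSet("abcd")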
|
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for parameter_server_client.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
from absl import logging
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute.client import client
from tensorflow.python.distribute.client import parameter_server_client
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.server_lib import ClusterSpec
class ErrorReportingThread(threading.Thread):
error = None
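    # Stores an exception raised inside any wrapped thread target so that
    # TestCaseWithErrorReportingThread.tearDown can re-raise it in the main
    # test thread instead of it being silently swallowed.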
def __init__(self, *args, **kwargs):
assert "target" in kwargs
target = kwargs["target"]
@functools.wraps(target)
def wrapped_target(*args, **kwargs):
try:
return target(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
ErrorReportingThread.error = e
kwargs["target"] = wrapped_target
super(ErrorReportingThread, self).__init__(*args, **kwargs)
class TestCaseWithErrorReportingThread(test.TestCase):
@classmethod
def setUpClass(cls):
cls._threading_thread = threading.Thread
threading.Thread = ErrorReportingThread
super(TestCaseWithErrorReportingThread, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestCaseWithErrorReportingThread, cls).tearDownClass()
threading.Thread = cls._threading_thread
def setUp(self):
ErrorReportingThread.error = None
super(TestCaseWithErrorReportingThread, self).setUp()
def tearDown(self):
super(TestCaseWithErrorReportingThread, self).tearDown()
if ErrorReportingThread.error:
raise ErrorReportingThread.error # pylint: disable=raising-bad-type
def make_client(num_workers, num_ps):
# TODO(rchao): Test the internal rpc_layer version.
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
cluster_def["chief"] = [
"localhost:%d" % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = SimpleClusterResolver(
ClusterSpec(cluster_def), rpc_layer="grpc")
return parameter_server_client.ParameterServerClient(cluster_resolver)
class ParameterServerClientTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ParameterServerClientTest, cls).setUpClass()
cls.client = make_client(num_workers=3, num_ps=2)
def testBasic(self):
self.client._strategy.extended._variable_count = 0
with self.client.strategy.scope():
v1 = variables.Variable(initial_value=0.0)
v2 = variables.Variable(initial_value=1.0)
self.assertEqual(self.client._strategy.extended._variable_count, 2)
@def_function.function
def worker_fn():
v1.assign_add(0.1)
v2.assign_sub(0.2)
return v1.read_value() / v2.read_value()
results = self.client.schedule(worker_fn)
logging.info("Results of experimental_run_v2: %f",
self.client.fetch(results))
self.assertAlmostEqual(v1.read_value().numpy(), 0.1, delta=1e-6)
self.assertAlmostEqual(v2.read_value().numpy(), 0.8, delta=1e-6)
def testFnReturnNestedValues(self):
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {"v": x}
got = self.client.schedule(f)
want = 2, (3, 4), [5], {"v": 1}
self.assertEqual(self.client.fetch(got), want)
def testInputFunction(self):
def input_fn():
return dataset_ops.DatasetV2.range(1, 2)
with self.client.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int64)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
return x
distributed_dataset = self.client.create_per_worker_dataset(input_fn)
result = self.client.schedule(worker_fn, args=(iter(distributed_dataset),))
result = self.client.fetch(result)
self.assertEqual(result, (1,))
result = self.client.schedule(worker_fn, args=(iter(distributed_dataset),))
result = self.client.fetch(result)
self.assertEqual(result, (1,))
self.assertAlmostEqual(v.read_value().numpy(), 2, delta=1e-6)
def testAsyncScheduleAndJoin(self):
def input_fn():
return dataset_ops.DatasetV2.from_tensor_slices([2] * 10)
with self.client.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
distributed_dataset = self.client.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.client.join()
self.assertEqual(v.read_value().numpy(), 0)
for _ in range(5):
self.client.schedule(worker_fn, args=(iterator,))
self.client.join()
    # With 5 additions it should be 2*5 = 10.
self.assertEqual(v.read_value().numpy(), 10)
for _ in range(5):
self.client.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.client.join()
self.client.join()
self.client.join()
self.assertTrue(self.client.done())
# Likewise, it's now 20.
self.assertEqual(v.read_value().numpy(), 20)
def testInputFunctionWithMap(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
return dataset_ops.DatasetV2.range(0, 10).map(map_fn)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = (
self.client.create_per_worker_dataset(input_fn))
result = self.client.schedule(
worker_fn, args=(iter(distributed_dataset),))
self.assertEqual(result.fetch(), (10,))
self.assertEqual(self._map_fn_tracing_count, 1)
def testInputFunctionCreateVariables(self):
def input_fn():
v = variables.Variable(initial_value=0.0)
return v.read_value()
with self.assertRaises(ValueError):
self.client.create_per_worker_dataset(input_fn)
def testPerWorkerValue(self):
var_shape = tuple()
var_dtype = dtypes.float32
var_name = "var"
def create_var():
var = variables.Variable(
initial_value=0.0, dtype=var_dtype, name=var_name)
self.assertIn("worker", var.device)
return var
worker_local_var = self.client._create_per_worker_resources(create_var)
# The following is a workaround to allow `worker_local_var` to be passed in
# as args to the `client.schedule` method which requires tensor specs to
# trace tf.function but _create_worker_resources' return values don't have
# tensor specs. We can get rid of this workaround once
# _create_worker_resources is able to infer the tensor spec of the return
# value of the function passed in. See b/154675763.
for var in worker_local_var._values:
var._set_type_spec(tensor_spec.TensorSpec(var_shape, var_dtype, var_name))
def worker_fn(var):
var.assign_add(1.0)
for _ in range(10):
# Which slice of `worker_local_var` will be used will depend on which
# worker the `worker_fn` gets scheduled on.
self.client.schedule(worker_fn, args=(worker_local_var,))
self.client.join()
var_sum = sum(self.client.fetch(worker_local_var._values))
self.assertEqual(var_sum, 10.0)
class LimitedClosureQueueSizeBasicTest(ParameterServerClientTest):
"""Test basic functionality works with explicit maximum closure queue size.
Execute the same set of test cases as in ParameterServerClientTest, with an
    explicit size limit for the closure queue. Note that even when the queue size
    is set to infinite, there is still a practical maximum size (depending on the
    host memory limit) that can cause queue.put operations to block when
    scheduling a large number of closures on a big cluster. These tests make sure
    that the client does not run into deadlocks in such a scenario.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueSizeBasicTest, cls).setUpClass()
client._CLOSURE_QUEUE_MAX_SIZE = 2
cls.client = make_client(num_workers=3, num_ps=2)
class ErrorReportingTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ErrorReportingTest, cls).setUpClass()
cls.client = make_client(num_workers=3, num_ps=2)
with cls.client.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
@def_function.function
def _normal_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
self.iteration.assign_add(1.0)
return math_ops.reduce_mean(math_ops.matmul(x, y))
@def_function.function
def _error_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
check_ops.assert_non_positive_v2(math_ops.reduce_sum(math_ops.matmul(x, y)))
self.iteration.assign_add(1.0)
return self.iteration
@def_function.function
def _long_function(self):
x = random_ops.random_uniform((1000, 1000))
for _ in math_ops.range(10000):
a = random_ops.random_uniform((1000, 1000))
b = random_ops.random_uniform((1000, 1000))
x += math_ops.matmul(a, b)
return x
def testJoinRaiseError(self):
for _ in range(3):
self.client.schedule(self._normal_function)
self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.client.join()
def testScheduleRaiseError(self):
for _ in range(3):
self.client.schedule(self._normal_function)
self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.client.schedule(self._normal_function)
def testScheduleRaiseErrorWithMultipleFailure(self):
for _ in range(3):
self.client.schedule(self._normal_function)
self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.client.schedule(self._error_function)
self.client.join()
def testErrorWillbeCleared(self):
self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.client.join()
for _ in range(3):
self.client.schedule(self._normal_function)
self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.client.join()
def testRemoteValueReturnError(self):
result = self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
result.fetch()
# Clear the error.
with self.assertRaises(errors.InvalidArgumentError):
self.client.join()
def testInputError(self):
aborted = self.client.schedule(self._error_function)
@def_function.function
def func(x):
return x + 1.0
with self.assertRaises(errors.InvalidArgumentError):
self.client.join()
result = self.client.schedule(func, args=(aborted,))
with self.assertRaises(client.InputError):
result.fetch()
with self.assertRaises(client.InputError):
self.client.join()
def testCancellation(self):
for _ in range(3):
self.client.schedule(self._normal_function)
long_function = self.client.schedule(self._long_function)
self.client.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.client.join()
with self.assertRaises(client.FunctionRetryableError):
long_function.fetch()
for _ in range(3):
self.client.schedule(self._normal_function)
self.client.join()
class LimitedClosureQueueErrorTest(ErrorReportingTest):
"""Test error reporting works with explicit maximum closure queue size.
Execute the same set of test cases as in ErrorReportingTest, with an explicit
size limit for the closure queue.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueErrorTest, cls).setUpClass()
client._CLOSURE_QUEUE_MAX_SIZE = 2
cls.client = make_client(num_workers=3, num_ps=2)
with cls.client.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
class StrategyRunTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(StrategyRunTest, cls).setUpClass()
cls.client = make_client(num_workers=1, num_ps=1)
def testStrategyRun(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.client._strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
return input_tensor + v
return self.client._strategy.run(replica_fn, args=(input_tensor,))
# Asserting scheduling in scope has the expected behavior.
result = self.client.schedule(worker_fn, args=(constant_op.constant(3),))
self.assertIsInstance(result, client.RemoteValue)
self.assertEqual(result.fetch(), 4)
# Asserting scheduling out of scope has the expected behavior.
result = self.client.schedule(worker_fn, args=(constant_op.constant(3),))
self.assertEqual(result.fetch(), 4)
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/python
#
# Copyright (c) 2012, NORDUnet A/S
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the NORDUnet nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author : Fredrik Thulin <fredrik@thulin.net>
#
"""
Configuration (key management) interface to YubiHSM.
"""
import os
import re
import serial
import logging
from vccs_hsm_keydb import HsmKey
class VCCSCfgError(Exception):
"""
Base class for all exceptions relating to the VCCS HSM communication.
:param reason: reason as string
"""
def __init__(self, reason):
self.reason = reason
def __str__(self):
return '<%s instance at %s: %s>' % (
self.__class__.__name__,
hex(id(self)),
self.reason,
)
class VCCSCfgInteractError(VCCSCfgError):
"""
Exception class with extra information about when our HSM interactions fail.
"""
def __init__(self, reason, all_commands, all_data, expected, got, last_send, ):
VCCSCfgError.__init__(self, reason)
self.all_commands = all_commands
self.all_data = all_data
self.expected = expected
self.got = got
self.last_send = last_send
def __str__(self):
return '<%s instance at %s: %s\n(last send %s, expected %s, got %s)>' % (
self.__class__.__name__,
hex(id(self)),
self.reason,
repr(self.last_send),
repr(self.expected),
repr(self.got),
)
class HsmSerial():
"""
Low-level interface to the HSM. Read, write, those kinds of things.
"""
def __init__(self, device, logger):
self.device = device
self.logger = logger
self.ser = serial.Serial(device, 115200, timeout = 0.1)
def __repr__(self):
return '<%s instance at %s: %s>' % (
self.__class__.__name__,
hex(id(self)),
self.device
)
def __del__(self):
self.logger.debug("Destroying %s", str(self))
if self.ser:
self.ser.close()
def close(self):
"""
Close the HSM.
:return: True on success
"""
self.logger.debug("Closing %s", str(self))
self.ser.close()
self.ser = None
return True
def read(self, num_bytes):
"""
Read num_bytes from HSM.
:param num_bytes: Number of bytes to read.
:returns: Data as string
"""
data = self.ser.read(num_bytes)
return data
def write(self, data):
"""
Write data to HSM.
:param data: Data to write as string
"""
self.ser.write(str(data))
self.logger.debug("WRITE: %s" % (repr(data)))
def drain(self):
"""
Read until the read times out.
"""
data = ''
while True:
x = self.read(1)
if not x:
break
data += x
if data:
self.logger.debug("DRAIN : %s" % (repr(data)))
def interact(self, commands, retry_count = 5, add_cr = True):
"""
Process a list of 'send' or 'expect' command tuples.
e.g.
commands = [('send', 'sysinfo'),
('expect', '^YubiHSM version'),
...
]
:param commands: List of command-tuples
:param retry_count: Number of times to retry reading the expected result
:param add_cr: Add a Carriage-Return to the command sent or not
:returns: Command output as string
"""
data = ''
last_send = None
self.logger.debug("INTERACT: %s" % commands)
for (cmd, arg) in commands:
if cmd == 'send':
if arg or add_cr:
if add_cr:
arg += '\r'
self.write(arg)
last_send = arg
elif cmd == 'expect':
if not arg:
continue
cmd_data = ''
match = False
while not match:
this = self.readline(retry = 3)
if this:
cmd_data += this
else:
retry_count -= 1
if not retry_count:
raise VCCSCfgInteractError('YubiHSM did not produce the expected data "{!s}"'.format(arg),
commands, data, arg, cmd_data, last_send,
)
for line in cmd_data.split('\n'):
if re.match(arg, line):
match = True
data += cmd_data
else:
assert False, 'unknown interact() command %r' % (cmd,)
return data
def readline(self, retry):
""" Read until the YubiHSM stops sending, or we spot a newline.
:param retry: Number of times to retry, integer
:returns: Read data as string (might be partial)
"""
data = ''
while True:
this = self.read(1)
if not this:
retry -= 1
if retry:
continue
self.logger.debug("READ: %s (timeout)" % (repr(data)))
return data
retry = 1 # No retries when the HSM has started sending
data += this
if this == '\n' or this == '\r':
if len(data) > 1:
self.logger.debug("READ: %s" % (repr(data[:-1])))
return data
class HsmConfigurator():
"""
Class modelling the HSM to be configured.
:param args: argparse data
:param logger: logging logger
:param cfg_password: HSM configuration password as string
:param master_key: HSM master key as string
:raise: VCCSCfgError on initialization error
"""
_DEVICE_BY_ID_DIR = '/dev/serial/by-id/'
class HsmLogFilter(logging.Filter):
"""
Logger filter implementing simple ON-OFF semantics.
"""
def filter(self, record):
"""
Filter function.
:param record: Something about to get logged.
:return: bool, Whether to log or not.
"""
if hasattr(self, 'enabled'):
return self.enabled
return True
def __init__(self, args, logger, cfg_password, master_key = None):
"""
:param args: argparse data
:param logger: logging logger
:param cfg_password: HSM configuration password as string
:param master_key: HSM master key as string, or None to prompt for it later
:raise: VCCSCfgError on initialization error
"""
self.debug = args.debug
self.logger = logger
self.configured = None
self.yhsm_id = None
self.master_key = master_key
self.cfg_password = cfg_password
self.unprotected = False
self.hsm = HsmSerial(args.device, logger)
self.logger.debug("Opened %s" % self.hsm)
try:
if not self.execute('sysinfo', '^YubiHSM version'):
self.hsm.close()
raise VCCSCfgError('Failed executing sysinfo')
except Exception:
self.hsm.close()
raise
self.yhsm_id = self.hsm_id()
# set up contextual logger with our HSM id
try:
# self.logger is probably a LoggerAdapter already - use its parent or our yhsm_id
# won't be visible
self.logger = logging.LoggerAdapter(self.logger.logger, {'yhsm_id': self.yhsm_id})
except Exception:
self.logger = logging.LoggerAdapter(self.logger, {'yhsm_id': self.yhsm_id})
self.logfilter = HsmConfigurator.HsmLogFilter()
self.logger.logger.addFilter(self.logfilter)
self.logger.debug("Opened %s", str(self))
self.hsm.logger = self.logger
def logging(self, status):
"""
Enable or disable logging.
:param status: bool, True to enable logging
"""
self.logfilter.enabled = status
def execute(self, command, expect, add_cr = True):
"""
Send one or more commands to the YubiHSM and read until we get the expected response,
and another prompt.
For better control, use interact() instead.
:param command: YubiHSM command to execute, string
:param expect: String expected to occur as a result of the executed command
:param add_cr: Add a Carriage-Return to the command sent or not
"""
self.hsm.drain()
next_prompt = '^(NO_CFG|HSM).*> .*'
data = self.hsm.interact([('send', ''),
('expect', next_prompt),
('send', command),
('expect', expect),
('expect', next_prompt),
], add_cr=add_cr)
lines = data.split('\n')
if re.match('^(NO_CFG|HSM).*> .*', lines[-1]):
old = self.configured
self.configured = lines[-1].startswith('HSM')
if self.configured != old:
self.logger.debug(
"HSM configured status update : %s (based on '%s')" % (self.configured, lines[-1][:-1]))
# expected data seen (or none expected) and new prompt too
return data
def hsm_id(self):
"""
Get the CPU unique identifier of the HSM.
We have to look in /dev/serial/by-id/ to figure out the unique ID when the
HSM is in configuration mode.
"""
if self.yhsm_id:
return self.yhsm_id
(dirpath, dirnames, filenames) = os.walk(self._DEVICE_BY_ID_DIR).next()
for this in filenames:
link = os.readlink(os.path.join(self._DEVICE_BY_ID_DIR, this))
if os.path.abspath(os.path.join(dirpath, link)) == self.hsm.device:
m = re.match('usb-Yubico_Yubico_YubiHSM_([0-9A-F]+)-if00', this)
if m:
self.yhsm_id = m.groups()[0]
return self.yhsm_id
return this
raise Exception('Failed finding link to %s in %s' % (self.hsm.device, self._DEVICE_BY_ID_DIR))
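# Illustrative /dev/serial/by-id/ entry that the regexp above expects
# (serial number and tty name are made up):
#
#   usb-Yubico_Yubico_YubiHSM_0123ABCD-if00 -> ../../ttyACM0
#
# for which hsm_id() would return '0123ABCD' when this HsmSerial was opened
# on /dev/ttyACM0.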
def get_random(self, byte_count):
"""
Get random data from the HSM, and then XOR it with random data from /dev/urandom
to guard against bad randomness in either source.
:param byte_count: Number of random bytes to return
:returns: Random data as string
"""
bsize = 16
# get 256 bytes extra to stir up the pool
output = self.execute('rng %i' % ((256 / bsize) + (byte_count / bsize)), '').split('\n')
hex_str = output[-2][:-1] # second last line, and remove \r
self.logger.debug("Got %s bytes of randomness from HSM" % (len(hex_str) / 2))
# select bytes to use like OATH does (last byte is offset from end)
last_byte = int(hex_str[-2:], 16)
self.logger.debug(
"Offset 0x%x, will use bytes %i-%i from end." % (last_byte, (byte_count + last_byte), last_byte))
from_hsm = hex_str.decode('hex')[-(byte_count + last_byte):-last_byte]
from_os = os.urandom(byte_count)
xored = ''.join([chr(ord(a) ^ ord(b)) for (a, b) in zip(from_hsm, from_os)])
self.logger.debug("Got %i bytes of randomness from HSM : '%s'" % (byte_count, from_hsm.encode('hex')))
self.logger.debug("Got %i bytes of randomness from OS : '%s'" % (byte_count, from_os.encode('hex')))
self.logger.debug("HSM and OS data xored together : '%s'" % (xored.encode('hex')))
return xored
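# A minimal sketch of the XOR combination above, using made-up two-byte values
# (Python 2 byte strings, as in the rest of this module):
#
#   >>> from_hsm = '\x0f\xf0'
#   >>> from_os = '\xff\x00'
#   >>> ''.join([chr(ord(a) ^ ord(b)) for (a, b) in zip(from_hsm, from_os)]).encode('hex')
#   'f0f0'
#
# Predicting the combined output requires predicting both sources.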
def get_crypto_key(self, text, length = None, generate = False, pad = True):
"""
Prompt the user for a crypto key or, if generate==True, generate one using
a combination of the YubiHSM random number generator and the host OS RNG.
:param text: User prompt as string
:param length: Expected length in bytes, integer
:param generate: Generate or not, bool
:param pad: Pad or not, bool
:return: Key as a binary string
"""
while True:
print ""
data_str = raw_input(text)
if not data_str:
if not generate:
continue
self.logger.info("No key given, will generate one using HSM and OS random generators.")
return self.get_random(length)
try:
data = data_str.decode('hex')
if length is not None:
if pad:
data = data.ljust(length, chr(0x0))
if length is not None and len(data) != length:
raise Exception('Key given is not %i bytes long (%i)' % (length, len(data)))
return data
except Exception as e:
self.logger.error("Failed decoding input : %s" % e)
def unlock_keystore(self, skip_test = False):
"""
Decrypt the key store in the HSM using the master key.
Prompt for the master key unless self.master_key is set already.
:param skip_test: Skip validating the keystore is accessible
"""
if not skip_test:
# check if we need to decrypt the keystore
prompt = self.execute('', '')[:-1]
if "keys not decrypted" not in prompt:
return True
if not self.master_key:
self.master_key = self.get_crypto_key("Enter the master key as hex : ",
length = 32, pad = True, generate = False).encode('hex')
master_key = self.master_key
(send, expect,) = ('send', 'expect',) # for color highlighting clarity below
commands = [(send, ''), (expect, '^HSM.*keys not decrypted.*> .*'),
(send, 'keydecrypt'), (expect, '.*Enter key.*'),
(send, master_key), (expect, '^Key decrypt succeeded'),
]
self.hsm.interact(commands)
return True
def unprotect(self):
"""
Remove write protect mode, using the cfg password.
"""
if self.unprotected:
return
(send, expect,) = ('send', 'expect',) # for color highlighting clarity below
commands = [(send, ''), (expect, '^HSM.*> .*'),
(send, 'unprot'), (expect, '.*enter password.*'),
(send, self.cfg_password), (expect, '.*ok.*'),
]
self.hsm.interact(commands)
self.unprotected = True
def keyload(self, key):
"""
Load this key into a HSM.
:param key: HsmKey()
"""
self.unprotect()
(send, expect,) = ('send', 'expect',) # for color highlighting clarity below
escape_char = chr(27)
commands = [(send, ''), (expect, '^HSM.*> .*'),
(send, 'flags %x' % key.flags), (expect, 'Enabled flags 0*%x = ' % key.flags),
(send, 'keyload'), (expect, '.*Load key data now.*'),
(send, '%s,%s,,,\r%c' % (key.keyid.rjust(8, '0'), key.key, escape_char)), (expect, '.*stored ok.*'),
(send, ''), (expect, '^HSM.*keys changed.*> .*'),
]
self.hsm.interact(commands)
def keylist(self):
"""
List all the keys in the HSM. Return a list of HsmKey instances.
"""
self.unlock_keystore()
response = self.execute('keylist', 'Entries.*invalid 00000 free.*') # safeguard against bad entries
keys = []
for line in response.split('\n'):
# format : "121113ab,00010002" or "121113ab,key-goes-here-if-debug,00010002"
match = re.match('^([0-9a-f]{8}),([0-9a-f,]+)*([0-9a-f]{8})\r$', line)
if match:
keyid = match.groups()[0]
# don't need the secret, so leave it out
flags = match.groups()[2]
keys.append(HsmKey(keyid, None, int(flags, 16), 'unknown'))
return sorted(keys, key = lambda this: this.keyid)
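# Example of how the regexp above splits a non-debug 'keylist' line
# (key id and flags are made up):
#
#   >>> m = re.match('^([0-9a-f]{8}),([0-9a-f,]+)*([0-9a-f]{8})\r$', '121113ab,00010002\r')
#   >>> m.groups()[0], m.groups()[2]
#   ('121113ab', '00010002')
#
# i.e. group 1 is the key id and group 3 the flags; the middle group (the key
# itself) only appears when the YubiHSM is in debug mode.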
def disable_key(self, key):
"""
Disable a key handle. Overwrites the secret in the YubiHSM, but keeps the key
handle id occupied so a new key can't be written to an old id.
:param key: HsmKey to disable
"""
self.unprotect()
self.execute("keydis {s}".format(key.keyid), '')
def keycommit(self, check_with_user = True):
"""
Commit HSM keys to non-volatile storage inside the HSM, optionally verifying
this is the user's intent.
:param check_with_user: Check with user before committing or not
"""
while check_with_user:
res = raw_input("Commit changes to keystore? Enter 'yes' or 'no' : ")
if res == "no":
self.logger.info("Keys NOT committed to permanent storage in HSM.")
# XXX should maybe 'keydecrypt' here to revert any added keys?
return False
elif res == "yes":
break
self.execute('keycommit', '.*Done')
self.logger.info("Keys committed to keystore.")
return True
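# A minimal usage sketch (the password is made up); `args` only needs the
# attributes this module actually reads, i.e. `device` and `debug`:
#
#   hsm = HsmConfigurator(args, logger, cfg_password = 'my-cfg-password')
#   hsm.unlock_keystore()
#   for key in hsm.keylist():
#       print key.keyid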
|
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.config_controller import instance_pb2
from google3.cloud.graphite.mmv2.services.google.config_controller import (
instance_pb2_grpc,
)
from typing import List
class Instance(object):
def __init__(
self,
name: str = None,
labels: dict = None,
bundles_config: dict = None,
use_private_endpoint: bool = None,
gke_resource_link: str = None,
state: str = None,
management_config: dict = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.labels = labels
self.bundles_config = bundles_config
self.use_private_endpoint = use_private_endpoint
self.management_config = management_config
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = instance_pb2_grpc.ConfigcontrollerAlphaInstanceServiceStub(
channel.Channel()
)
request = instance_pb2.ApplyConfigcontrollerAlphaInstanceRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if InstanceBundlesConfig.to_proto(self.bundles_config):
request.resource.bundles_config.CopyFrom(
InstanceBundlesConfig.to_proto(self.bundles_config)
)
else:
request.resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
request.resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if InstanceManagementConfig.to_proto(self.management_config):
request.resource.management_config.CopyFrom(
InstanceManagementConfig.to_proto(self.management_config)
)
else:
request.resource.ClearField("management_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyConfigcontrollerAlphaInstance(request)
self.name = Primitive.from_proto(response.name)
self.labels = Primitive.from_proto(response.labels)
self.bundles_config = InstanceBundlesConfig.from_proto(response.bundles_config)
self.use_private_endpoint = Primitive.from_proto(response.use_private_endpoint)
self.gke_resource_link = Primitive.from_proto(response.gke_resource_link)
self.state = InstanceStateEnum.from_proto(response.state)
self.management_config = InstanceManagementConfig.from_proto(
response.management_config
)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = instance_pb2_grpc.ConfigcontrollerAlphaInstanceServiceStub(
channel.Channel()
)
request = instance_pb2.DeleteConfigcontrollerAlphaInstanceRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if InstanceBundlesConfig.to_proto(self.bundles_config):
request.resource.bundles_config.CopyFrom(
InstanceBundlesConfig.to_proto(self.bundles_config)
)
else:
request.resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
request.resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if InstanceManagementConfig.to_proto(self.management_config):
request.resource.management_config.CopyFrom(
InstanceManagementConfig.to_proto(self.management_config)
)
else:
request.resource.ClearField("management_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteConfigcontrollerAlphaInstance(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = instance_pb2_grpc.ConfigcontrollerAlphaInstanceServiceStub(
channel.Channel()
)
request = instance_pb2.ListConfigcontrollerAlphaInstanceRequest()
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListConfigcontrollerAlphaInstance(request).items
def to_proto(self):
resource = instance_pb2.ConfigcontrollerAlphaInstance()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if InstanceBundlesConfig.to_proto(self.bundles_config):
resource.bundles_config.CopyFrom(
InstanceBundlesConfig.to_proto(self.bundles_config)
)
else:
resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if InstanceManagementConfig.to_proto(self.management_config):
resource.management_config.CopyFrom(
InstanceManagementConfig.to_proto(self.management_config)
)
else:
resource.ClearField("management_config")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
class InstanceBundlesConfig(object):
def __init__(self, config_controller_config: dict = None):
self.config_controller_config = config_controller_config
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_pb2.ConfigcontrollerAlphaInstanceBundlesConfig()
if InstanceBundlesConfigConfigControllerConfig.to_proto(
resource.config_controller_config
):
res.config_controller_config.CopyFrom(
InstanceBundlesConfigConfigControllerConfig.to_proto(
resource.config_controller_config
)
)
else:
res.ClearField("config_controller_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceBundlesConfig(
config_controller_config=InstanceBundlesConfigConfigControllerConfig.from_proto(
resource.config_controller_config
),
)
class InstanceBundlesConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceBundlesConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceBundlesConfig.from_proto(i) for i in resources]
class InstanceBundlesConfigConfigControllerConfig(object):
def __init__(self, enabled: bool = None):
self.enabled = enabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_pb2.ConfigcontrollerAlphaInstanceBundlesConfigConfigControllerConfig()
)
if Primitive.to_proto(resource.enabled):
res.enabled = Primitive.to_proto(resource.enabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceBundlesConfigConfigControllerConfig(
enabled=Primitive.from_proto(resource.enabled),
)
class InstanceBundlesConfigConfigControllerConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceBundlesConfigConfigControllerConfig.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceBundlesConfigConfigControllerConfig.from_proto(i) for i in resources
]
class InstanceManagementConfig(object):
def __init__(self, standard_management_config: dict = None):
self.standard_management_config = standard_management_config
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_pb2.ConfigcontrollerAlphaInstanceManagementConfig()
if InstanceManagementConfigStandardManagementConfig.to_proto(
resource.standard_management_config
):
res.standard_management_config.CopyFrom(
InstanceManagementConfigStandardManagementConfig.to_proto(
resource.standard_management_config
)
)
else:
res.ClearField("standard_management_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceManagementConfig(
standard_management_config=InstanceManagementConfigStandardManagementConfig.from_proto(
resource.standard_management_config
),
)
class InstanceManagementConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceManagementConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceManagementConfig.from_proto(i) for i in resources]
class InstanceManagementConfigStandardManagementConfig(object):
def __init__(
self,
network: str = None,
master_ipv4_cidr_block: str = None,
man_block: str = None,
cluster_cidr_block: str = None,
services_cidr_block: str = None,
cluster_named_range: str = None,
services_named_range: str = None,
):
self.network = network
self.master_ipv4_cidr_block = master_ipv4_cidr_block
self.man_block = man_block
self.cluster_cidr_block = cluster_cidr_block
self.services_cidr_block = services_cidr_block
self.cluster_named_range = cluster_named_range
self.services_named_range = services_named_range
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_pb2.ConfigcontrollerAlphaInstanceManagementConfigStandardManagementConfig()
)
if Primitive.to_proto(resource.network):
res.network = Primitive.to_proto(resource.network)
if Primitive.to_proto(resource.master_ipv4_cidr_block):
res.master_ipv4_cidr_block = Primitive.to_proto(
resource.master_ipv4_cidr_block
)
if Primitive.to_proto(resource.man_block):
res.man_block = Primitive.to_proto(resource.man_block)
if Primitive.to_proto(resource.cluster_cidr_block):
res.cluster_cidr_block = Primitive.to_proto(resource.cluster_cidr_block)
if Primitive.to_proto(resource.services_cidr_block):
res.services_cidr_block = Primitive.to_proto(resource.services_cidr_block)
if Primitive.to_proto(resource.cluster_named_range):
res.cluster_named_range = Primitive.to_proto(resource.cluster_named_range)
if Primitive.to_proto(resource.services_named_range):
res.services_named_range = Primitive.to_proto(resource.services_named_range)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceManagementConfigStandardManagementConfig(
network=Primitive.from_proto(resource.network),
master_ipv4_cidr_block=Primitive.from_proto(
resource.master_ipv4_cidr_block
),
man_block=Primitive.from_proto(resource.man_block),
cluster_cidr_block=Primitive.from_proto(resource.cluster_cidr_block),
services_cidr_block=Primitive.from_proto(resource.services_cidr_block),
cluster_named_range=Primitive.from_proto(resource.cluster_named_range),
services_named_range=Primitive.from_proto(resource.services_named_range),
)
class InstanceManagementConfigStandardManagementConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceManagementConfigStandardManagementConfig.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceManagementConfigStandardManagementConfig.from_proto(i)
for i in resources
]
class InstanceStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_pb2.ConfigcontrollerAlphaInstanceStateEnum.Value(
"ConfigcontrollerAlphaInstanceStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_pb2.ConfigcontrollerAlphaInstanceStateEnum.Name(resource)[
len("ConfigcontrollerAlphaInstanceStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
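# A minimal usage sketch (instance name, project, location and file below are
# made up); the nested configs are passed as the helper objects defined above,
# which is what to_proto() expects, and apply() builds an Apply request from
# the fields set on the object and copies the reconciled state back onto it:
#
#   instance = Instance(
#       name="my-instance",
#       bundles_config=InstanceBundlesConfig(
#           config_controller_config=InstanceBundlesConfigConfigControllerConfig(enabled=True)
#       ),
#       project="my-project",
#       location="us-central1",
#       service_account_file="service-account.json",
#   )
#   instance.apply()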
|
|
"""Topology components for FAUCET Mininet unit tests."""
import os
import pty
import select
import socket
import string
import shutil
import subprocess
import time
import netifaces
# pylint: disable=import-error
from mininet.log import error, output
from mininet.topo import Topo
from mininet.node import Controller
from mininet.node import Host
from mininet.node import OVSSwitch
import faucet_mininet_test_util
# TODO: mininet 2.2.2 leaks ptys (master/slave assigned in startShell);
# override as necessary to close them. Transclude overridden methods
# to avoid multiple inheritance complexity.
class FaucetHostCleanup(object):
"""TODO: Mininet host implemenation leaks ptys."""
master = None
shell = None
slave = None
def startShell(self, mnopts=None):
if self.shell:
error('%s: shell is already running\n' % self.name)
return
opts = '-cd' if mnopts is None else mnopts
if self.inNamespace:
opts += 'n'
cmd = ['mnexec', opts, 'env', 'PS1=' + chr(127),
'bash', '--norc', '-is', 'mininet:' + self.name]
self.master, self.slave = pty.openpty()
self.shell = self._popen(
cmd, stdin=self.slave, stdout=self.slave, stderr=self.slave,
close_fds=False)
self.stdin = os.fdopen(self.master, 'rw')
self.stdout = self.stdin
self.pid = self.shell.pid
self.pollOut = select.poll()
self.pollOut.register(self.stdout)
self.outToNode[self.stdout.fileno()] = self
self.inToNode[self.stdin.fileno()] = self
self.execed = False
self.lastCmd = None
self.lastPid = None
self.readbuf = ''
while True:
data = self.read(1024)
if data[-1] == chr(127):
break
self.pollOut.poll()
self.waiting = False
self.cmd('unset HISTFILE; stty -echo; set +m')
def terminate(self):
if self.shell is not None:
os.close(self.master)
os.close(self.slave)
self.shell.kill()
self.cleanup()
class FaucetHost(FaucetHostCleanup, Host):
pass
class FaucetSwitch(FaucetHostCleanup, OVSSwitch):
"""Switch that will be used by all tests (kernel based OVS)."""
def __init__(self, name, **params):
super(FaucetSwitch, self).__init__(
name=name, datapath='kernel', **params)
class VLANHost(FaucetHost):
"""Implementation of a Mininet host on a tagged VLAN."""
def config(self, vlan=100, **params):
"""Configure VLANHost according to (optional) parameters:
vlan: VLAN ID for default interface"""
super_config = super(VLANHost, self).config(**params)
intf = self.defaultIntf()
vlan_intf_name = '%s.%d' % (intf, vlan)
for cmd in (
'ip -4 addr flush dev %s' % intf,
'ip -6 addr flush dev %s' % intf,
'vconfig add %s %d' % (intf, vlan),
'ip link set dev %s up' % vlan_intf_name,
'ip -4 addr add %s dev %s' % (params['ip'], vlan_intf_name)):
self.cmd(cmd)
intf.name = vlan_intf_name
self.nameToIntf[vlan_intf_name] = intf
return super_config
class FaucetSwitchTopo(Topo):
"""FAUCET switch topology that contains a software switch."""
def _get_sid_prefix(self, ports_served):
"""Return a unique switch/host prefix for a test."""
# Linux tools require short interface names.
# pylint: disable=no-member
id_chars = string.letters + string.digits
id_a = int(ports_served / len(id_chars))
id_b = ports_served - (id_a * len(id_chars))
return '%s%s' % (
id_chars[id_a], id_chars[id_b])
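# Worked example of the mapping above (62 id_chars: a-z, A-Z, 0-9):
#
#   ports_served = 0  -> 'aa'
#   ports_served = 1  -> 'ab'
#   ports_served = 62 -> 'ba'
#
# so interface names stay short while remaining unique per test.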
def _add_tagged_host(self, sid_prefix, tagged_vid, host_n):
"""Add a single tagged test host."""
host_name = 't%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(name=host_name, cls=VLANHost, vlan=tagged_vid)
def _add_untagged_host(self, sid_prefix, host_n):
"""Add a single untagged test host."""
host_name = 'u%s%1.1u' % (sid_prefix, host_n + 1)
return self.addHost(name=host_name, cls=FaucetHost)
def _add_faucet_switch(self, sid_prefix, dpid):
"""Add a FAUCET switch."""
switch_name = 's%s' % sid_prefix
return self.addSwitch(
name=switch_name,
cls=FaucetSwitch,
dpid=faucet_mininet_test_util.mininet_dpid(dpid))
def build(self, ports_sock, test_name, dpids, n_tagged=0, tagged_vid=100, n_untagged=0):
for dpid in dpids:
serialno = faucet_mininet_test_util.get_serialno(
ports_sock, test_name)
sid_prefix = self._get_sid_prefix(serialno)
for host_n in range(n_tagged):
self._add_tagged_host(sid_prefix, tagged_vid, host_n)
for host_n in range(n_untagged):
self._add_untagged_host(sid_prefix, host_n)
switch = self._add_faucet_switch(sid_prefix, dpid)
for host in self.hosts():
self.addLink(host, switch)
class FaucetHwSwitchTopo(FaucetSwitchTopo):
"""FAUCET switch topology that contains a hardware switch."""
def build(self, ports_sock, test_name, dpids, n_tagged=0, tagged_vid=100, n_untagged=0):
for dpid in dpids:
serialno = faucet_mininet_test_util.get_serialno(
ports_sock, test_name)
sid_prefix = self._get_sid_prefix(serialno)
for host_n in range(n_tagged):
self._add_tagged_host(sid_prefix, tagged_vid, host_n)
for host_n in range(n_untagged):
self._add_untagged_host(sid_prefix, host_n)
remap_dpid = str(int(dpid) + 1)
output('bridging hardware switch DPID %s (%x) dataplane via OVS DPID %s (%x)' % (
dpid, int(dpid), remap_dpid, int(remap_dpid)))
dpid = remap_dpid
switch = self._add_faucet_switch(sid_prefix, dpid)
for host in self.hosts():
self.addLink(host, switch)
class FaucetStringOfDPSwitchTopo(FaucetSwitchTopo):
"""String of datapaths each with hosts with a single FAUCET controller."""
def build(self, ports_sock, test_name, dpids, n_tagged=0, tagged_vid=100, n_untagged=0):
"""
Hosts
||||
||||
+----+ +----+ +----+
---+1 | |1234| | 1+---
Hosts ---+2 | | | | 2+--- Hosts
---+3 | | | | 3+---
---+4 5+-------+5 6+-------+5 4+---
+----+ +----+ +----+
Faucet-1 Faucet-2 Faucet-3
| | |
| | |
+-------- controller -----+
* S switches (above, S = 3; for S > 3, switches are added to the chain)
* (n_tagged + n_untagged) hosts per switch
* (n_tagged + n_untagged + 1) links on switches 0 and S-1,
with final link being inter-switch
* (n_tagged + n_untagged + 2) links on switches 0 < n < S-1,
with final two links being inter-switch
"""
last_switch = None
for dpid in dpids:
serialno = faucet_mininet_test_util.get_serialno(
ports_sock, test_name)
sid_prefix = self._get_sid_prefix(serialno)
hosts = []
for host_n in range(n_tagged):
hosts.append(self._add_tagged_host(sid_prefix, tagged_vid, host_n))
for host_n in range(n_untagged):
hosts.append(self._add_untagged_host(sid_prefix, host_n))
switch = self._add_faucet_switch(sid_prefix, dpid)
for host in hosts:
self.addLink(host, switch)
# Add a switch-to-switch link with the previous switch,
# if this isn't the first switch in the topology.
if last_switch is not None:
self.addLink(last_switch, switch)
last_switch = switch
class BaseFAUCET(Controller):
"""Base class for FAUCET and Gauge controllers."""
controller_intf = None
controller_ip = None
pid_file = None
tmpdir = None
ofcap = None
BASE_CARGS = ' '.join((
'--verbose',
'--use-stderr',
'--ofp-tcp-listen-port=%s'))
def __init__(self, name, tmpdir, controller_intf=None, cargs='', **kwargs):
name = '%s-%u' % (name, os.getpid())
self.tmpdir = tmpdir
self.controller_intf = controller_intf
super(BaseFAUCET, self).__init__(
name, cargs=self._add_cargs(cargs, name), **kwargs)
def _add_cargs(self, cargs, name):
ofp_listen_host_arg = ''
if self.controller_intf is not None:
# pylint: disable=no-member
self.controller_ip = netifaces.ifaddresses(
self.controller_intf)[socket.AF_INET][0]['addr']
ofp_listen_host_arg = '--ofp-listen-host=%s' % self.controller_ip
self.pid_file = os.path.join(self.tmpdir, name + '.pid')
pid_file_arg = '--pid-file=%s' % self.pid_file
return ' '.join((
self.BASE_CARGS, pid_file_arg, ofp_listen_host_arg, cargs))
def _start_tcpdump(self):
"""Start a tcpdump for OF port."""
self.ofcap = os.path.join(self.tmpdir, '-'.join((self.name, 'of.cap')))
tcpdump_args = ' '.join((
'-s 0',
'-e',
'-n',
'-U',
'-q',
'-i %s' % self.controller_intf,
'-w %s' % self.ofcap,
'tcp and port %u' % self.port,
'>/dev/null',
'2>/dev/null',
))
self.cmd('tcpdump %s &' % tcpdump_args)
def _tls_cargs(self, ofctl_port, ctl_privkey, ctl_cert, ca_certs):
"""Add TLS/cert parameters to Ryu."""
tls_cargs = []
for carg_val, carg_key in ((ctl_privkey, 'ctl-privkey'),
(ctl_cert, 'ctl-cert'),
(ca_certs, 'ca-certs')):
if carg_val:
tls_cargs.append(('--%s=%s' % (carg_key, carg_val)))
if tls_cargs:
tls_cargs.append(('--ofp-ssl-listen-port=%u' % ofctl_port))
return ' '.join(tls_cargs)
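# Illustrative result of _tls_cargs() (paths are made up): with
# ctl_privkey='/etc/faucet/ctl.key', ctl_cert='/etc/faucet/ctl.cert',
# ca_certs='/etc/faucet/ca.cert' and ofctl_port=6653 it returns
# '--ctl-privkey=/etc/faucet/ctl.key --ctl-cert=/etc/faucet/ctl.cert
# --ca-certs=/etc/faucet/ca.cert --ofp-ssl-listen-port=6653',
# and an empty string when no TLS parameters are supplied.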
def _command(self, env, tmpdir, name, args):
"""Wrap controller startup command in shell script with environment."""
env_vars = []
for var, val in list(sorted(env.items())):
env_vars.append('='.join((var, val)))
script_wrapper_name = os.path.join(tmpdir, 'start-%s.sh' % name)
with open(script_wrapper_name, 'w') as script_wrapper:
script_wrapper.write(
'PYTHONPATH=.:..:../faucet %s exec python3 -m cProfile -s time /usr/local/bin/ryu-manager %s $*\n' % (
' '.join(env_vars), args))
return '/bin/sh %s' % script_wrapper_name
def ryu_pid(self):
"""Return PID of ryu-manager process."""
if os.path.exists(self.pid_file) and os.path.getsize(self.pid_file) > 0:
pid = None
with open(self.pid_file) as pid_file:
pid = int(pid_file.read())
return pid
return None
def listen_port(self, port, state='LISTEN'):
"""Return True if port in specified TCP state."""
listening_out = self.cmd(
faucet_mininet_test_util.tcp_listening_cmd(port, state=state)).split()
for pid in listening_out:
if int(pid) == self.ryu_pid():
return True
return False
# pylint: disable=invalid-name
def checkListening(self):
"""Mininet's checkListening() causes occasional false positives (with
exceptions we can't catch), and we handle port conflicts ourselves anyway."""
return
def listening(self):
"""Return True if controller listening on required ports."""
return self.listen_port(self.port)
def connected(self):
"""Return True if at least one switch connected and controller healthy."""
return self.healthy() and self.listen_port(self.port, state='ESTABLISHED')
def logname(self):
"""Return log file for controller."""
return os.path.join('/tmp', self.name + '.log')
def healthy(self):
"""Return True if controller logging and listening on required ports."""
if (os.path.exists(self.logname()) and
os.path.getsize(self.logname()) and
self.listening()):
return True
return False
def start(self):
"""Start tcpdump for OF port and then start controller."""
self._start_tcpdump()
super(BaseFAUCET, self).start()
def _stop_cap(self):
"""Stop tcpdump for OF port and run tshark to decode it."""
if os.path.exists(self.ofcap):
self.cmd(' '.join(['fuser', '-15', '-m', self.ofcap]))
text_ofcap_log = '%s.txt' % self.ofcap
with open(text_ofcap_log, 'w') as text_ofcap:
subprocess.call(
['tshark', '-l', '-n', '-Q',
'-d', 'tcp.port==%u,openflow' % self.port,
'-O', 'openflow_v4',
'-Y', 'openflow_v4',
'-r', self.ofcap],
stdout=text_ofcap,
stdin=faucet_mininet_test_util.DEVNULL,
stderr=faucet_mininet_test_util.DEVNULL,
close_fds=True)
def stop(self):
"""Stop controller."""
while self.healthy():
os.kill(self.ryu_pid(), 2)
time.sleep(1)
self._stop_cap()
super(BaseFAUCET, self).stop()
if os.path.exists(self.logname()):
tmpdir_logname = os.path.join(
self.tmpdir, os.path.basename(self.logname()))
if os.path.exists(tmpdir_logname):
os.remove(tmpdir_logname)
shutil.move(self.logname(), tmpdir_logname)
class FAUCET(BaseFAUCET):
"""Start a FAUCET controller."""
def __init__(self, name, tmpdir, controller_intf, env,
ctl_privkey, ctl_cert, ca_certs,
ports_sock, port, test_name, **kwargs):
self.ofctl_port = faucet_mininet_test_util.find_free_port(
ports_sock, test_name)
cargs = ' '.join((
'--wsapi-host=%s' % faucet_mininet_test_util.LOCALHOST,
'--wsapi-port=%u' % self.ofctl_port,
self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs)))
super(FAUCET, self).__init__(
name,
tmpdir,
controller_intf,
cargs=cargs,
command=self._command(env, tmpdir, name, 'ryu.app.ofctl_rest faucet.faucet'),
port=port,
**kwargs)
def listening(self):
return self.listen_port(self.ofctl_port) and super(FAUCET, self).listening()
class Gauge(BaseFAUCET):
"""Start a Gauge controller."""
def __init__(self, name, tmpdir, controller_intf, env,
ctl_privkey, ctl_cert, ca_certs,
port, **kwargs):
super(Gauge, self).__init__(
name,
tmpdir,
controller_intf,
cargs=self._tls_cargs(port, ctl_privkey, ctl_cert, ca_certs),
command=self._command(env, tmpdir, name, 'faucet.gauge'),
port=port,
**kwargs)
class FaucetAPI(BaseFAUCET):
"""Start a controller to run the Faucet API tests."""
def __init__(self, name, tmpdir, env, **kwargs):
super(FaucetAPI, self).__init__(
name,
tmpdir,
command=self._command(env, tmpdir, name, 'faucet.faucet test_api.py'),
**kwargs)
|
|
# -*- coding: utf-8 -*-
"""
This is part of HashBruteStation software
Docs EN: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station_en
Docs RU: http://hack4sec.pro/wiki/index.php/Hash_Brute_Station
License: MIT
Copyright (c) Anton Kuzmin <http://anton-kuzmin.ru> (ru) <http://anton-kuzmin.pro> (en)
Class of unit tests for ResultParseThread
"""
import sys
import time
import pytest
sys.path.append('../../')
from libs.common import file_put_contents
from classes.ResultParseThread import ResultParseThread
from classes.HbsException import HbsException
from CommonUnit import CommonUnit
class Test_ResultParseThread(CommonUnit):
""" Class of unit tests for WorkerThread """
thrd = None
def setup(self):
""" Setup tests """
self._clean_db()
self._add_work_task()
self.thrd = ResultParseThread()
self.thrd.current_work_task_id = 1
self.thrd.catch_exceptions = False
def teardown(self):
""" Teardown tests """
if isinstance(self.thrd, ResultParseThread):
self.thrd.available = False
time.sleep(1)
del self.thrd
self._clean_db()
def test_update_status(self):
""" Testing update_status() method """
self._add_work_task(id=2)
assert self.db.fetch_one("SELECT status FROM task_works WHERE id=1") == 'wait'
assert self.db.fetch_one("SELECT status FROM task_works WHERE id=2") == 'wait'
self.thrd.update_status('done')
assert self.db.fetch_one("SELECT status FROM task_works WHERE id=1") == 'done'
assert self.db.fetch_one("SELECT status FROM task_works WHERE id=2") == 'wait'
def test_get_work_task_data(self):
""" Testing get_work_task_data() method """
data1 = self.thrd.get_work_task_data()
test_data1 = {'id': 1, 'task_id': 1, 'hashlist_id': 1, 'status': 'wait'}
for field in test_data1:
assert data1[field] == test_data1[field]
self._add_work_task(id=2, hashlist_id=3, task_id=4, status='outparsing')
self.thrd.current_work_task_id = 2
data2 = self.thrd.get_work_task_data()
test_data2 = {'id': 2, 'task_id': 4, 'hashlist_id': 3, 'status': 'outparsing'}
for field in test_data2:
assert data2[field] == test_data2[field]
def test_update_work_task_field(self):
""" Testing update_work_task_field() method """
self.thrd.update_work_task_field('status', 'done')
self.thrd.update_work_task_field('hashlist_id', '2')
assert self.db.fetch_one("SELECT status FROM task_works WHERE id=1") == 'done'
assert self.db.fetch_one("SELECT hashlist_id FROM task_works WHERE id=1") == 2
def test_update_all_hashlists_counts(self):
""" Test of update_all_hashlists_counts() """
self._add_hashlist()
self._add_hash(hash='a')
self._add_hash(hash='b', cracked=1, password='1')
self._add_hashlist(id=2)
self._add_hash(hashlist_id=2, hash='a')
self._add_hash(hashlist_id=2, hash='b')
self._add_hash(hashlist_id=2, hash='c', cracked=1, password='1')
self._add_hashlist(id=3)
self._add_hash(hashlist_id=3, hash='a')
self._add_hash(hashlist_id=3, hash='b')
self._add_hash(hashlist_id=3, hash='c')
self._add_hash(hashlist_id=3, hash='d')
self._add_hash(hashlist_id=3, hash='e', cracked=1, password='2')
self._add_hash(hashlist_id=3, hash='f', cracked=1, password='3')
self.thrd.update_all_hashlists_counts_by_alg_id(3)
assert self.db.fetch_one("SELECT uncracked FROM hashlists WHERE id=1") == 1
assert self.db.fetch_one("SELECT cracked FROM hashlists WHERE id=1") == 1
assert self.db.fetch_one("SELECT uncracked FROM hashlists WHERE id=2") == 2
assert self.db.fetch_one("SELECT cracked FROM hashlists WHERE id=2") == 1
assert self.db.fetch_one("SELECT uncracked FROM hashlists WHERE id=3") == 4
assert self.db.fetch_one("SELECT cracked FROM hashlists WHERE id=3") == 2
def test_get_current_work_task(self):
""" Test of get_current_work_task() """
assert self.thrd.get_current_work_task_id() == 1
self.thrd.current_work_task_id = 2
assert self.thrd.get_current_work_task_id() == 2
with pytest.raises(HbsException) as ex:
self.thrd.current_work_task_id = None
self.thrd.get_current_work_task_id()
assert "Current task for work not set" in str(ex)
def test_get_waiting_task_for_work(self):
""" Test of get_waiting_task_for_work() """
self._add_work_task(id=2, status='waitoutparse')
assert self.thrd.get_waiting_task_for_work() == 2
assert self.thrd.current_work_task_id == 2
self.db.update("task_works", {'status': 'waitoutparse'}, "id = 1")
assert self.thrd.get_waiting_task_for_work() == 1
assert self.thrd.current_work_task_id == 1
self.db.q("UPDATE task_works SET status = 'wait'")
self.thrd.get_waiting_task_for_work()
with pytest.raises(HbsException) as ex:
self.thrd.get_current_work_task_id()
assert "Current task for work not set" in str(ex)
assert self.thrd.current_work_task_id is None
def test_get_hashlist_data(self):
""" Test of get_hashlist_data() """
self._add_hashlist()
assert self.db.fetch_row("SELECT * FROM hashlists WHERE id = 1") == self.thrd.get_hashlist_data(1)
assert self.thrd.get_hashlist_data(33) is None
test_data = [
(
0,
[
{'id': 2, "name": "test2", 'alg_id': 3},
{'id': 3, "name": "test3", 'alg_id': 3},
{'id': 4, "name": "test4", 'alg_id': 4},
],
[
{'id': 1, 'hashlist_id': 2, 'hash': 'a', 'salt': '', 'summ': '0cc175b9c0f1b6a831c399e269772661'},
{'id': 2, 'hashlist_id': 3, 'hash': 'a', 'salt': '', 'summ': '0cc175b9c0f1b6a831c399e269772661'},
{'id': 3, 'hashlist_id': 4, 'hash': 'a', 'salt': '', 'summ': '0cc175b9c0f1b6a831c399e269772661'},
],
"a:70617373"
),
(
1,
[
{'id': 2, "name": "test2", 'alg_id': 3},
{'id': 3, "name": "test3", 'alg_id': 3},
{'id': 4, "name": "test4", 'alg_id': 4},
],
[
{'id': 1, 'hashlist_id': 2, 'hash': 'a', 'salt': 'b', 'summ': 'd8160c9b3dc20d4e931aeb4f45262155'},
{'id': 2, 'hashlist_id': 3, 'hash': 'a', 'salt': 'b', 'summ': 'd8160c9b3dc20d4e931aeb4f45262155'},
{'id': 3, 'hashlist_id': 4, 'hash': 'a', 'salt': 'b', 'summ': 'd8160c9b3dc20d4e931aeb4f45262155'},
],
"a:b:70617373"
),
]
@pytest.mark.parametrize("have_salt,hashlists,hashes,outfile_content", test_data)
def test_parse_outfile_and_fill_found_hashes(self, have_salt, hashlists, hashes, outfile_content):
""" Test of parse_outfile_and_fill_found_hashes() """
for hashlist in hashlists:
self._add_hashlist(id=hashlist['id'], name=hashlist['name'],
alg_id=hashlist['alg_id'], have_salts=have_salt)
for _hash in hashes:
self._add_hash(id=_hash['id'], hashlist_id=_hash['hashlist_id'],
hash=_hash['hash'], salt=_hash['salt'], summ=_hash['summ'])
file_put_contents("/tmp/test.txt", outfile_content)
assert [] == self.db.fetch_all("SELECT h.id, h.password, h.cracked FROM hashes h, hashlists hl "
"WHERE hl.id = h.hashlist_id AND hl.alg_id = 3 AND LENGTH(h.password) "
"AND h.cracked")
self.thrd.parse_outfile_and_fill_found_hashes({'out_file': '/tmp/test.txt'}, {'alg_id': 3})
test_data = [
{'id': 1, 'password': 'pass', 'cracked': 1},
{'id': 2, 'password': 'pass', 'cracked': 1}
]
assert test_data == self.db.fetch_all(
"SELECT h.id, h.password, h.cracked FROM hashes h, hashlists hl WHERE hl.id = h.hashlist_id "
"AND hl.alg_id = 3 AND LENGTH(h.password) AND h.cracked")
assert [{'id': 3, 'password': '', 'cracked': 0}] == self.db.fetch_all(
"SELECT h.id, h.password, h.cracked FROM hashes h, hashlists hl WHERE hl.id = h.hashlist_id "
"AND hl.alg_id = 4")
def test_update_task_uncracked_count(self):
""" Test of update_task_uncracked_count() """
self.db.update("task_works", {"uncracked_after": 100}, "id=1")
self._add_hash(password='p', hash='a', salt='b', cracked=1)
self._add_hash(hash='c', salt='d', cracked=0)
self.thrd.update_task_uncracked_count(1, 1)
assert self.db.fetch_one("SELECT uncracked_after FROM task_works WHERE id=1") == 1
|
|
"""Generates API documentation by introspection."""
from django.contrib.auth.models import AnonymousUser
import rest_framework
from rest_framework import viewsets
from rest_framework.serializers import BaseSerializer
from .introspectors import (
APIViewIntrospector,
BaseMethodIntrospector,
IntrospectorHelper,
ViewSetIntrospector,
WrappedAPIViewIntrospector,
get_data_type,
get_default_value,
)
from .compat import OrderedDict
class DocumentationGenerator(object):
# Serializers defined in docstrings
explicit_serializers = set()
# Serializers defined in fields
fields_serializers = set()
# Response classes defined in docstrings
explicit_response_types = dict()
def __init__(self, for_user=None):
self.user = for_user or AnonymousUser()
def generate(self, apis):
"""
Returns documentation for a list of APIs
"""
api_docs = []
for api in apis:
api_docs.append({
'description': IntrospectorHelper.get_summary(api['callback']),
'path': api['path'],
'operations': self.get_operations(api, apis),
})
return api_docs
def get_introspector(self, api, apis):
path = api['path']
pattern = api['pattern']
callback = api['callback']
if callback.__module__ == 'rest_framework.decorators':
return WrappedAPIViewIntrospector(callback, path, pattern, self.user)
elif issubclass(callback, viewsets.ViewSetMixin):
patterns = [a['pattern'] for a in apis
if a['callback'] == callback]
return ViewSetIntrospector(callback, path, pattern, self.user, patterns=patterns)
else:
return APIViewIntrospector(callback, path, pattern, self.user)
def get_operations(self, api, apis=None):
"""
Returns docs for the allowed methods of an API endpoint
"""
if apis is None:
apis = [api]
operations = []
introspector = self.get_introspector(api, apis)
for method_introspector in introspector:
if not isinstance(method_introspector, BaseMethodIntrospector) or \
method_introspector.get_http_method() == "OPTIONS":
continue # No one cares. I impose JSON.
doc_parser = method_introspector.get_yaml_parser()
serializer = self._get_method_serializer(method_introspector)
response_type = self._get_method_response_type(
doc_parser, serializer, introspector, method_introspector)
operation = {
'method': method_introspector.get_http_method(),
'summary': method_introspector.get_summary(),
'nickname': method_introspector.get_nickname(),
'notes': method_introspector.get_notes(),
'type': response_type,
}
if doc_parser.yaml_error is not None:
operation['notes'] += "<pre>YAMLError:\n {err}</pre>".format(
err=doc_parser.yaml_error)
response_messages = doc_parser.get_response_messages()
parameters = doc_parser.discover_parameters(
inspector=method_introspector)
operation['parameters'] = parameters or []
if response_messages:
operation['responseMessages'] = response_messages
operations.append(operation)
return operations
def get_models(self, apis):
"""
Builds a list of Swagger 'models'. These represent
DRF serializers and their fields
"""
serializers = self._get_serializer_set(apis)
serializers.update(self.explicit_serializers)
serializers.update(
self._find_field_serializers(serializers)
)
models = {}
for serializer in serializers:
data = self._get_serializer_fields(serializer)
# Register two models with different subsets of properties, suitable
# for data reading and writing respectively,
# i.e. rest framework does not output write_only fields in responses
# or require read_only fields in complex input.
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
# Writing
# no readonly fields
w_name = "Write{serializer}".format(serializer=serializer_name)
w_properties = OrderedDict((k, v) for k, v in data['fields'].items()
if k not in data['read_only'])
models[w_name] = {
'id': w_name,
'required': [i for i in data['required'] if i in w_properties.keys()],
'properties': w_properties,
}
# Reading
# no write_only fields
r_name = serializer_name
r_properties = OrderedDict((k, v) for k, v in data['fields'].items()
if k not in data['write_only'])
models[r_name] = {
'id': r_name,
'required': [i for i in r_properties.keys()],
'properties': r_properties,
}
# Enable original model for testing purposes
# models[serializer_name] = {
# 'id': serializer_name,
# 'required': data['required'],
# 'properties': data['fields'],
# }
models.update(self.explicit_response_types)
models.update(self.fields_serializers)
return models
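# Sketch of the read/write split above for a hypothetical serializer whose name
# resolves to 'Comment' and which has a read_only 'id' and a write_only
# 'password' field: get_models() emits 'Comment' (with 'id', without 'password')
# for responses and 'WriteComment' (with 'password', without 'id') for request
# bodies.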
def _get_method_serializer(self, method_inspector):
"""
Returns the serializer used by a method.
Registers a custom serializer from the docstring in scope.
The serializer might be ignored if the docstring explicitly says so.
"""
serializer = method_inspector.get_response_serializer_class()
doc_parser = method_inspector.get_yaml_parser()
if doc_parser.get_response_type() is not None:
# Custom response class detected
return None
if doc_parser.should_omit_serializer():
serializer = None
return serializer
def _get_method_response_type(self, doc_parser, serializer,
view_inspector, method_inspector):
"""
Returns the response type for a method.
This is either a custom `type` from the docstring or the discovered
serializer class name.
When a custom `type` is found in the docstring it is also
registered in this generator's scope.
"""
response_type = doc_parser.get_response_type()
if response_type is not None:
# Register class in scope
view_name = view_inspector.callback.__name__
view_name = view_name.replace('ViewSet', '')
view_name = view_name.replace('APIView', '')
view_name = view_name.replace('View', '')
response_type_name = "{view}{method}Response".format(
view=view_name,
method=method_inspector.method.title().replace('_', '')
)
self.explicit_response_types.update({
response_type_name: {
"id": response_type_name,
"properties": response_type
}
})
return response_type_name
else:
serializer_name = IntrospectorHelper.get_serializer_name(serializer)
if serializer_name is not None:
return serializer_name
return 'object'
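# Naming sketch for the branch above (hypothetical view): a custom `type` in
# the docstring of UserAPIView.get is registered as 'UserGetResponse', since
# the 'ViewSet'/'APIView'/'View' suffix is stripped from the view name and the
# method name is title-cased with underscores removed.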
def _get_serializer_set(self, apis):
"""
Returns a set of serializer classes for a provided list
of APIs
"""
serializers = set()
for api in apis:
introspector = self.get_introspector(api, apis)
for method_introspector in introspector:
serializer = self._get_method_serializer(method_introspector)
if serializer is not None:
serializers.add(serializer)
extras = method_introspector.get_extra_serializer_classes()
for extra in extras:
if extra is not None:
serializers.add(extra)
return serializers
def _find_field_serializers(self, serializers, found_serializers=set()):
"""
Returns set of serializers discovered from fields
"""
def get_thing(field, key):
if rest_framework.VERSION >= '3.0.0':
from rest_framework.serializers import ListSerializer
if isinstance(field, ListSerializer):
return key(field.child)
return key(field)
serializers_set = set()
for serializer in serializers:
fields = serializer().get_fields()
for name, field in fields.items():
if isinstance(field, BaseSerializer):
serializers_set.add(get_thing(field, lambda f: f))
if field not in found_serializers:
serializers_set.update(
self._find_field_serializers(
(get_thing(field, lambda f: f.__class__),),
serializers_set))
return serializers_set
def _get_serializer_fields(self, serializer):
"""
Returns serializer fields in the Swagger MODEL format
"""
if serializer is None:
return
if hasattr(serializer, '__call__'):
fields = serializer().get_fields()
else:
fields = serializer.get_fields()
data = OrderedDict({
'fields': OrderedDict(),
'required': [],
'write_only': [],
'read_only': [],
})
for name, field in fields.items():
if getattr(field, 'write_only', False):
data['write_only'].append(name)
if getattr(field, 'read_only', False):
data['read_only'].append(name)
if getattr(field, 'required', False):
data['required'].append(name)
data_type, data_format = get_data_type(field) or ('string', 'string')
if data_type == 'hidden':
continue
# guess format
# data_format = 'string'
# if data_type in BaseMethodIntrospector.PRIMITIVES:
# data_format = BaseMethodIntrospector.PRIMITIVES.get(data_type)[0]
description = getattr(field, 'help_text', '')
if not description or description.strip() == '':
description = None
f = {
'description': description,
'type': data_type,
'format': data_format,
'required': getattr(field, 'required', False),
'defaultValue': get_default_value(field),
'readOnly': getattr(field, 'read_only', None),
}
# Swagger type is a primitive, format is more specific
if f['type'] == f['format']:
del f['format']
# defaultValue of null is not allowed, it is specific to type
if f['defaultValue'] is None:
del f['defaultValue']
# Min/Max values
max_val = getattr(field, 'max_value', None)
min_val = getattr(field, 'min_value', None)
if min_val is not None and data_type == 'integer':
f['minimum'] = min_val
if max_val is not None and data_type == 'integer':
f['maximum'] = max_val
# ENUM options
if data_type in BaseMethodIntrospector.ENUMS:
if isinstance(field.choices, list):
f['enum'] = [k for k, v in field.choices]
elif isinstance(field.choices, dict):
f['enum'] = [k for k, v in field.choices.items()]
# Support for complex types
if rest_framework.VERSION < '3.0.0':
has_many = hasattr(field, 'many') and field.many
else:
from rest_framework.serializers import ListSerializer, ManyRelatedField
has_many = isinstance(field, (ListSerializer, ManyRelatedField))
if isinstance(field, BaseSerializer) or has_many:
if isinstance(field, BaseSerializer):
field_serializer = IntrospectorHelper.get_serializer_name(field)
if getattr(field, 'write_only', False):
field_serializer = "Write{}".format(field_serializer)
f['type'] = field_serializer
else:
field_serializer = None
data_type = 'string'
if has_many:
f['type'] = 'array'
if field_serializer:
f['items'] = {'$ref': field_serializer}
elif data_type in BaseMethodIntrospector.PRIMITIVES:
f['items'] = {'type': data_type}
# memorize discovered field
data['fields'][name] = f
return data
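# Example of a single entry produced above (field name and values made up):
#
#   data['fields']['title'] = {
#       'description': 'Post title',
#       'type': 'string',
#       'required': True,
#       'readOnly': False,
#   }
#
# 'format' is dropped when it equals the type and 'defaultValue' is dropped
# when it is None, as done above.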
|
|
import mock
import pytest
import requests
from collections import namedtuple
from awx.api.views import (
ApiVersionRootView,
JobTemplateLabelList,
JobTemplateSurveySpec,
InventoryInventorySourcesUpdate,
InventoryHostsList,
HostInsights,
)
from awx.main.models import (
Host,
)
from awx.main.managers import HostManager
@pytest.fixture
def mock_response_new(mocker):
m = mocker.patch('awx.api.views.Response.__new__')
m.return_value = m
return m
class TestApiRootView:
def test_get_endpoints(self, mocker, mock_response_new):
endpoints = [
'authtoken',
'ping',
'config',
#'settings',
'me',
'dashboard',
'organizations',
'users',
'projects',
'teams',
'credentials',
'inventory',
'inventory_scripts',
'inventory_sources',
'groups',
'hosts',
'job_templates',
'jobs',
'ad_hoc_commands',
'system_job_templates',
'system_jobs',
'schedules',
'notification_templates',
'notifications',
'labels',
'unified_job_templates',
'unified_jobs',
'activity_stream',
'workflow_job_templates',
'workflow_jobs',
]
view = ApiVersionRootView()
ret = view.get(mocker.MagicMock())
assert ret == mock_response_new
data_arg = mock_response_new.mock_calls[0][1][1]
for endpoint in endpoints:
assert endpoint in data_arg
class TestJobTemplateLabelList:
def test_inherited_mixin_unattach(self):
with mock.patch('awx.api.generics.DeleteLastUnattachLabelMixin.unattach') as mixin_unattach:
view = JobTemplateLabelList()
mock_request = mock.MagicMock()
super(JobTemplateLabelList, view).unattach(mock_request, None, None)
mixin_unattach.assert_called_with(mock_request, None, None)
class TestJobTemplateSurveySpec(object):
@mock.patch('awx.api.views.feature_enabled', lambda feature: True)
def test_get_password_type(self, mocker, mock_response_new):
JobTemplate = namedtuple('JobTemplate', 'survey_spec')
obj = JobTemplate(survey_spec={'spec':[{'type': 'password', 'default': 'my_default'}]})
with mocker.patch.object(JobTemplateSurveySpec, 'get_object', return_value=obj):
view = JobTemplateSurveySpec()
response = view.get(mocker.MagicMock())
assert response == mock_response_new
# wish there was a better way to do this!
assert response.call_args[0][1]['spec'][0]['default'] == '$encrypted$'
class TestInventoryInventorySourcesUpdate:
@pytest.mark.parametrize("can_update, can_access, is_source, is_up_on_proj, expected", [
(True, True, "ec2", False, [{'status': 'started', 'inventory_update': 1, 'inventory_source': 1}]),
(False, True, "gce", False, [{'status': 'Could not start because `can_update` returned False', 'inventory_source': 1}]),
(True, False, "scm", True, [{'status': 'started', 'inventory_update': 1, 'inventory_source': 1}]),
])
def test_post(self, mocker, can_update, can_access, is_source, is_up_on_proj, expected):
class InventoryUpdate:
id = 1
class Project:
name = 'project'
InventorySource = namedtuple('InventorySource', ['source', 'update_on_project_update', 'pk', 'can_update',
'update', 'source_project'])
class InventorySources(object):
def all(self):
return [InventorySource(pk=1, source=is_source, source_project=Project,
update_on_project_update=is_up_on_proj,
can_update=can_update, update=lambda:InventoryUpdate)]
def exclude(self, **kwargs):
return self.all()
Inventory = namedtuple('Inventory', ['inventory_sources', 'kind'])
obj = Inventory(inventory_sources=InventorySources(), kind='')
mock_request = mocker.MagicMock()
mock_request.user.can_access.return_value = can_access
with mocker.patch.object(InventoryInventorySourcesUpdate, 'get_object', return_value=obj):
view = InventoryInventorySourcesUpdate()
response = view.post(mock_request)
assert response.data == expected
class TestHostInsights():
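    # The patch_parent fixture below replaces the DRF parent view class with a
    # mock so the HostInsights methods can be exercised in isolation.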
@pytest.fixture
def patch_parent(self, mocker):
mocker.patch('awx.api.generics.GenericAPIView')
@pytest.mark.parametrize("status_code, exception, error, message", [
(502, requests.exceptions.SSLError, 'SSLError while trying to connect to https://myexample.com/whocares/me/', None,),
(504, requests.exceptions.Timeout, 'Request to https://myexample.com/whocares/me/ timed out.', None,),
(502, requests.exceptions.RequestException, 'booo!', 'Unkown exception booo! while trying to GET https://myexample.com/whocares/me/'),
])
def test_get_insights_request_exception(self, patch_parent, mocker, status_code, exception, error, message):
view = HostInsights()
mocker.patch.object(view, '_get_insights', side_effect=exception(error))
(msg, code) = view.get_insights('https://myexample.com/whocares/me/', 'ignore', 'ignore')
assert code == status_code
        assert msg['error'] == (message or error)
def test_get_insights_non_200(self, patch_parent, mocker):
view = HostInsights()
Response = namedtuple('Response', 'status_code content')
mocker.patch.object(view, '_get_insights', return_value=Response(500, 'mock 500 err msg'))
(msg, code) = view.get_insights('https://myexample.com/whocares/me/', 'ignore', 'ignore')
assert msg['error'] == 'Failed to gather reports and maintenance plans from Insights API at URL https://myexample.com/whocares/me/. Server responded with 500 status code and message mock 500 err msg'
def test_get_insights_401(self, patch_parent, mocker):
view = HostInsights()
Response = namedtuple('Response', 'status_code content')
mocker.patch.object(view, '_get_insights', return_value=Response(401, ''))
(msg, code) = view.get_insights('https://myexample.com/whocares/me/', 'ignore', 'ignore')
assert msg['error'] == 'Unauthorized access. Please check your Insights Credential username and password.'
def test_get_insights_malformed_json_content(self, patch_parent, mocker):
view = HostInsights()
class Response():
status_code = 200
content = 'booo!'
def json(self):
raise ValueError('we do not care what this is')
mocker.patch.object(view, '_get_insights', return_value=Response())
(msg, code) = view.get_insights('https://myexample.com/whocares/me/', 'ignore', 'ignore')
assert msg['error'] == 'Expected JSON response from Insights but instead got booo!'
assert code == 502
def test_get_not_insights_host(self, mocker):
view = HostInsights()
host = Host()
host.insights_system_id = None
mocker.patch.object(view, 'get_object', return_value=host)
resp = view.get(None)
assert resp.data['error'] == 'This host is not recognized as an Insights host.'
assert resp.status_code == 404
def test_get_no_credential(self, patch_parent, mocker):
view = HostInsights()
class MockInventory():
insights_credential = None
name = 'inventory_name_here'
class MockHost():
insights_system_id = 'insights_system_id_value'
inventory = MockInventory()
mocker.patch.object(view, 'get_object', return_value=MockHost())
resp = view.get(None)
assert resp.data['error'] == 'The Insights Credential for "inventory_name_here" was not found.'
assert resp.status_code == 404
class TestInventoryHostsList(object):
def test_host_list_smart_inventory(self, mocker):
Inventory = namedtuple('Inventory', ['kind', 'host_filter', 'hosts', 'organization_id'])
obj = Inventory(kind='smart', host_filter='localhost', hosts=HostManager(), organization_id=None)
obj.hosts.instance = obj
with mock.patch.object(InventoryHostsList, 'get_parent_object', return_value=obj):
with mock.patch('awx.main.utils.filters.SmartFilter.query_from_string') as mock_query:
view = InventoryHostsList()
view.get_queryset()
mock_query.assert_called_once_with('localhost')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import base64
import cherrypy
import datetime
from ..describe import Description, describeRoute
from girder.api import access
from girder.api.rest import Resource, RestException, AccessException,\
filtermodel, loadmodel
from girder.constants import AccessType, SettingKey, TokenScope
from girder.models.token import genToken
from girder.utility import mail_utils
class User(Resource):
"""API Endpoint for users in the system."""
def __init__(self):
super(User, self).__init__()
self.resourceName = 'user'
self.route('DELETE', ('authentication',), self.logout)
self.route('DELETE', (':id',), self.deleteUser)
self.route('GET', (), self.find)
self.route('GET', ('me',), self.getMe)
self.route('GET', ('authentication',), self.login)
self.route('GET', (':id',), self.getUser)
self.route('GET', (':id', 'details'), self.getUserDetails)
self.route('POST', (), self.createUser)
self.route('PUT', (':id',), self.updateUser)
self.route('PUT', ('password',), self.changePassword)
self.route('PUT', (':id', 'password'), self.changeUserPassword)
self.route('GET', ('password', 'temporary', ':id'),
self.checkTemporaryPassword)
self.route('PUT', ('password', 'temporary'),
self.generateTemporaryPassword)
self.route('DELETE', ('password',), self.resetPassword)
@access.public
@filtermodel(model='user')
@describeRoute(
Description('List or search for users.')
.responseClass('User')
.param('text', "Pass this to perform a full text search for items.",
required=False)
.pagingParams(defaultSort='lastName')
)
def find(self, params):
limit, offset, sort = self.getPagingParameters(params, 'lastName')
return list(self.model('user').search(
text=params.get('text'), user=self.getCurrentUser(), offset=offset,
limit=limit, sort=sort))
@access.public(scope=TokenScope.USER_INFO_READ)
@loadmodel(map={'id': 'userToGet'}, model='user', level=AccessType.READ)
@filtermodel(model='user')
@describeRoute(
Description('Get a user by ID.')
.responseClass('User')
.param('id', 'The ID of the user.', paramType='path')
.errorResponse('ID was invalid.')
.errorResponse('You do not have permission to see this user.', 403)
)
def getUser(self, userToGet, params):
return userToGet
@access.public(scope=TokenScope.USER_INFO_READ)
@filtermodel(model='user')
@describeRoute(
Description('Retrieve the currently logged-in user information.')
.responseClass('User')
)
def getMe(self, params):
return self.getCurrentUser()
@access.public
@describeRoute(
Description('Log in to the system.')
.notes('Pass your username and password using HTTP Basic Auth. Sends'
' a cookie that should be passed back in future requests.')
.errorResponse('Missing Authorization header.', 401)
.errorResponse('Invalid login or password.', 403)
)
def login(self, params):
"""
Login endpoint. Sends an auth cookie in the response on success.
The caller is expected to use HTTP Basic Authentication when calling
this endpoint.
"""
user, token = self.getCurrentUser(returnToken=True)
# Only create and send new cookie if user isn't already sending
# a valid one.
if not user:
authHeader = cherrypy.request.headers.get('Girder-Authorization')
if not authHeader:
authHeader = cherrypy.request.headers.get('Authorization')
if not authHeader or not authHeader[0:6] == 'Basic ':
raise RestException('Use HTTP Basic Authentication', 401)
try:
credentials = base64.b64decode(authHeader[6:]).decode('utf8')
if ':' not in credentials:
raise TypeError
except Exception:
raise RestException('Invalid HTTP Authorization header', 401)
login, password = credentials.split(':', 1)
login = login.lower().strip()
loginField = 'email' if '@' in login else 'login'
user = self.model('user').findOne({loginField: login})
if user is None:
raise RestException('Login failed.', code=403)
if not self.model('password').authenticate(user, password):
raise RestException('Login failed.', code=403)
setattr(cherrypy.request, 'girderUser', user)
token = self.sendAuthTokenCookie(user)
return {
'user': self.model('user').filter(user, user),
'authToken': {
'token': token['_id'],
'expires': token['expires'],
'scope': token['scope']
},
'message': 'Login succeeded.'
}
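    # Client-side sketch (illustrative only; the host and API prefix are
    # assumptions): a caller would log in with HTTP Basic auth, e.g.
    #
    #     import requests
    #     resp = requests.get('https://girder.example.com/api/v1/user/authentication',
    #                         auth=('mylogin', 'mypassword'))
    #     token = resp.json()['authToken']['token']
    #
    # and then present the token (or the returned cookie) on subsequent requests.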
@access.user
@describeRoute(
Description('Log out of the system.')
.responseClass('Token')
.notes('Attempts to delete your authentication cookie.')
)
def logout(self, params):
token = self.getCurrentToken()
if token:
self.model('token').remove(token)
self.deleteAuthTokenCookie()
return {'message': 'Logged out.'}
@access.public
@filtermodel(model='user', addFields={'authToken'})
@describeRoute(
Description('Create a new user.')
.responseClass('User')
.param('login', "The user's requested login.")
.param('email', "The user's email address.")
.param('firstName', "The user's first name.")
.param('lastName', "The user's last name.")
.param('password', "The user's requested password")
.param('admin', 'Whether this user should be a site administrator.',
required=False, dataType='boolean')
.errorResponse('A parameter was invalid, or the specified login or'
' email already exists in the system.')
)
def createUser(self, params):
self.requireParams(
('firstName', 'lastName', 'login', 'password', 'email'), params)
currentUser = self.getCurrentUser()
if currentUser is not None and currentUser['admin']:
admin = self.boolParam('admin', params, default=False)
else:
admin = False
regPolicy = self.model('setting').get(
SettingKey.REGISTRATION_POLICY, default='open')
if regPolicy != 'open':
raise RestException(
'Registration on this instance is closed. Contact an '
'administrator to create an account for you.')
user = self.model('user').createUser(
login=params['login'], password=params['password'],
email=params['email'], firstName=params['firstName'],
lastName=params['lastName'], admin=admin)
if currentUser is None:
setattr(cherrypy.request, 'girderUser', user)
token = self.sendAuthTokenCookie(user)
user['authToken'] = {
'token': token['_id'],
'expires': token['expires']
}
return user
@access.user
@loadmodel(map={'id': 'userToDelete'}, model='user', level=AccessType.ADMIN)
@describeRoute(
Description('Delete a user by ID.')
.param('id', 'The ID of the user.', paramType='path')
.errorResponse('ID was invalid.')
.errorResponse('You do not have permission to delete this user.', 403)
)
def deleteUser(self, userToDelete, params):
self.model('user').remove(userToDelete)
return {'message': 'Deleted user %s.' % userToDelete['login']}
@access.user
@loadmodel(model='user', level=AccessType.WRITE)
@filtermodel(model='user')
@describeRoute(
Description("Update a user's information.")
.param('id', 'The ID of the user.', paramType='path')
.param('firstName', 'First name of the user.')
.param('lastName', 'Last name of the user.')
.param('email', 'The email of the user.')
.param('admin', 'Is the user a site admin (admin access required)',
required=False, dataType='boolean')
.errorResponse()
.errorResponse('You do not have write access for this user.', 403)
.errorResponse('Must be an admin to create an admin.', 403)
)
def updateUser(self, user, params):
self.requireParams(('firstName', 'lastName', 'email'), params)
user['firstName'] = params['firstName']
user['lastName'] = params['lastName']
user['email'] = params['email']
# Only admins can change admin state
if 'admin' in params:
newAdminState = self.boolParam('admin', params)
if self.getCurrentUser()['admin']:
user['admin'] = newAdminState
else:
if newAdminState != user['admin']:
raise AccessException('Only admins may change admin state.')
return self.model('user').save(user)
@access.admin
@loadmodel(model='user', level=AccessType.ADMIN)
@describeRoute(
Description('Change a user\'s password.')
.notes('Only administrators may use this endpoint.')
.param('id', 'The ID of the user.', paramType='path')
.param('password', 'The user\'s new password.')
.errorResponse('You are not an administrator.', 403)
.errorResponse('The new password is invalid.')
)
def changeUserPassword(self, user, params):
self.requireParams('password', params)
self.model('user').setPassword(user, params['password'])
return {'message': 'Password changed.'}
@access.user
@describeRoute(
Description('Change your password.')
.param('old', 'Your current password or a temporary access token.')
.param('new', 'Your new password.')
.errorResponse('You are not logged in.', 401)
.errorResponse('Your old password is incorrect.', 401)
.errorResponse('Your new password is invalid.')
)
def changePassword(self, params):
self.requireParams(('old', 'new'), params)
user = self.getCurrentUser()
token = None
if not params['old']:
raise RestException('Old password must not be empty.')
if (not self.model('password').hasPassword(user) or
not self.model('password').authenticate(user, params['old'])):
# If not the user's actual password, check for temp access token
token = self.model('token').load(
params['old'], force=True, objectId=False, exc=False)
if (not token or not token.get('userId') or
token['userId'] != user['_id'] or
not self.model('token').hasScope(
token, TokenScope.TEMPORARY_USER_AUTH)):
raise AccessException('Old password is incorrect.')
self.model('user').setPassword(user, params['new'])
if token:
# Remove the temporary access token if one was used
self.model('token').remove(token)
return {'message': 'Password changed.'}
@access.public
@describeRoute(
Description('Reset a forgotten password via email.')
.param('email', 'Your email address.')
.errorResponse('That email does not exist in the system.')
)
def resetPassword(self, params):
self.requireParams('email', params)
email = params['email'].lower().strip()
user = self.model('user').findOne({'email': email})
if user is None:
raise RestException('That email is not registered.')
randomPass = genToken(length=12)
html = mail_utils.renderTemplate('resetPassword.mako', {
'password': randomPass
})
mail_utils.sendEmail(to=email, subject='Girder: Password reset',
text=html)
self.model('user').setPassword(user, randomPass)
return {'message': 'Sent password reset email.'}
@access.public
@describeRoute(
Description('Create a temporary access token for a user. The user\'s '
'password is not changed.')
.param('email', 'Your email address.')
.errorResponse('That email does not exist in the system.')
)
def generateTemporaryPassword(self, params):
self.requireParams('email', params)
email = params['email'].lower().strip()
user = self.model('user').findOne({'email': email})
if not user:
raise RestException('That email is not registered.')
token = self.model('token').createToken(
user, days=1, scope=TokenScope.TEMPORARY_USER_AUTH)
url = '%s/#useraccount/%s/token/%s' % (
mail_utils.getEmailUrlPrefix(), str(user['_id']), str(token['_id']))
html = mail_utils.renderTemplate('temporaryAccess.mako', {
'url': url,
'token': str(token['_id'])
})
mail_utils.sendEmail(to=email, subject='Girder: Temporary access',
text=html)
return {'message': 'Sent temporary access email.'}
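    # Temporary-access flow: generateTemporaryPassword emails a scoped token,
    # checkTemporaryPassword (below) validates it and logs the user in, and
    # changePassword accepts that token in place of the old password.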
@access.public
@loadmodel(model='user', force=True)
@describeRoute(
Description('Check if a specified token is a temporary access token '
'for the specified user. If the token is valid, returns '
'information on the token and user.')
.param('id', 'The user ID to check.', paramType='path')
.param('token', 'The token to check.')
.errorResponse('The token does not grant temporary access to the '
'specified user.', 401)
)
def checkTemporaryPassword(self, user, params):
self.requireParams('token', params)
token = self.model('token').load(
params['token'], force=True, objectId=False, exc=True)
delta = (token['expires'] - datetime.datetime.utcnow()).total_seconds()
hasScope = self.model('token').hasScope(
token, TokenScope.TEMPORARY_USER_AUTH)
if token.get('userId') != user['_id'] or delta <= 0 or not hasScope:
raise AccessException(
'The token does not grant temporary access to this user.')
# Temp auth is verified, send an actual auth token now. We keep the
# temp token around since it can still be used on a subsequent request
# to change the password
authToken = self.sendAuthTokenCookie(user)
return {
'user': self.model('user').filter(user, user),
'authToken': {
'token': authToken['_id'],
'expires': authToken['expires'],
'temporary': True
},
'message': 'Temporary access token is valid.'
}
@access.public
@loadmodel(model='user', level=AccessType.READ)
@describeRoute(
Description('Get detailed information about a user.')
.param('id', 'The ID of the user.', paramType='path')
.errorResponse()
.errorResponse('Read access was denied on the user.', 403)
)
def getUserDetails(self, user, params):
return {
'nFolders': self.model('user').countFolders(
user, filterUser=self.getCurrentUser(), level=AccessType.READ)
}
|
|
#!/usr/bin/env python
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""Tests on XSD meta schema and XSD builtins"""
import unittest
from textwrap import dedent
from xmlschema import XMLSchemaDecodeError, XMLSchemaEncodeError, \
XMLSchemaValidationError, XMLSchema10, XMLSchema11
from xmlschema.names import XSD_STRING
from xmlschema.helpers import is_etree_element
from xmlschema.validators.builtins import XSD_10_BUILTIN_TYPES, \
XSD_11_BUILTIN_TYPES, xsd_builtin_types_factory
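# The builtin type objects exercised by these tests can also be used directly;
# a minimal sketch (outside the test suite):
#
#     types = XMLSchema10.builtin_types()
#     types['integer'].decode(' 42 ')   # -> 42
#     types['boolean'].encode(True)     # -> 'true'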
class TestXsd10BuiltinTypes(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.schema_class = XMLSchema10
cls.types = XMLSchema10.builtin_types()
@classmethod
def tearDownClass(cls):
XMLSchema10.meta_schema.clear()
def test_facet_lists(self):
for builtin_types in (XSD_10_BUILTIN_TYPES, XSD_11_BUILTIN_TYPES):
for item in builtin_types:
if 'facets' in item:
self.assertIsInstance(item['facets'], list)
self.assertLessEqual(len([e for e in item['facets'] if callable(e)]), 1)
for e in item['facets']:
self.assertTrue(callable(e) or is_etree_element(e))
def test_factory(self):
schema = self.schema_class(dedent("""\
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:element name="root"/>
</xs:schema>"""), use_meta=False, build=False)
with self.assertRaises(ValueError) as ctx:
xsd_types = {XSD_STRING: (None, schema)}
xsd_builtin_types_factory(schema.meta_schema, xsd_types)
self.assertEqual(str(ctx.exception), "loaded entry schema is not the meta-schema!")
def test_boolean_decode(self):
boolean_type = self.types['boolean']
self.assertTrue(boolean_type.decode(' true \n') is True)
self.assertTrue(boolean_type.decode(' 0 \n') is False)
self.assertTrue(boolean_type.decode(' 1 \n') is True)
self.assertTrue(boolean_type.decode(' false \n') is False)
self.assertRaises(XMLSchemaDecodeError, boolean_type.decode, ' 1.0 ')
self.assertRaises(XMLSchemaDecodeError, boolean_type.decode, ' alpha \n')
def test_boolean_encode(self):
boolean_type = self.types['boolean']
self.assertTrue(boolean_type.encode(True) == 'true')
self.assertTrue(boolean_type.encode(False) == 'false')
self.assertRaises(XMLSchemaEncodeError, boolean_type.encode, 1)
self.assertRaises(XMLSchemaEncodeError, boolean_type.encode, 0)
self.assertRaises(XMLSchemaEncodeError, boolean_type.encode, 10)
self.assertRaises(XMLSchemaEncodeError, boolean_type.encode, 'alpha')
def test_integer_decode(self):
integer_type = self.types['integer']
self.assertTrue(integer_type.decode(' 1000 \n') == 1000)
self.assertTrue(integer_type.decode(' -19 \n') == -19)
self.assertTrue(integer_type.decode(' 0\n') == 0)
self.assertRaises(XMLSchemaDecodeError, integer_type.decode, ' 1000.0 \n')
self.assertRaises(XMLSchemaDecodeError, integer_type.decode, ' alpha \n')
self.assertRaises(XMLSchemaValidationError, self.types['byte'].decode, ' 257 \n')
self.assertRaises(XMLSchemaValidationError, self.types['unsignedInt'].decode, ' -1')
def test_integer_encode(self):
integer_type = self.types['integer']
self.assertTrue(integer_type.encode(1000) == '1000')
self.assertTrue(integer_type.encode(-19) == '-19')
self.assertTrue(integer_type.encode(0) == '0')
self.assertRaises(XMLSchemaEncodeError, integer_type.encode, 10.1)
self.assertRaises(XMLSchemaEncodeError, integer_type.encode, 'alpha')
self.assertRaises(XMLSchemaValidationError, self.types['unsignedInt'].decode, ' -1')
def test_float_decode(self):
self.assertTrue(self.types['float'].decode(' 1000.1 \n') == 1000.10)
self.assertTrue(self.types['float'].decode(' -19 \n') == -19.0)
self.assertTrue(self.types['double'].decode(' 0.0001\n') == 0.0001)
self.assertRaises(XMLSchemaValidationError, self.types['float'].decode, ' true ')
self.assertRaises(XMLSchemaValidationError, self.types['double'].decode, ' alpha \n')
def test_float_encode(self):
float_type = self.types['float']
self.assertTrue(float_type.encode(1000.0) == '1000.0')
self.assertTrue(float_type.encode(-19.0) == '-19.0')
self.assertTrue(float_type.encode(0.0) == '0.0')
self.assertRaises(XMLSchemaEncodeError, float_type.encode, True)
self.assertRaises(XMLSchemaEncodeError, float_type.encode, 'alpha')
def test_time_type(self):
time_type = self.types['time']
self.assertTrue(time_type.is_valid('14:35:00'))
self.assertTrue(time_type.is_valid('14:35:20.5345'))
self.assertTrue(time_type.is_valid('14:35:00-01:00'))
self.assertTrue(time_type.is_valid('14:35:00Z'))
self.assertTrue(time_type.is_valid('00:00:00'))
self.assertTrue(time_type.is_valid('24:00:00'))
self.assertFalse(time_type.is_valid('4:20:00'))
self.assertFalse(time_type.is_valid('14:35:0'))
self.assertFalse(time_type.is_valid('14:35'))
self.assertFalse(time_type.is_valid('14:35.5:00'))
def test_datetime_type(self):
datetime_type = self.types['dateTime']
self.assertTrue(datetime_type.is_valid('2007-05-10T14:35:00'))
self.assertTrue(datetime_type.is_valid('2007-05-10T14:35:20.6'))
self.assertTrue(datetime_type.is_valid('2007-05-10T14:35:00-03:00'))
self.assertTrue(datetime_type.is_valid('2007-05-10T14:35:00Z'))
self.assertFalse(datetime_type.is_valid('2007-05-10T14:35'))
self.assertFalse(datetime_type.is_valid('2007-05-10t14:35:00'))
self.assertFalse(datetime_type.is_valid('2007-05-1014:35:00'))
self.assertFalse(datetime_type.is_valid('07-05-10T14:35:00'))
self.assertFalse(datetime_type.is_valid('2007-05-10'))
# Issue #85
self.assertTrue(datetime_type.is_valid('2018-10-10T13:57:53.0702116-04:00'))
def test_date_type(self):
date_type = self.types['date']
self.assertTrue(date_type.is_valid('2012-05-31'))
self.assertTrue(date_type.is_valid('-0065-10-15'))
self.assertTrue(date_type.is_valid('12012-05-31'))
self.assertTrue(date_type.is_valid('2012-05-31-05:00'))
self.assertTrue(date_type.is_valid('2015-06-30Z'))
self.assertFalse(date_type.is_valid('12-05-31'))
self.assertFalse(date_type.is_valid('2012-5-31'))
self.assertFalse(date_type.is_valid('31-05-2012'))
self.assertFalse(date_type.is_valid('1999-06-31'))
self.assertFalse(date_type.is_valid('+2012-05-31'))
self.assertFalse(date_type.is_valid(''))
def test_year_zero(self):
self.assertFalse(self.types['date'].is_valid('0000-01-01'))
def test_g_year_type(self):
g_year_type = self.types['gYear']
self.assertTrue(g_year_type.is_valid('2007'))
self.assertTrue(g_year_type.is_valid('2013-01:00'))
self.assertTrue(g_year_type.is_valid('102013-01:00'))
self.assertTrue(g_year_type.is_valid('0821'))
self.assertTrue(g_year_type.is_valid('0014'))
self.assertTrue(g_year_type.is_valid('-0044'))
self.assertTrue(g_year_type.is_valid('13999'))
self.assertFalse(g_year_type.is_valid('045'))
self.assertFalse(g_year_type.is_valid('800'))
self.assertFalse(g_year_type.is_valid(''))
def test_g_year_month_type(self):
g_year_month_type = self.types['gYearMonth']
self.assertTrue(g_year_month_type.is_valid('2010-07'))
self.assertTrue(g_year_month_type.is_valid('2020-01-05:00'))
self.assertFalse(g_year_month_type.is_valid('99-02'))
self.assertFalse(g_year_month_type.is_valid('1999'))
self.assertFalse(g_year_month_type.is_valid('1995-3'))
self.assertFalse(g_year_month_type.is_valid('1860-14'))
self.assertFalse(g_year_month_type.is_valid(''))
def test_g_month_type(self):
g_month_type = self.types['gMonth']
self.assertTrue(g_month_type.is_valid('--08'))
self.assertTrue(g_month_type.is_valid('--05-03:00'))
self.assertFalse(g_month_type.is_valid('03'))
self.assertFalse(g_month_type.is_valid('3'))
self.assertFalse(g_month_type.is_valid('--13'))
self.assertFalse(g_month_type.is_valid('--3'))
self.assertFalse(g_month_type.is_valid(''))
def test_g_month_day_type(self):
g_month_day_type = self.types['gMonthDay']
self.assertTrue(g_month_day_type.is_valid('--12-24'))
self.assertTrue(g_month_day_type.is_valid('--04-25Z'))
self.assertFalse(g_month_day_type.is_valid('12-24'))
self.assertFalse(g_month_day_type.is_valid('--11-31'))
self.assertFalse(g_month_day_type.is_valid('--2-11'))
self.assertFalse(g_month_day_type.is_valid('--02-1'))
self.assertFalse(g_month_day_type.is_valid(''))
def test_g_day_type(self):
g_day_type = self.types['gDay']
self.assertTrue(g_day_type.is_valid('---19'))
self.assertTrue(g_day_type.is_valid('---07'))
self.assertFalse(g_day_type.is_valid('---32'))
self.assertFalse(g_day_type.is_valid('07'))
self.assertFalse(g_day_type.is_valid('--07'))
self.assertFalse(g_day_type.is_valid('---7'))
self.assertFalse(g_day_type.is_valid(''))
def test_duration_type(self):
duration_type = self.types['duration']
self.assertTrue(duration_type.is_valid('-P809YT3H5M5S'))
self.assertTrue(duration_type.is_valid('P5Y7M20DT3H5M5S'))
self.assertTrue(duration_type.is_valid('P1DT6H'))
self.assertTrue(duration_type.is_valid('P15M'))
self.assertTrue(duration_type.is_valid('PT30M'))
self.assertTrue(duration_type.is_valid('P0Y15M0D'))
self.assertTrue(duration_type.is_valid('P0Y'))
self.assertTrue(duration_type.is_valid('-P10D'))
self.assertTrue(duration_type.is_valid('PT5M30.5S'))
self.assertTrue(duration_type.is_valid('PT10.5S'))
self.assertFalse(duration_type.is_valid('P-50M'))
self.assertFalse(duration_type.is_valid('P50MT'))
self.assertFalse(duration_type.is_valid('P1YM7D'))
self.assertFalse(duration_type.is_valid('P10.8Y'))
self.assertFalse(duration_type.is_valid('P3D5H'))
self.assertFalse(duration_type.is_valid('1Y'))
self.assertFalse(duration_type.is_valid('P3D4M'))
self.assertFalse(duration_type.is_valid('P'))
self.assertFalse(duration_type.is_valid('PT10.S'))
self.assertFalse(duration_type.is_valid(''))
class TestXsd11BuiltinTypes(TestXsd10BuiltinTypes):
@classmethod
def setUpClass(cls):
cls.schema_class = XMLSchema11
cls.types = XMLSchema11.builtin_types()
@classmethod
def tearDownClass(cls):
XMLSchema11.meta_schema.clear()
def test_year_zero(self):
self.assertTrue(self.types['date'].is_valid('0000-01-01'))
def test_date_time_stamp(self):
date_time_stamp_type = self.types['dateTimeStamp']
self.assertTrue(date_time_stamp_type.is_valid('2003-10-20T16:50:08-03:00'))
self.assertTrue(date_time_stamp_type.is_valid('2003-10-20T16:50:08Z'))
self.assertFalse(date_time_stamp_type.is_valid('2003-10-20T16:50:08'))
self.assertFalse(date_time_stamp_type.is_valid('1980-02-28T17:09:20.1'))
self.assertFalse(date_time_stamp_type.is_valid(''))
def test_day_time_duration_type(self):
day_time_duration_type = self.types['dayTimeDuration']
self.assertTrue(day_time_duration_type.is_valid('P7DT15H40M0S'))
self.assertTrue(day_time_duration_type.is_valid('-P10D'))
self.assertTrue(day_time_duration_type.is_valid('P0D'))
self.assertTrue(day_time_duration_type.is_valid('PT13M'))
self.assertTrue(day_time_duration_type.is_valid('P0DT17M'))
self.assertTrue(day_time_duration_type.is_valid('PT3H20M10.5S'))
self.assertFalse(day_time_duration_type.is_valid('PT5D'))
self.assertFalse(day_time_duration_type.is_valid('PT3HM10S'))
self.assertFalse(day_time_duration_type.is_valid('P7DT'))
self.assertFalse(day_time_duration_type.is_valid('PT3H1.4M'))
self.assertFalse(day_time_duration_type.is_valid('P-5D'))
self.assertFalse(day_time_duration_type.is_valid('P1D5H'))
self.assertFalse(day_time_duration_type.is_valid('PT10M21.S'))
self.assertFalse(day_time_duration_type.is_valid('P'))
self.assertFalse(day_time_duration_type.is_valid(''))
def test_year_month_duration_type(self):
year_month_duration_type = self.types['yearMonthDuration']
self.assertTrue(year_month_duration_type.is_valid('P3Y4M'))
self.assertTrue(year_month_duration_type.is_valid('P15M'))
self.assertTrue(year_month_duration_type.is_valid('P0Y'))
self.assertTrue(year_month_duration_type.is_valid('P0Y23M'))
self.assertTrue(year_month_duration_type.is_valid('-P8Y'))
self.assertFalse(year_month_duration_type.is_valid('3Y4M'))
self.assertFalse(year_month_duration_type.is_valid('P6M1Y'))
self.assertFalse(year_month_duration_type.is_valid('P'))
self.assertFalse(year_month_duration_type.is_valid('P1Y6M15D'))
self.assertFalse(year_month_duration_type.is_valid('P1.2Y'))
self.assertFalse(year_month_duration_type.is_valid('P2YM'))
self.assertFalse(year_month_duration_type.is_valid('P-1Y'))
self.assertFalse(year_month_duration_type.is_valid(''))
if __name__ == '__main__':
import platform
header_template = "Test xmlschema's XSD builtins with Python {} on {}"
header = header_template.format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{0}'.format("*" * len(header), header))
unittest.main()
|
|
import urllib2
import json
import hashlib
import hmac
import time
from django.conf import settings
# GET /api/v1/account/balance HTTP/1.1
# Accept: */*
# User-Agent: Python
# ACCESS_KEY: <YOUR-API-KEY>
# ACCESS_SIGNATURE: <YOUR-COMPUTED-SIGNATURE>
# ACCESS_NONCE: <YOUR-UPDATED-NONCE>
# Connection: close
# Host: coinbase.com
class CoinbaseV1():
def refresh_token(
self,
access_token,
refresh_token,
app_client_id,
app_client_secret):
opener = urllib2.build_opener()
refresh_body = {
'grant_type':'refresh_token',
'access_token':access_token,
'refresh_token':refresh_token,
'client_id':app_client_id,
'client_secret':app_client_secret
}
# if settings.DEBUG == 'true':
# print json.dumps(refresh_body)
refresh_response = opener.open(urllib2.Request(
'https://coinbase.com/oauth/token?access_token={0}'.format(access_token),
json.dumps(refresh_body),
{'Content-Type': 'application/json'}))
response_string = refresh_response.read()
return json.loads(response_string)
    # this version is used for plain requests without OAuth or API-key auth
def get_and_post_http(
self,
url,
body=None):
opener = urllib2.build_opener()
try:
if body:
print 'POST body: {0}'.format(body)
response_stream = opener.open(urllib2.Request('{0}'.format(url),body,{'Content-Type': 'application/json'}))
            # parse the JSON body of the response and return it
response_string = response_stream.read()
response_object = json.loads(response_string)
response_object['error_code'] = None
return response_object
except urllib2.HTTPError as e:
return {'error_code':e.code,'message':'HTTP Error'}
    # this really needs to be refactored to always return an object
    # so that the client refresh can have control over the model.
    # the behavior here is to act as a POST when a body is present and as a GET when it is None
def get_and_post_http_oauth(
self,
url,
access_token,
refresh_token,
body=None):
opener = urllib2.build_opener()
try:
print 'url: {0}?access_token={1}'.format(url,access_token)
if body:
print 'POST body: {0}'.format(body)
response_stream = opener.open(urllib2.Request('{0}?access_token={1}'.format(url,access_token),body,{'Content-Type': 'application/json'}))
            # return the parsed response with the access/refresh tokens attached
response_string = response_stream.read()
response_object = json.loads(response_string)
response_object['access_token'] = access_token
response_object['refresh_token'] = refresh_token
response_object['error_code'] = None
return response_object
except urllib2.HTTPError as e:
return {'error_code':e.code,'message':'HTTP Error'}
def get_and_post_http_api(
self,
url,
body=None,
access_key=None,
access_secret=None):
opener = urllib2.build_opener()
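        # Coinbase v1 API-key auth: sign str(nonce) + url + body with the API
        # secret using HMAC-SHA256, then send the key, signature and nonce as
        # the ACCESS_* headers below.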
nonce = int(time.time() * 1e6)
message = str(nonce) + url + ('' if body is None else body)
signature = hmac.new(access_secret, message, hashlib.sha256).hexdigest()
opener.addheaders = [('ACCESS_KEY', access_key),
('ACCESS_SIGNATURE', signature),
                             ('ACCESS_NONCE', str(nonce))]
try:
response = opener.open(urllib2.Request(url,body,{'Content-Type': 'application/json'}))
return response
except urllib2.HTTPError as e:
return e
def get_json_api(self, url,
body,
access_key,
access_secret):
        response = self.get_and_post_http_api(url, body, access_key, access_secret)
json_response = response.read()
return json.loads(json_response)
def post_json_api(self, url, data_obj,
access_key,
access_secret):
        response = self.get_and_post_http_api(url, json.dumps(data_obj), access_key, access_secret)
json_response = response.read()
return json.loads(json_response)
# Redirect the user to this page
# https://coinbase.com/oauth/authorize?response_type=code&client_id=YOUR_CLIENT_ID&redirect_uri=YOUR_CALLBACK_URL
def get_oauth_redirect(self, client_id, client_callback):
return 'https://coinbase.com/oauth/authorize?response_type=code&client_id={0}&redirect_uri={1}'.format(
client_id,
client_callback)
# Initiate a POST request to get the access token
# https://coinbase.com/oauth/token?grant_type=authorization_code&code=CODE&redirect_uri=YOUR_CALLBACK_URL&client_id=CLIENT_ID&client_secret=CLIENT_SECRET
def post_oauth_response(self, code, client_callback, client_id, client_secret):
post_url = 'https://coinbase.com/oauth/token?grant_type=authorization_code' \
'&code={0}&' \
'redirect_uri={1}&' \
'client_id={2}' \
'&client_secret={3}'.format(
code,
client_callback,
client_id,
client_secret
)
oauth_response = self.get_and_post_http(post_url, {})
# Response containing the 'access_token'
# {
# "access_token": "...",
# "refresh_token": "...",
# "token_type": "bearer",
# "expire_in": 7200,
# "scope": "universal"
# }
return oauth_response
def make_button(self,
button_request,
access_token,
refresh_token,
client_id,
client_secret):
        # refresh the token
refresh_response = self.refresh_token(
access_token,
refresh_token,
client_id,
client_secret)
        # this has a body, so it will POST
# use the new token
button_response = self.get_and_post_http_oauth('https://coinbase.com/api/v1/buttons',
refresh_response['access_token'],
refresh_response['refresh_token'],
json.dumps(button_request))
button_response['access_token'] = refresh_response['access_token']
button_response['refresh_token'] = refresh_response['refresh_token']
return button_response
# # Request
# GET https://api.coinbase.com/v1/users
#
# # Response
# {
# "users": [
# {
# "user": {
# "id": "512db383f8182bd24d000001",
# "name": "User One",
# "email": "user1@example.com",
# "time_zone": "Pacific Time (US & Canada)",
# "native_currency": "USD",
# "balance": {
# "amount": "49.76000000",
# "currency": "BTC"
# },
# "merchant": {
# "company_name": "Company Name, Inc.",
# "logo": {
# "small": "http://smalllogo.example",
# "medium": "http://mediumlogo.example",
# "url": "http://logo.example"
# }
# },
# "buy_level": 1,
# "sell_level": 1,
# "buy_limit": {
# "amount": "10.00000000",
# "currency": "BTC"
# },
# "sell_limit": {
# "amount": "100.00000000",
# "currency": "BTC"
# }
# }
# }
# ]
# }
def get_oauth_users(self,
access_token,
refresh_token,
client_id,
client_secret):
        # refresh the token
refresh_response = self.refresh_token(
access_token,
refresh_token,
client_id,
client_secret)
        # this has no body, so it will GET
response_object = self.get_and_post_http_oauth(
'https://api.coinbase.com/v1/users',
refresh_response['access_token'],
refresh_response['refresh_token'])
response_object['access_token'] = refresh_response['access_token']
response_object['refresh_token'] = refresh_response['refresh_token']
return response_object
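# Illustrative usage sketch (not part of the client): the identifiers below are
# placeholders and the calls hit the live Coinbase v1 OAuth endpoints, so this
# is only meant to show how the methods above fit together.
if __name__ == '__main__':
    client = CoinbaseV1()
    # 1. Send the user to the OAuth consent page.
    redirect_url = client.get_oauth_redirect('YOUR_CLIENT_ID', 'https://example.com/callback')
    # 2. After the callback, exchange the returned code for tokens.
    tokens = client.post_oauth_response('CODE_FROM_CALLBACK', 'https://example.com/callback',
                                        'YOUR_CLIENT_ID', 'YOUR_CLIENT_SECRET')
    # 3. Authenticated calls refresh the token and return the updated credentials.
    users = client.get_oauth_users(tokens['access_token'], tokens['refresh_token'],
                                   'YOUR_CLIENT_ID', 'YOUR_CLIENT_SECRET')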
|
|
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import urllib.parse as urlparse
import glance_store as store_api
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
import glance.db as db_api
from glance.i18n import _LE, _LW
from glance import scrubber
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
RESTRICTED_URI_SCHEMAS = frozenset(['file', 'filesystem', 'swift+config'])
def check_reserved_stores(enabled_stores):
for store in enabled_stores:
if store.startswith("os_glance_"):
return True
return False
def safe_delete_from_backend(context, image_id, location):
"""
Given a location, delete an image from the store and
update location status to db.
    This function tries to handle all known exceptions which might be raised
by those calls on store and DB modules in its implementation.
:param context: The request context
:param image_id: The image identifier
:param location: The image location entry
"""
try:
if CONF.enabled_backends:
backend = location['metadata'].get('store')
ret = store_api.delete(location['url'],
backend,
context=context)
else:
ret = store_api.delete_from_backend(location['url'],
context=context)
location['status'] = 'deleted'
if 'id' in location:
db_api.get_api().image_location_delete(context, image_id,
location['id'], 'deleted')
return ret
except store_api.NotFound:
msg = ("The image data for %(iid)s was not found in the store. "
"The image record has been updated to reflect "
"this." % {'iid': image_id})
LOG.warning(msg)
except store_api.StoreDeleteNotSupported as e:
LOG.warning(encodeutils.exception_to_unicode(e))
except store_api.UnsupportedBackend:
exc_type = sys.exc_info()[0].__name__
msg = (_LE('Failed to delete image %(image_id)s from store: %(exc)s') %
dict(image_id=image_id, exc=exc_type))
LOG.error(msg)
def schedule_delayed_delete_from_backend(context, image_id, location):
"""
Given a location, schedule the deletion of an image location and
update location status to db.
:param context: The request context
:param image_id: The image identifier
:param location: The image location entry
"""
db_queue = scrubber.get_scrub_queue()
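    # NOTE: the request context is cleared here, so the location-status updates
    # below are made without the caller's token.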
context = None
ret = db_queue.add_location(image_id, location)
if ret:
location['status'] = 'pending_delete'
if 'id' in location:
            # NOTE(zhiyan): A newly added image location entry has no 'id'
            # field since it has not been saved to the DB yet.
db_api.get_api().image_location_delete(context, image_id,
location['id'],
'pending_delete')
else:
db_api.get_api().image_location_add(context, image_id, location)
return ret
def delete_image_location_from_backend(context, image_id, location):
"""
Given a location, immediately or schedule the deletion of an image
location and update location status to db.
:param context: The request context
:param image_id: The image identifier
:param location: The image location entry
"""
deleted = False
if CONF.delayed_delete:
deleted = schedule_delayed_delete_from_backend(context,
image_id, location)
if not deleted:
# NOTE(zhiyan) If image metadata has not been saved to DB
# such as uploading process failure then we can't use
# location status mechanism to support image pending delete.
safe_delete_from_backend(context, image_id, location)
def validate_external_location(uri):
"""
    Validate whether the URI of an external location is supported.
    Only non-local store types are OK, i.e. Swift and
    HTTP. Note the absence of 'file://' for security reasons
    (see LP bugs #942118 and #1400966); 'swift+config://' is also
    absent for security reasons (see LP bug #1334196).
    :param uri: The URI of the external image location.
    :returns: Whether the given URI of the external image location is OK.
"""
if not uri:
return False
# TODO(zhiyan): This function could be moved to glance_store.
# TODO(gm): Use a whitelist of allowed schemes
scheme = urlparse.urlparse(uri).scheme
known_schemes = store_api.get_known_schemes()
if CONF.enabled_backends:
known_schemes = store_api.get_known_schemes_for_multi_store()
return (scheme in known_schemes and
scheme not in RESTRICTED_URI_SCHEMAS)
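# Illustrative check (assuming the default glance_store schemes are loaded):
# 'http://example.com/image.qcow2' would pass validate_external_location(),
# while 'file:///var/lib/glance/images/abc' and 'swift+config://ref/obj' are
# rejected because their schemes are in RESTRICTED_URI_SCHEMAS.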
def _get_store_id_from_uri(uri):
scheme = urlparse.urlparse(uri).scheme
location_map = store_api.location.SCHEME_TO_CLS_BACKEND_MAP
url_matched = False
if scheme not in location_map:
LOG.warning("Unknown scheme '%(scheme)s' found in uri '%(uri)s'", {
'scheme': scheme, 'uri': uri})
return
for store in location_map[scheme]:
store_instance = location_map[scheme][store]['store']
url_prefix = store_instance.url_prefix
if url_prefix and uri.startswith(url_prefix):
url_matched = True
break
if url_matched:
return u"%s" % store
else:
LOG.warning("Invalid location uri %s", uri)
return
def update_store_in_locations(context, image, image_repo):
store_updated = False
for loc in image.locations:
if (not loc['metadata'].get(
'store') or loc['metadata'].get(
'store') not in CONF.enabled_backends):
if loc['url'].startswith("cinder://"):
_update_cinder_location_and_store_id(context, loc)
store_id = _get_store_id_from_uri(loc['url'])
if store_id:
if 'store' in loc['metadata']:
old_store = loc['metadata']['store']
if old_store != store_id:
LOG.debug("Store '%(old)s' has changed to "
"'%(new)s' by operator, updating "
"the same in the location of image "
"'%(id)s'", {'old': old_store,
'new': store_id,
'id': image.image_id})
store_updated = True
loc['metadata']['store'] = store_id
if store_updated:
image_repo.save(image)
def _update_cinder_location_and_store_id(context, loc):
"""Update store location of legacy images
    While upgrading from a single cinder store to multiple stores,
    images that have a store configured with a volume type matching
    the image volume's type will be migrated/associated to that store,
    and their location url will be updated to the new format,
    i.e. cinder://store-id/volume-id.
    If there is no store configured for the image, the location url will
    not be updated.
"""
uri = loc['url']
volume_id = loc['url'].split("/")[-1]
scheme = urlparse.urlparse(uri).scheme
location_map = store_api.location.SCHEME_TO_CLS_BACKEND_MAP
if scheme not in location_map:
LOG.warning(_LW("Unknown scheme '%(scheme)s' found in uri '%(uri)s'"),
{'scheme': scheme, 'uri': uri})
return
for store in location_map[scheme]:
store_instance = location_map[scheme][store]['store']
if store_instance.is_image_associated_with_store(context, volume_id):
url_prefix = store_instance.url_prefix
loc['url'] = "%s/%s" % (url_prefix, volume_id)
loc['metadata']['store'] = "%s" % store
return
LOG.warning(_LW("Not able to update location url '%s' of legacy image "
"due to unknown issues."), uri)
def get_updated_store_location(locations):
for loc in locations:
store_id = _get_store_id_from_uri(loc['url'])
if store_id:
loc['metadata']['store'] = store_id
return locations
def get_dir_separator():
separator = ''
staging_dir = "file://%s" % getattr(
CONF, 'os_glance_staging_store').filesystem_store_datadir
if not staging_dir.endswith('/'):
separator = '/'
return separator, staging_dir
|
|
from collections.abc import Iterable, Mapping
from numbers import Real, Integral
from pathlib import Path
import subprocess
import sys
import warnings
from xml.etree import ElementTree as ET
import numpy as np
import openmc
import openmc.checkvalue as cv
from openmc._xml import clean_indentation
from openmc.mixin import IDManagerMixin
_BASES = ['xy', 'xz', 'yz']
_SVG_COLORS = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50)
}
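# _SVG_COLORS maps SVG keyword names to (R, G, B) triplets, e.g.
# _SVG_COLORS['steelblue'] == (70, 130, 180); the setters below use it to
# accept color names anywhere an RGB tuple is accepted.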
class Plot(IDManagerMixin):
"""Definition of a finite region of space to be plotted.
OpenMC is capable of generating two-dimensional slice plots and
three-dimensional voxel plots. Colors that are used in plots can be given as
RGB tuples, e.g. (255, 255, 255) would be white, or by a string indicating a
valid `SVG color <https://www.w3.org/TR/SVG/types.html#ColorKeywords>`_.
Parameters
----------
plot_id : int
Unique identifier for the plot
name : str
Name of the plot
Attributes
----------
id : int
Unique identifier
name : str
Name of the plot
width : Iterable of float
Width of the plot in each basis direction
pixels : Iterable of int
Number of pixels to use in each basis direction
origin : tuple or list of ndarray
Origin (center) of the plot
    filename : str
Path to write the plot to
color_by : {'cell', 'material'}
Indicate whether the plot should be colored by cell or by material
type : {'slice', 'voxel'}
The type of the plot
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
background : Iterable of int or str
Color of the background
mask_components : Iterable of openmc.Cell or openmc.Material
The cells or materials to plot
mask_background : Iterable of int or str
Color to apply to all cells/materials not listed in mask_components
colors : dict
Dictionary indicating that certain cells/materials (keys) should be
displayed with a particular color.
level : int
Universe depth to plot at
meshlines : dict
Dictionary defining type, id, linewidth and color of a regular mesh
to be plotted on top of a plot
"""
next_id = 1
used_ids = set()
def __init__(self, plot_id=None, name=''):
# Initialize Plot class attributes
self.id = plot_id
self.name = name
self._width = [4.0, 4.0]
self._pixels = [400, 400]
self._origin = [0., 0., 0.]
self._filename = None
self._color_by = 'cell'
self._type = 'slice'
self._basis = 'xy'
self._background = None
self._mask_components = None
self._mask_background = None
self._colors = {}
self._level = None
self._meshlines = None
@property
def name(self):
return self._name
@property
def width(self):
return self._width
@property
def pixels(self):
return self._pixels
@property
def origin(self):
return self._origin
@property
def filename(self):
return self._filename
@property
def color_by(self):
return self._color_by
@property
def type(self):
return self._type
@property
def basis(self):
return self._basis
@property
def background(self):
return self._background
@property
def mask_components(self):
return self._mask_components
@property
def mask_background(self):
return self._mask_background
@property
def colors(self):
return self._colors
@property
def level(self):
return self._level
@property
def meshlines(self):
return self._meshlines
@name.setter
def name(self, name):
cv.check_type('plot name', name, str)
self._name = name
@width.setter
def width(self, width):
cv.check_type('plot width', width, Iterable, Real)
cv.check_length('plot width', width, 2, 3)
self._width = width
@origin.setter
def origin(self, origin):
cv.check_type('plot origin', origin, Iterable, Real)
cv.check_length('plot origin', origin, 3)
self._origin = origin
@pixels.setter
def pixels(self, pixels):
cv.check_type('plot pixels', pixels, Iterable, Integral)
cv.check_length('plot pixels', pixels, 2, 3)
for dim in pixels:
cv.check_greater_than('plot pixels', dim, 0)
self._pixels = pixels
@filename.setter
def filename(self, filename):
cv.check_type('filename', filename, str)
self._filename = filename
@color_by.setter
def color_by(self, color_by):
cv.check_value('plot color_by', color_by, ['cell', 'material'])
self._color_by = color_by
@type.setter
def type(self, plottype):
cv.check_value('plot type', plottype, ['slice', 'voxel'])
self._type = plottype
@basis.setter
def basis(self, basis):
cv.check_value('plot basis', basis, _BASES)
self._basis = basis
@background.setter
def background(self, background):
cv.check_type('plot background', background, Iterable)
if isinstance(background, str):
if background.lower() not in _SVG_COLORS:
raise ValueError("'{}' is not a valid color.".format(background))
else:
cv.check_length('plot background', background, 3)
for rgb in background:
cv.check_greater_than('plot background', rgb, 0, True)
cv.check_less_than('plot background', rgb, 256)
self._background = background
@colors.setter
def colors(self, colors):
cv.check_type('plot colors', colors, Mapping)
for key, value in colors.items():
cv.check_type('plot color key', key, (openmc.Cell, openmc.Material))
cv.check_type('plot color value', value, Iterable)
if isinstance(value, str):
if value.lower() not in _SVG_COLORS:
raise ValueError("'{}' is not a valid color.".format(value))
else:
cv.check_length('plot color (RGB)', value, 3)
for component in value:
cv.check_type('RGB component', component, Real)
cv.check_greater_than('RGB component', component, 0, True)
cv.check_less_than('RGB component', component, 255, True)
self._colors = colors
@mask_components.setter
def mask_components(self, mask_components):
cv.check_type('plot mask components', mask_components, Iterable,
(openmc.Cell, openmc.Material))
self._mask_components = mask_components
@mask_background.setter
def mask_background(self, mask_background):
cv.check_type('plot mask background', mask_background, Iterable)
if isinstance(mask_background, str):
if mask_background.lower() not in _SVG_COLORS:
raise ValueError("'{}' is not a valid color.".format(mask_background))
else:
cv.check_length('plot mask_background', mask_background, 3)
for rgb in mask_background:
cv.check_greater_than('plot mask background', rgb, 0, True)
cv.check_less_than('plot mask background', rgb, 256)
self._mask_background = mask_background
@level.setter
def level(self, plot_level):
cv.check_type('plot level', plot_level, Integral)
cv.check_greater_than('plot level', plot_level, 0, equality=True)
self._level = plot_level
@meshlines.setter
def meshlines(self, meshlines):
cv.check_type('plot meshlines', meshlines, dict)
if 'type' not in meshlines:
            msg = 'Unable to set the meshlines "{0}" on the plot because ' \
                  'it does not have a "type" key'.format(meshlines)
raise ValueError(msg)
elif meshlines['type'] not in ['tally', 'entropy', 'ufs', 'cmfd']:
msg = 'Unable to set the meshlines with ' \
'type "{0}"'.format(meshlines['type'])
raise ValueError(msg)
if 'id' in meshlines:
cv.check_type('plot meshlines id', meshlines['id'], Integral)
cv.check_greater_than('plot meshlines id', meshlines['id'], 0,
equality=True)
if 'linewidth' in meshlines:
cv.check_type('plot mesh linewidth', meshlines['linewidth'], Integral)
cv.check_greater_than('plot mesh linewidth', meshlines['linewidth'],
0, equality=True)
if 'color' in meshlines:
cv.check_type('plot meshlines color', meshlines['color'], Iterable,
Integral)
cv.check_length('plot meshlines color', meshlines['color'], 3)
for rgb in meshlines['color']:
cv.check_greater_than('plot meshlines color', rgb, 0, True)
cv.check_less_than('plot meshlines color', rgb, 256)
self._meshlines = meshlines
def __repr__(self):
string = 'Plot\n'
string += '{: <16}=\t{}\n'.format('\tID', self._id)
string += '{: <16}=\t{}\n'.format('\tName', self._name)
string += '{: <16}=\t{}\n'.format('\tFilename', self._filename)
string += '{: <16}=\t{}\n'.format('\tType', self._type)
string += '{: <16}=\t{}\n'.format('\tBasis', self._basis)
string += '{: <16}=\t{}\n'.format('\tWidth', self._width)
string += '{: <16}=\t{}\n'.format('\tOrigin', self._origin)
        string += '{: <16}=\t{}\n'.format('\tPixels', self._pixels)
string += '{: <16}=\t{}\n'.format('\tColor by', self._color_by)
string += '{: <16}=\t{}\n'.format('\tBackground', self._background)
string += '{: <16}=\t{}\n'.format('\tMask components',
self._mask_components)
string += '{: <16}=\t{}\n'.format('\tMask background',
self._mask_background)
string += '{: <16}=\t{}\n'.format('\tColors', self._colors)
string += '{: <16}=\t{}\n'.format('\tLevel', self._level)
string += '{: <16}=\t{}\n'.format('\tMeshlines', self._meshlines)
return string
@classmethod
def from_geometry(cls, geometry, basis='xy', slice_coord=0.):
"""Return plot that encompasses a geometry.
Parameters
----------
geometry : openmc.Geometry
            The geometry to base the plot off of
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
slice_coord : float
The level at which the slice plot should be plotted. For example, if
the basis is 'xy', this would indicate the z value used in the
origin.
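        Examples
        --------
        A minimal usage sketch, assuming a fully bounded geometry ``geom``
        has been constructed elsewhere::
            plot = openmc.Plot.from_geometry(geom, basis='xz')
            plot.pixels = (600, 600)
            plot.color_by = 'material'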
"""
cv.check_type('geometry', geometry, openmc.Geometry)
cv.check_value('basis', basis, _BASES)
# Decide which axes to keep
if basis == 'xy':
pick_index = (0, 1)
slice_index = 2
elif basis == 'yz':
pick_index = (1, 2)
slice_index = 0
elif basis == 'xz':
pick_index = (0, 2)
slice_index = 1
# Get lower-left and upper-right coordinates for desired axes
lower_left, upper_right = geometry.bounding_box
lower_left = lower_left[np.array(pick_index)]
upper_right = upper_right[np.array(pick_index)]
if np.any(np.isinf((lower_left, upper_right))):
raise ValueError('The geometry does not appear to be bounded '
'in the {} plane.'.format(basis))
plot = cls()
plot.origin = np.insert((lower_left + upper_right)/2,
slice_index, slice_coord)
plot.width = upper_right - lower_left
return plot
def colorize(self, geometry, seed=1):
"""Generate a color scheme for each domain in the plot.
This routine may be used to generate random, reproducible color schemes.
The colors generated are based upon cell/material IDs in the geometry.
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plot is defined
seed : Integral
The random number seed used to generate the color scheme
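        Examples
        --------
        A minimal usage sketch, assuming ``geom`` is an existing
        :class:`openmc.Geometry`::
            plot = openmc.Plot.from_geometry(geom)
            plot.color_by = 'material'
            plot.colorize(geom, seed=42)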
"""
cv.check_type('geometry', geometry, openmc.Geometry)
cv.check_type('seed', seed, Integral)
cv.check_greater_than('seed', seed, 1, equality=True)
# Get collections of the domains which will be plotted
if self.color_by == 'material':
domains = geometry.get_all_materials().values()
else:
domains = geometry.get_all_cells().values()
# Set the seed for the random number generator
np.random.seed(seed)
# Generate random colors for each feature
for domain in domains:
self.colors[domain] = np.random.randint(0, 256, (3,))
def highlight_domains(self, geometry, domains, seed=1,
alpha=0.5, background='gray'):
"""Use alpha compositing to highlight one or more domains in the plot.
This routine generates a color scheme and applies alpha compositing to
make all domains except the highlighted ones appear partially
transparent.
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plot is defined
        domains : Iterable of openmc.Cell or openmc.Material
            A collection of the domains to highlight in the plot
        seed : int
            The random number seed used to generate the color scheme
        alpha : float
            The value between 0 and 1 to apply in alpha compositing
        background : 3-tuple of int or str
            The background color to apply in alpha compositing
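        Examples
        --------
        A minimal usage sketch, assuming ``geom`` is an existing geometry and
        ``fuel`` is one of its materials::
            plot = openmc.Plot.from_geometry(geom)
            plot.color_by = 'material'
            plot.highlight_domains(geom, [fuel], alpha=0.3)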
"""
cv.check_type('domains', domains, Iterable,
(openmc.Cell, openmc.Material))
cv.check_type('alpha', alpha, Real)
cv.check_greater_than('alpha', alpha, 0., equality=True)
cv.check_less_than('alpha', alpha, 1., equality=True)
cv.check_type('background', background, Iterable)
# Get a background (R,G,B) tuple to apply in alpha compositing
if isinstance(background, str):
if background.lower() not in _SVG_COLORS:
raise ValueError("'{}' is not a valid color.".format(background))
background = _SVG_COLORS[background.lower()]
# Generate a color scheme
self.colorize(geometry, seed)
# Apply alpha compositing to the colors for all domains
# other than those the user wishes to highlight
for domain, color in self.colors.items():
if domain not in domains:
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
r, g, b = color
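                # Alpha-composite each channel toward the background color:
                # c_out = (1 - alpha)*background + alpha*c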
r = int(((1-alpha) * background[0]) + (alpha * r))
g = int(((1-alpha) * background[1]) + (alpha * g))
b = int(((1-alpha) * background[2]) + (alpha * b))
self._colors[domain] = (r, g, b)
def to_xml_element(self):
"""Return XML representation of the plot
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing plot data
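        Examples
        --------
        A minimal usage sketch, assuming ``plot`` is a configured
        :class:`openmc.Plot`::
            import xml.etree.ElementTree as ET
            print(ET.tostring(plot.to_xml_element()))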
"""
element = ET.Element("plot")
element.set("id", str(self._id))
if self._filename is not None:
element.set("filename", self._filename)
element.set("color_by", self._color_by)
element.set("type", self._type)
        if self._type == 'slice':
element.set("basis", self._basis)
subelement = ET.SubElement(element, "origin")
subelement.text = ' '.join(map(str, self._origin))
subelement = ET.SubElement(element, "width")
subelement.text = ' '.join(map(str, self._width))
subelement = ET.SubElement(element, "pixels")
subelement.text = ' '.join(map(str, self._pixels))
if self._background is not None:
subelement = ET.SubElement(element, "background")
color = self._background
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement.text = ' '.join(str(x) for x in color)
if self._colors:
for domain, color in sorted(self._colors.items(),
key=lambda x: x[0].id):
subelement = ET.SubElement(element, "color")
subelement.set("id", str(domain.id))
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement.set("rgb", ' '.join(str(x) for x in color))
if self._mask_components is not None:
subelement = ET.SubElement(element, "mask")
subelement.set("components", ' '.join(
str(d.id) for d in self._mask_components))
color = self._mask_background
if color is not None:
if isinstance(color, str):
color = _SVG_COLORS[color.lower()]
subelement.set("background", ' '.join(
str(x) for x in color))
if self._level is not None:
subelement = ET.SubElement(element, "level")
subelement.text = str(self._level)
if self._meshlines is not None:
subelement = ET.SubElement(element, "meshlines")
subelement.set("meshtype", self._meshlines['type'])
            if 'id' in self._meshlines:
                subelement.set("id", str(self._meshlines['id']))
            if 'linewidth' in self._meshlines:
                subelement.set("linewidth", str(self._meshlines['linewidth']))
            if 'color' in self._meshlines:
                subelement.set("color", ' '.join(map(
                    str, self._meshlines['color'])))
return element
def to_ipython_image(self, openmc_exec='openmc', cwd='.',
convert_exec='convert'):
"""Render plot as an image
This method runs OpenMC in plotting mode to produce a bitmap image which
is then converted to a .png file and loaded in as an
:class:`IPython.display.Image` object. As such, it requires that your
model geometry, materials, and settings have already been exported to
XML.
Parameters
----------
openmc_exec : str
Path to OpenMC executable
cwd : str, optional
Path to working directory to run in
convert_exec : str, optional
Command that can convert PPM files into PNG files
Returns
-------
IPython.display.Image
Image generated
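        Examples
        --------
        A minimal usage sketch, assuming the geometry, materials, and settings
        XML files already exist and that ``openmc`` and ImageMagick's
        ``convert`` are both on the PATH::
            image = plot.to_ipython_image()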
"""
from IPython.display import Image
# Create plots.xml
Plots([self]).export_to_xml()
# Run OpenMC in geometry plotting mode
openmc.plot_geometry(False, openmc_exec, cwd)
# Convert to .png
if self.filename is not None:
ppm_file = '{}.ppm'.format(self.filename)
else:
ppm_file = 'plot_{}.ppm'.format(self.id)
png_file = ppm_file.replace('.ppm', '.png')
subprocess.check_call([convert_exec, ppm_file, png_file])
return Image(png_file)
class Plots(cv.CheckedList):
"""Collection of Plots used for an OpenMC simulation.
This class corresponds directly to the plots.xml input file. It can be
thought of as a normal Python list where each member is a :class:`Plot`. It
behaves like a list as the following example demonstrates:
>>> xz_plot = openmc.Plot()
>>> big_plot = openmc.Plot()
>>> small_plot = openmc.Plot()
>>> p = openmc.Plots((xz_plot, big_plot))
>>> p.append(small_plot)
>>> small_plot = p.pop()
Parameters
----------
plots : Iterable of openmc.Plot
Plots to add to the collection
"""
def __init__(self, plots=None):
super().__init__(Plot, 'plots collection')
self._plots_file = ET.Element("plots")
if plots is not None:
self += plots
def append(self, plot):
"""Append plot to collection
Parameters
----------
plot : openmc.Plot
Plot to append
"""
super().append(plot)
def insert(self, index, plot):
"""Insert plot before index
Parameters
----------
index : int
Index in list
plot : openmc.Plot
Plot to insert
"""
super().insert(index, plot)
def colorize(self, geometry, seed=1):
"""Generate a consistent color scheme for each domain in each plot.
This routine may be used to generate random, reproducible color schemes.
The colors generated are based upon cell/material IDs in the geometry.
The color schemes will be consistent for all plots in "plots.xml".
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plots are defined
seed : Integral
The random number seed used to generate the color scheme
"""
for plot in self:
plot.colorize(geometry, seed)
def highlight_domains(self, geometry, domains, seed=1,
alpha=0.5, background='gray'):
"""Use alpha compositing to highlight one or more domains in the plot.
This routine generates a color scheme and applies alpha compositing to
make all domains except the highlighted ones appear partially
transparent.
Parameters
----------
geometry : openmc.Geometry
The geometry for which the plot is defined
        domains : Iterable of openmc.Cell or openmc.Material
            A collection of the domains to highlight in the plot
        seed : int
            The random number seed used to generate the color scheme
        alpha : float
            The value between 0 and 1 to apply in alpha compositing
        background : 3-tuple of int or str
            The background color to apply in alpha compositing
"""
for plot in self:
plot.highlight_domains(geometry, domains, seed, alpha, background)
def _create_plot_subelements(self):
for plot in self:
xml_element = plot.to_xml_element()
if len(plot.name) > 0:
self._plots_file.append(ET.Comment(plot.name))
self._plots_file.append(xml_element)
def export_to_xml(self, path='plots.xml'):
"""Export plot specifications to an XML file.
Parameters
----------
path : str
Path to file to write. Defaults to 'plots.xml'.
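        Examples
        --------
        A minimal usage sketch, assuming ``plot1`` and ``plot2`` are
        previously created :class:`openmc.Plot` instances::
            plots = openmc.Plots([plot1, plot2])
            plots.export_to_xml()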
"""
# Reset xml element tree
self._plots_file.clear()
self._create_plot_subelements()
# Clean the indentation in the file to be user-readable
clean_indentation(self._plots_file)
# Check if path is a directory
p = Path(path)
if p.is_dir():
p /= 'plots.xml'
# Write the XML Tree to the plots.xml file
tree = ET.ElementTree(self._plots_file)
tree.write(str(p), xml_declaration=True, encoding='utf-8')
|
|
from __future__ import absolute_import
from PySide import QtCore, QtGui
from ..wave_file import WaveTable, WaveFileError, ExportAsmFormatter, ExportCFormatter
from .dialogs import AsmExportDialog
from .widgets import WaveScene
class WaveDocumentEditor(QtGui.QGraphicsView):
"""
Individual wave editor documents.
"""
sequence_number = 0
wheel_step = 120
def __init__(self):
super(WaveDocumentEditor, self).__init__()
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.isUntitled = True
self.fileName = None
self.waveTable = None
self.scene = None
self.undoBuffer = []
def closeEvent(self, event):
if self.maybeSave():
event.accept()
else:
event.ignore()
def wheelEvent(self, event):
if event.modifiers() == QtCore.Qt.ControlModifier:
delta = event.delta()
if delta > 0:
self.zoomIn()
elif delta < 0:
self.zoomOut()
else:
super(WaveDocumentEditor, self).wheelEvent(event)
def newFile(self):
"""
Create a new file
"""
self.isUntitled = True
WaveDocumentEditor.sequence_number += 1
self.fileName = "wave{}.wave".format(self.sequence_number)
self.waveTable = WaveTable()
self.scene = WaveScene(self.waveTable)
self.setScene(self.scene)
self.fileChanged()
self.setWindowTitle(self.fileName + "[*]")
return True
def loadFile(self, file_name):
"""
Load an existing wave file.
:param file_name: The name of the file to load.
:return: True if successful; else False
"""
f = QtCore.QFile(file_name)
if not f.open(QtCore.QFile.ReadOnly):
QtGui.QMessageBox.warning(self, "Wave Editor", "Cannot read file: {}\n{}".format(
file_name, f.errorString()
))
return False
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
try:
self.waveTable = WaveTable.read(f)
self.setFileName(file_name)
self.scene = WaveScene(self.waveTable)
self.setScene(self.scene)
self.fileChanged()
except WaveFileError as ex:
QtGui.QApplication.restoreOverrideCursor()
QtGui.QMessageBox.warning(self, "Wave Editor", "Invalid wave file: {}\n{}".format(
file_name, ex
))
return False
finally:
QtGui.QApplication.restoreOverrideCursor()
return True
def save(self):
"""
Save the current file (will call save as if not previously saved)
:return: True if successful; else False
"""
if self.isUntitled:
return self.saveAs()
else:
return self.saveFile(self.fileName)
def saveAs(self):
"""
        Save the current wave to a file with a name selected by the user.
:return: True if successful; else False
"""
file_name, _ = QtGui.QFileDialog.getSaveFileName(self, "Save As...", self.fileName)
if not file_name:
return False
return self.saveFile(file_name)
    def saveFile(self, file_name):
        """
        Write the wave table to the named file.
        :param file_name: The name of the file to write.
        :return: True if successful; else False
        """
        f = QtCore.QFile(file_name)
        if not f.open(QtCore.QFile.WriteOnly):
            QtGui.QMessageBox.warning(self, "Wave Editor", "Cannot write file: {}\n{}".format(
                file_name, f.errorString()
            ))
            return False
        QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
        self.waveTable.write(f)
        QtGui.QApplication.restoreOverrideCursor()
        self.setFileName(file_name)
        self.waveTable.clear_modified()
        return True
def maybeSave(self):
if self.waveTable.modified:
ret = QtGui.QMessageBox.warning(
self, "Wave Editor",
"'{}' has been modified.\nDo you want to save your "
"changes?".format(QtCore.QFileInfo(self.fileName).fileName()),
QtGui.QMessageBox.Save | QtGui.QMessageBox.Discard | QtGui.QMessageBox.Cancel
)
if ret == QtGui.QMessageBox.Save:
return self.save()
elif ret == QtGui.QMessageBox.Cancel:
return False
return True
def setFileName(self, file_name):
file_info = QtCore.QFileInfo(file_name)
self.fileName = file_info.canonicalFilePath()
self.isUntitled = False
self.setWindowModified(False)
self.setWindowTitle(file_info.fileName() + "[*]")
def fileChanged(self):
self.setWindowModified(self.waveTable.modified)
self.scene.render()
def generateWave(self, function):
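        # Push a copy of the current wave table onto the undo buffer so that
        # undo() can restore it later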
self.undoBuffer.append(list(self.waveTable))
self.waveTable.insert(function())
self.fileChanged()
def mergeWave(self, function):
self.undoBuffer.append(list(self.waveTable))
self.waveTable.merge(function())
self.fileChanged()
def applyFunction(self, function):
self.undoBuffer.append(list(self.waveTable))
self.waveTable.insert(function(self.waveTable))
self.fileChanged()
def zoomIn(self):
self.scale(1.25, 1.25)
def zoomOut(self):
self.scale(0.8, 0.8)
def undo(self):
"""
Undo the last action performed on the wave.
"""
if self.undoBuffer:
wave = self.undoBuffer.pop()
self.waveTable.insert(wave)
self.fileChanged()
def exportAsm(self):
"""
Export wave table to ASM file
"""
output_file = QtCore.QFileInfo(self.fileName).baseName()
asmStyle, asmLabel = AsmExportDialog.getExportOptions(None, output_file)
if not asmLabel:
return
file_name, _ = QtGui.QFileDialog.getSaveFileName(self, "Export ASM as...", output_file + '.asm')
if file_name:
f = QtCore.QFile(file_name)
            if not f.open(QtCore.QFile.WriteOnly):
                QtGui.QMessageBox.warning(self, "Wave Editor", "Cannot write file: {}\n{}".format(
                    file_name, f.errorString()
                ))
                return
            QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
ExportAsmFormatter(self.waveTable, asmLabel, asmStyle)(f)
QtGui.QApplication.restoreOverrideCursor()
def exportC(self):
"""
Export wave table to C file
"""
output_file = QtCore.QFileInfo(self.fileName).baseName()
file_name, _ = QtGui.QFileDialog.getSaveFileName(self, "Export C as...", output_file + '.c')
if file_name:
f = QtCore.QFile(file_name)
            if not f.open(QtCore.QFile.WriteOnly):
                QtGui.QMessageBox.warning(self, "Wave Editor", "Cannot write file: {}\n{}".format(
                    file_name, f.errorString()
                ))
                return
            QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)
ExportCFormatter(self.waveTable, output_file)(f)
QtGui.QApplication.restoreOverrideCursor()
|