gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""Bagging meta-estimator."""
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import itertools
import numbers
import numpy as np
from abc import ABCMeta, abstractmethod
from warnings import warn
from joblib import Parallel
from ._base import BaseEnsemble, _partition_estimators
from ..base import ClassifierMixin, RegressorMixin
from ..metrics import r2_score, accuracy_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, column_or_1d, deprecated
from ..utils import indices_to_mask
from ..utils.metaestimators import if_delegate_has_method
from ..utils.multiclass import check_classification_targets
from ..utils.random import sample_without_replacement
from ..utils.validation import has_fit_parameter, check_is_fitted, _check_sample_weight
from ..utils.fixes import delayed
__all__ = ["BaggingClassifier", "BaggingRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _generate_indices(random_state, bootstrap, n_population, n_samples):
"""Draw randomly sampled indices."""
# Draw sample indices
if bootstrap:
indices = random_state.randint(0, n_population, n_samples)
else:
indices = sample_without_replacement(
n_population, n_samples, random_state=random_state
)
return indices
def _generate_bagging_indices(
    random_state,
    bootstrap_features,
    bootstrap_samples,
    n_features,
    n_samples,
    max_features,
    max_samples,
):
    """Randomly draw feature and sample indices for one ensemble member."""
    # Normalize the seed / int / RandomState input to a RandomState instance.
    rng = check_random_state(random_state)
    # Feature indices are drawn before sample indices; callers re-generate
    # these draws later from the same seed, so the order must stay fixed.
    feature_indices = _generate_indices(rng, bootstrap_features, n_features, max_features)
    sample_indices = _generate_indices(rng, bootstrap_samples, n_samples, max_samples)
    return feature_indices, sample_indices
def _parallel_build_estimators(
    n_estimators, ensemble, X, y, sample_weight, seeds, total_n_estimators, verbose
):
    """Private function used to build a batch of estimators within a job."""
    # Retrieve settings
    n_samples, n_features = X.shape
    max_features = ensemble._max_features
    max_samples = ensemble._max_samples
    bootstrap = ensemble.bootstrap
    bootstrap_features = ensemble.bootstrap_features
    support_sample_weight = has_fit_parameter(ensemble.base_estimator_, "sample_weight")
    if not support_sample_weight and sample_weight is not None:
        raise ValueError("The base estimator doesn't support sample weight")
    # Build estimators
    estimators = []
    estimators_features = []
    for i in range(n_estimators):
        if verbose > 1:
            print(
                "Building estimator %d of %d for this parallel run (total %d)..."
                % (i + 1, n_estimators, total_n_estimators)
            )
        # Each estimator gets its own integer seed; the same seed is replayed
        # later by BaseBagging._get_estimators_indices(), so the sequence of
        # RNG operations below must not change.
        random_state = seeds[i]
        estimator = ensemble._make_estimator(append=False, random_state=random_state)
        # Draw random feature, sample indices
        features, indices = _generate_bagging_indices(
            random_state,
            bootstrap_features,
            bootstrap,
            n_features,
            n_samples,
            max_features,
            max_samples,
        )
        # Draw samples, using sample weights, and then fit
        if support_sample_weight:
            if sample_weight is None:
                curr_sample_weight = np.ones((n_samples,))
            else:
                curr_sample_weight = sample_weight.copy()
            if bootstrap:
                # With replacement: weight each row by how many times it was
                # drawn (out-of-bag rows end up with weight 0).
                sample_counts = np.bincount(indices, minlength=n_samples)
                curr_sample_weight *= sample_counts
            else:
                # Without replacement: zero the weight of rows not drawn.
                not_indices_mask = ~indices_to_mask(indices, n_samples)
                curr_sample_weight[not_indices_mask] = 0
            estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)
        else:
            # No sample_weight support: materialize the row subsample instead.
            estimator.fit((X[indices])[:, features], y[indices])
        estimators.append(estimator)
        estimators_features.append(features)
    return estimators, estimators_features
def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(X[:, features])
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += proba_estimator[
:, range(len(estimator.classes_))
]
else:
# Resort to voting
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
    """Private function used to compute log probabilities within a job."""
    n_samples = X.shape[0]
    # Initialize the accumulator with log(0) = -inf so the first logaddexp
    # simply copies the first estimator's log-probabilities.
    log_proba = np.empty((n_samples, n_classes))
    log_proba.fill(-np.inf)
    all_classes = np.arange(n_classes, dtype=int)
    for estimator, features in zip(estimators, estimators_features):
        log_proba_estimator = estimator.predict_log_proba(X[:, features])
        if n_classes == len(estimator.classes_):
            log_proba = np.logaddexp(log_proba, log_proba_estimator)
        else:
            # Estimator saw only a subset of the classes: fold its columns
            # into the matching global class columns...
            log_proba[:, estimator.classes_] = np.logaddexp(
                log_proba[:, estimator.classes_],
                log_proba_estimator[:, range(len(estimator.classes_))],
            )
            # ...and contribute probability 0 (log = -inf) for the classes it
            # never saw; logaddexp with -inf leaves the accumulator unchanged.
            missing = np.setdiff1d(all_classes, estimator.classes_)
            log_proba[:, missing] = np.logaddexp(log_proba[:, missing], -np.inf)
    return log_proba
def _parallel_decision_function(estimators, estimators_features, X):
"""Private function used to compute decisions within a job."""
return sum(
estimator.decision_function(X[:, features])
for estimator, features in zip(estimators, estimators_features)
)
def _parallel_predict_regression(estimators, estimators_features, X):
"""Private function used to compute predictions within a job."""
return sum(
estimator.predict(X[:, features])
for estimator, features in zip(estimators, estimators_features)
)
class BaseBagging(BaseEnsemble, metaclass=ABCMeta):
    """Base class for Bagging meta-estimator.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(
        self,
        base_estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
    ):
        super().__init__(base_estimator=base_estimator, n_estimators=n_estimators)
        self.max_samples = max_samples
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.bootstrap_features = bootstrap_features
        self.oob_score = oob_score
        self.warm_start = warm_start
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose

    def fit(self, X, y, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        return self._fit(X, y, self.max_samples, sample_weight=sample_weight)

    def _parallel_args(self):
        # Extra keyword arguments for joblib.Parallel; subclasses may override
        # (e.g. to request a specific backend).
        return {}

    def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        """Build a Bagging ensemble of estimators from the training
        set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).
        max_samples : int or float, default=None
            Argument to use instead of self.max_samples.
        max_depth : int, default=None
            Override value used when constructing base estimator. Only
            supported if the base estimator has a max_depth parameter.
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted.
            Note that this is supported only if the base estimator supports
            sample weighting.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        random_state = check_random_state(self.random_state)
        # Convert data (X is required to be 2d and indexable)
        X, y = self._validate_data(
            X,
            y,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            multi_output=True,
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, dtype=None)
        # Remap output
        n_samples = X.shape[0]
        self._n_samples = n_samples
        y = self._validate_y(y)
        # Check parameters
        self._validate_estimator()
        if max_depth is not None:
            self.base_estimator_.max_depth = max_depth
        # Validate max_samples
        if max_samples is None:
            max_samples = self.max_samples
        elif not isinstance(max_samples, numbers.Integral):
            # A non-integer max_samples is interpreted as a fraction of the
            # training-set size.
            max_samples = int(max_samples * X.shape[0])
        if not (0 < max_samples <= X.shape[0]):
            raise ValueError("max_samples must be in (0, n_samples]")
        # Store validated integer row sampling value
        self._max_samples = max_samples
        # Validate max_features
        if isinstance(self.max_features, numbers.Integral):
            max_features = self.max_features
        elif isinstance(self.max_features, float):
            max_features = self.max_features * self.n_features_in_
        else:
            raise ValueError("max_features must be int or float")
        if not (0 < max_features <= self.n_features_in_):
            raise ValueError("max_features must be in (0, n_features]")
        # Round a fractional feature count down, but always keep at least one.
        max_features = max(1, int(max_features))
        # Store validated integer feature sampling value
        self._max_features = max_features
        # Other checks
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available if bootstrap=True")
        if self.warm_start and self.oob_score:
            raise ValueError("Out of bag estimate only available if warm_start=False")
        if hasattr(self, "oob_score_") and self.warm_start:
            del self.oob_score_
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
            self.estimators_features_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )
        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
            return self
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            n_more_estimators, self.n_jobs
        )
        total_n_estimators = sum(n_estimators)
        # Advance random state to state after training
        # the first n_estimators
        if self.warm_start and len(self.estimators_) > 0:
            random_state.randint(MAX_INT, size=len(self.estimators_))
        # One integer seed per new estimator; kept in self._seeds so that
        # _get_estimators_indices() can replay the per-estimator draws.
        seeds = random_state.randint(MAX_INT, size=n_more_estimators)
        self._seeds = seeds
        all_results = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
        )(
            delayed(_parallel_build_estimators)(
                n_estimators[i],
                self,
                X,
                y,
                sample_weight,
                seeds[starts[i] : starts[i + 1]],
                total_n_estimators,
                verbose=self.verbose,
            )
            for i in range(n_jobs)
        )
        # Reduce
        self.estimators_ += list(
            itertools.chain.from_iterable(t[0] for t in all_results)
        )
        self.estimators_features_ += list(
            itertools.chain.from_iterable(t[1] for t in all_results)
        )
        if self.oob_score:
            self._set_oob_score(X, y)
        return self

    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""

    def _validate_y(self, y):
        # Default target validation: flatten column vectors, pass multi-output
        # targets through unchanged. Overridden by BaggingClassifier.
        if len(y.shape) == 1 or y.shape[1] == 1:
            return column_or_1d(y, warn=True)
        else:
            return y

    def _get_estimators_indices(self):
        # Get drawn indices along both sample and feature axes
        for seed in self._seeds:
            # Operations accessing random_state must be performed identically
            # to those in `_parallel_build_estimators()`
            feature_indices, sample_indices = _generate_bagging_indices(
                seed,
                self.bootstrap_features,
                self.bootstrap,
                self.n_features_in_,
                self._n_samples,
                self._max_features,
                self._max_samples,
            )
            yield feature_indices, sample_indices

    @property
    def estimators_samples_(self):
        """
        The subset of drawn samples for each base estimator.

        Returns a dynamically generated list of indices identifying
        the samples used for fitting each member of the ensemble, i.e.,
        the in-bag samples.

        Note: the list is re-created at each call to the property in order
        to reduce the object memory footprint by not storing the sampling
        data. Thus fetching the property may be slower than expected.
        """
        return [sample_indices for _, sample_indices in self._get_estimators_indices()]

    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "Attribute `n_features_` was deprecated in version 1.0 and will be "
        "removed in 1.2. Use `n_features_in_` instead."
    )
    @property
    def n_features_(self):
        return self.n_features_in_
class BaggingClassifier(ClassifierMixin, BaseBagging):
    """A Bagging classifier.

    A Bagging classifier is an ensemble meta-estimator that fits base
    classifiers each on random subsets of the original dataset and then
    aggregate their individual predictions (either by voting or by averaging)
    to form a final prediction. Such a meta-estimator can typically be used as
    a way to reduce the variance of a black-box estimator (e.g., a decision
    tree), by introducing randomization into its construction procedure and
    then making an ensemble out of it.

    This algorithm encompasses several works from the literature. When random
    subsets of the dataset are drawn as random subsets of the samples, then
    this algorithm is known as Pasting [1]_. If samples are drawn with
    replacement, then the method is known as Bagging [2]_. When random subsets
    of the dataset are drawn as random subsets of the features, then the method
    is known as Random Subspaces [3]_. Finally, when base estimators are built
    on subsets of both samples and features, then the method is known as
    Random Patches [4]_.

    Read more in the :ref:`User Guide <bagging>`.

    .. versionadded:: 0.15

    Parameters
    ----------
    base_estimator : object, default=None
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a
        :class:`~sklearn.tree.DecisionTreeClassifier`.
    n_estimators : int, default=10
        The number of base estimators in the ensemble.
    max_samples : int or float, default=1.0
        The number of samples to draw from X to train each base estimator (with
        replacement by default, see `bootstrap` for more details).
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
    max_features : int or float, default=1.0
        The number of features to draw from X to train each base estimator (
        without replacement by default, see `bootstrap_features` for more
        details).
        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.
    bootstrap : bool, default=True
        Whether samples are drawn with replacement. If False, sampling
        without replacement is performed.
    bootstrap_features : bool, default=False
        Whether features are drawn with replacement.
    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate
        the generalization error. Only available if bootstrap=True.
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit
        a whole new ensemble. See :term:`the Glossary <warm_start>`.
        .. versionadded:: 0.17
           *warm_start* constructor parameter.
    n_jobs : int, default=None
        The number of jobs to run in parallel for both :meth:`fit` and
        :meth:`predict`. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.
    random_state : int, RandomState instance or None, default=None
        Controls the random resampling of the original dataset
        (sample wise and feature wise).
        If the base estimator accepts a `random_state` attribute, a different
        seed is generated for each instance in the ensemble.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    Attributes
    ----------
    base_estimator_ : estimator
        The base estimator from which the ensemble is grown.
    n_features_ : int
        The number of features when :meth:`fit` is performed.
        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    estimators_ : list of estimators
        The collection of fitted base estimators.
    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.
    estimators_features_ : list of arrays
        The subset of drawn features for each base estimator.
    classes_ : ndarray of shape (n_classes,)
        The classes labels.
    n_classes_ : int or list
        The number of classes.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.
    oob_decision_function_ : ndarray of shape (n_samples, n_classes)
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN. This attribute exists
        only when ``oob_score`` is True.

    See Also
    --------
    BaggingRegressor : A Bagging regressor.

    References
    ----------
    .. [1] L. Breiman, "Pasting small votes for classification in large
           databases and on-line", Machine Learning, 36(1), 85-103, 1999.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
           1996.
    .. [3] T. Ho, "The random subspace method for constructing decision
           forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
           1998.
    .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
           Learning and Knowledge Discovery in Databases, 346-361, 2012.

    Examples
    --------
    >>> from sklearn.svm import SVC
    >>> from sklearn.ensemble import BaggingClassifier
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=100, n_features=4,
    ...                            n_informative=2, n_redundant=0,
    ...                            random_state=0, shuffle=False)
    >>> clf = BaggingClassifier(base_estimator=SVC(),
    ...                         n_estimators=10, random_state=0).fit(X, y)
    >>> clf.predict([[0, 0, 0, 0]])
    array([1])
    """

    def __init__(
        self,
        base_estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
    ):
        super().__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super()._validate_estimator(default=DecisionTreeClassifier())

    def _set_oob_score(self, X, y):
        # Accumulate, for each training sample, the (probability) votes from
        # every estimator for which that sample was out-of-bag.
        n_samples = y.shape[0]
        n_classes_ = self.n_classes_
        predictions = np.zeros((n_samples, n_classes_))
        for estimator, samples, features in zip(
            self.estimators_, self.estimators_samples_, self.estimators_features_
        ):
            # Create mask for OOB samples
            mask = ~indices_to_mask(samples, n_samples)
            if hasattr(estimator, "predict_proba"):
                predictions[mask, :] += estimator.predict_proba(
                    (X[mask, :])[:, features]
                )
            else:
                # No probability estimates: count one hard vote per estimator.
                p = estimator.predict((X[mask, :])[:, features])
                j = 0
                for i in range(n_samples):
                    if mask[i]:
                        predictions[i, p[j]] += 1
                        j += 1
        if (predictions.sum(axis=1) == 0).any():
            warn(
                "Some inputs do not have OOB scores. "
                "This probably means too few estimators were used "
                "to compute any reliable oob estimates."
            )
        # Normalize vote counts into per-sample class probabilities.
        oob_decision_function = predictions / predictions.sum(axis=1)[:, np.newaxis]
        oob_score = accuracy_score(y, np.argmax(predictions, axis=1))
        self.oob_decision_function_ = oob_decision_function
        self.oob_score_ = oob_score

    def _validate_y(self, y):
        # Encode class labels as integers 0..n_classes-1; the original labels
        # are kept in self.classes_ for decoding at predict time.
        y = column_or_1d(y, warn=True)
        check_classification_targets(y)
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y

    def predict(self, X):
        """Predict class for X.

        The predicted class of an input sample is computed as the class with
        the highest mean predicted probability. If base estimators do not
        implement a ``predict_proba`` method, then it resorts to voting.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted classes.
        """
        predicted_probabilitiy = self.predict_proba(X)
        return self.classes_.take((np.argmax(predicted_probabilitiy, axis=1)), axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the base estimators in the
        ensemble. If base estimators do not implement a ``predict_proba``
        method, then it resorts to voting and the predicted class probabilities
        of an input sample represents the proportion of estimators predicting
        each class.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : ndarray of shape (n_samples, n_classes)
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs
        )
        all_proba = Parallel(
            n_jobs=n_jobs, verbose=self.verbose, **self._parallel_args()
        )(
            delayed(_parallel_predict_proba)(
                self.estimators_[starts[i] : starts[i + 1]],
                self.estimators_features_[starts[i] : starts[i + 1]],
                X,
                self.n_classes_,
            )
            for i in range(n_jobs)
        )
        # Reduce
        proba = sum(all_proba) / self.n_estimators
        return proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the base
        estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        p : ndarray of shape (n_samples, n_classes)
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        check_is_fitted(self)
        if hasattr(self.base_estimator_, "predict_log_proba"):
            # Check data
            X = self._validate_data(
                X,
                accept_sparse=["csr", "csc"],
                dtype=None,
                force_all_finite=False,
                reset=False,
            )
            # Parallel loop
            n_jobs, n_estimators, starts = _partition_estimators(
                self.n_estimators, self.n_jobs
            )
            all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
                delayed(_parallel_predict_log_proba)(
                    self.estimators_[starts[i] : starts[i + 1]],
                    self.estimators_features_[starts[i] : starts[i + 1]],
                    X,
                    self.n_classes_,
                )
                for i in range(n_jobs)
            )
            # Reduce: combine the per-job sums in log-space, then divide the
            # total (subtract in log-space) by the number of estimators.
            log_proba = all_log_proba[0]
            for j in range(1, len(all_log_proba)):
                log_proba = np.logaddexp(log_proba, all_log_proba[j])
            log_proba -= np.log(self.n_estimators)
            return log_proba
        else:
            # Base estimator cannot produce log-probabilities directly; take
            # the log of the averaged probabilities instead.
            return np.log(self.predict_proba(X))

    @if_delegate_has_method(delegate="base_estimator")
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        score : ndarray of shape (n_samples, k)
            The decision function of the input samples. The columns correspond
            to the classes in sorted order, as they appear in the attribute
            ``classes_``. Regression and binary classification are special
            cases with ``k == 1``, otherwise ``k==n_classes``.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs
        )
        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i] : starts[i + 1]],
                self.estimators_features_[starts[i] : starts[i + 1]],
                X,
            )
            for i in range(n_jobs)
        )
        # Reduce
        decisions = sum(all_decisions) / self.n_estimators
        return decisions
class BaggingRegressor(RegressorMixin, BaseBagging):
    """A Bagging regressor.

    A Bagging regressor is an ensemble meta-estimator that fits base
    regressors each on random subsets of the original dataset and then
    aggregate their individual predictions (either by voting or by averaging)
    to form a final prediction. Such a meta-estimator can typically be used as
    a way to reduce the variance of a black-box estimator (e.g., a decision
    tree), by introducing randomization into its construction procedure and
    then making an ensemble out of it.

    This algorithm encompasses several works from the literature. When random
    subsets of the dataset are drawn as random subsets of the samples, then
    this algorithm is known as Pasting [1]_. If samples are drawn with
    replacement, then the method is known as Bagging [2]_. When random subsets
    of the dataset are drawn as random subsets of the features, then the method
    is known as Random Subspaces [3]_. Finally, when base estimators are built
    on subsets of both samples and features, then the method is known as
    Random Patches [4]_.

    Read more in the :ref:`User Guide <bagging>`.

    .. versionadded:: 0.15

    Parameters
    ----------
    base_estimator : object, default=None
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a
        :class:`~sklearn.tree.DecisionTreeRegressor`.
    n_estimators : int, default=10
        The number of base estimators in the ensemble.
    max_samples : int or float, default=1.0
        The number of samples to draw from X to train each base estimator (with
        replacement by default, see `bootstrap` for more details).
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
    max_features : int or float, default=1.0
        The number of features to draw from X to train each base estimator (
        without replacement by default, see `bootstrap_features` for more
        details).
        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.
    bootstrap : bool, default=True
        Whether samples are drawn with replacement. If False, sampling
        without replacement is performed.
    bootstrap_features : bool, default=False
        Whether features are drawn with replacement.
    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate
        the generalization error. Only available if bootstrap=True.
    warm_start : bool, default=False
        When set to True, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit
        a whole new ensemble. See :term:`the Glossary <warm_start>`.
    n_jobs : int, default=None
        The number of jobs to run in parallel for both :meth:`fit` and
        :meth:`predict`. ``None`` means 1 unless in a
        :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors. See :term:`Glossary <n_jobs>` for more details.
    random_state : int, RandomState instance or None, default=None
        Controls the random resampling of the original dataset
        (sample wise and feature wise).
        If the base estimator accepts a `random_state` attribute, a different
        seed is generated for each instance in the ensemble.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.
    verbose : int, default=0
        Controls the verbosity when fitting and predicting.

    Attributes
    ----------
    base_estimator_ : estimator
        The base estimator from which the ensemble is grown.
    n_features_ : int
        The number of features when :meth:`fit` is performed.
        .. deprecated:: 1.0
            Attribute `n_features_` was deprecated in version 1.0 and will be
            removed in 1.2. Use `n_features_in_` instead.
    n_features_in_ : int
        Number of features seen during :term:`fit`.
        .. versionadded:: 0.24
    estimators_ : list of estimators
        The collection of fitted sub-estimators.
    estimators_samples_ : list of arrays
        The subset of drawn samples (i.e., the in-bag samples) for each base
        estimator. Each subset is defined by an array of the indices selected.
    estimators_features_ : list of arrays
        The subset of drawn features for each base estimator.
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
        This attribute exists only when ``oob_score`` is True.
    oob_prediction_ : ndarray of shape (n_samples,)
        Prediction computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_prediction_` might contain NaN. This attribute exists only
        when ``oob_score`` is True.

    See Also
    --------
    BaggingClassifier : A Bagging classifier.

    References
    ----------
    .. [1] L. Breiman, "Pasting small votes for classification in large
           databases and on-line", Machine Learning, 36(1), 85-103, 1999.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
           1996.
    .. [3] T. Ho, "The random subspace method for constructing decision
           forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
           1998.
    .. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
           Learning and Knowledge Discovery in Databases, 346-361, 2012.

    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> from sklearn.ensemble import BaggingRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=100, n_features=4,
    ...                        n_informative=2, n_targets=1,
    ...                        random_state=0, shuffle=False)
    >>> regr = BaggingRegressor(base_estimator=SVR(),
    ...                         n_estimators=10, random_state=0).fit(X, y)
    >>> regr.predict([[0, 0, 0, 0]])
    array([-2.8720...])
    """

    def __init__(
        self,
        base_estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
    ):
        super().__init__(
            base_estimator,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )

    def predict(self, X):
        """Predict regression target for X.

        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the estimators in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted values.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )
        # Parallel loop
        n_jobs, n_estimators, starts = _partition_estimators(
            self.n_estimators, self.n_jobs
        )
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_predict_regression)(
                self.estimators_[starts[i] : starts[i + 1]],
                self.estimators_features_[starts[i] : starts[i + 1]],
                X,
            )
            for i in range(n_jobs)
        )
        # Reduce
        y_hat = sum(all_y_hat) / self.n_estimators
        return y_hat

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super()._validate_estimator(default=DecisionTreeRegressor())

    def _set_oob_score(self, X, y):
        # For each training sample, average the predictions of the estimators
        # for which that sample was out-of-bag.
        n_samples = y.shape[0]
        predictions = np.zeros((n_samples,))
        n_predictions = np.zeros((n_samples,))
        for estimator, samples, features in zip(
            self.estimators_, self.estimators_samples_, self.estimators_features_
        ):
            # Create mask for OOB samples
            mask = ~indices_to_mask(samples, n_samples)
            predictions[mask] += estimator.predict((X[mask, :])[:, features])
            n_predictions[mask] += 1
        if (n_predictions == 0).any():
            warn(
                "Some inputs do not have OOB scores. "
                "This probably means too few estimators were used "
                "to compute any reliable oob estimates."
            )
            # Avoid division by zero for samples that were never out-of-bag.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        self.oob_score_ = r2_score(y, predictions)
|
|
#!/usr/bin/env
"""
Chuckchi_Winds_NARR_model_prep.py
Retrieve NARR winds for one location
Icy Cape Line, Ckip2
Latitude = 70.8401 Longitude = 163.2054
Filter NARR winds with a triangular filter (1/4, 1/2, 1/4) and output every 3hrs
Provide U, V
Save in EPIC NetCDF standard
python 2.7 only
"""
#System Stack
import datetime
import argparse
#Science Stack
import numpy as np
from netCDF4 import Dataset, num2date
# User Stack
import utilities.haversine as sphered
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
__keywords__ = 'NARR','station_1','3hr filtered', 'U,V','Winds', 'Chuckchi'
"""------------------------General Modules-------------------------------------------"""
def from_netcdf(infile):
    """Read every variable of a NetCDF file into a dict via ncreadfile_dic."""
    handle = ncutil.ncopen(infile)
    var_names = ncutil.get_vars(handle)  # every variable in the file
    data = ncutil.ncreadfile_dic(handle, var_names)
    ncutil.ncclose(handle)
    return (data, var_names)
def from_netcdf_1dsplice(infile, height_ind, lat_ind, lon_ind):
    """ Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
    # Slices a single (height, lat, lon) point out of each variable so only a
    # 1-D time series is read, instead of the full model grid.
    ###nc readin/out
    nchandle = ncutil.ncopen(infile)
    params = ncutil.get_vars(nchandle) #gets all of them
    print "Parameters available: "
    print params
    ncdata = ncutil.ncreadfile_dic_slice(nchandle, params, height_ind=height_ind, lat_ind=lat_ind, lon_ind=lon_ind)
    ncutil.ncclose(nchandle)
    return ncdata
def latlon_grid(infile):
    """Return the geographic coordinate arrays stored in a NARR NetCDF file."""
    handle = ncutil.ncopen(infile)
    coords = ncutil.get_geocoords(handle)
    ncutil.ncclose(handle)
    return coords
def csvread(ifile):
    """Read a whitespace-delimited met file and return date/time/wind arrays.

    Each data row holds ``DAT TIM WU WV AT BP`` fields (some rows carry an
    extra trailing column).  Returns a dict of numpy arrays keyed 'DAT',
    'TIM', 'WU', 'WV'; the temperature/pressure columns are ignored.
    """
    # Bug fix: `csv` was never imported at module level, so calling this
    # function raised NameError.  Import locally to keep the fix contained.
    import csv

    date, time, uwnd, vwnd = [], [], [], []
    with open(ifile, 'rb') as csv_file:
        csv_reader = csv.reader(csv_file)
        next(csv_reader)  # skip header
        # Row layout: DAT TIM WU WV AT BP (optionally one extra column).
        for row in csv_reader:
            try:
                r0, r1, r2, r3, r4, r5, r6 = row[0].strip().split()
            except ValueError:
                r0, r1, r2, r3, r4, r5 = row[0].strip().split()
            date.append(r0)
            time.append(r1)
            uwnd.append(r2)
            vwnd.append(r3)
    return {'DAT': np.array(date, int), 'TIM': np.array(time, float), 'WU': np.array(uwnd, float),
            'WV': np.array(vwnd, float)}
def write2epic(file_name, stationid, time, lat_lon, data):
    """Write U/V winds and air temperature to an EPIC-standard NetCDF file.

    time is a (time1, time2) pair; data is [u, v, air temperature].  The
    longitude sign is flipped before writing.
    """
    nc_out = ncutil.EPIC_NC(savefile=file_name)
    nc_out.file_create()
    nc_out.sbeglobal_atts()
    nc_out.PMELglobal_atts(Station_Name=stationid, file_name=(__file__.split('/')[-1]))
    nc_out.dimension_init(len_time=len(time[0]))
    nc_out.variable_init()
    nc_out.add_coord_data(time1=time[0], time2=time[1], latitude=lat_lon[0],
                          longitude=-1 * lat_lon[1], depth_level=10.)
    nc_out.add_data('WU_422', data[0])
    nc_out.add_data('WV_423', data[1])
    nc_out.add_data('AT_21', data[2])
    nc_out.close()
def write2epic_cf(file_name, stationid, time, lat_lon, data):
    """Write U/V winds and air temperature to a CF-convention NetCDF file.

    Same payload as write2epic but with a single CF time coordinate.  The
    longitude sign is flipped before writing.
    """
    nc_out = ncutil.EPIC_NC_SST_cf(savefile=file_name)
    nc_out.file_create()
    nc_out.sbeglobal_atts()
    nc_out.PMELglobal_atts(Station_Name=stationid, file_name=(__file__.split('/')[-1]))
    nc_out.dimension_init(len_time=len(time))
    nc_out.variable_init()
    nc_out.add_coord_data(time=time, latitude=lat_lon[0],
                          longitude=-1 * lat_lon[1], depth_level=10.)
    nc_out.add_data('WU_422', data[0])
    nc_out.add_data('WV_423', data[1])
    nc_out.add_data('AT_21', data[2])
    nc_out.close()
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
    """ Ingest EPIC date or NCEP Date and provide python serial date"""
    # EPIC: file_time is a true Julian day (2440000 <-> 1968-05-23) and
    # file_time2 is milliseconds since 0000 GMT.  NARR/NCEP: hours since
    # 1800-01-01.  Returns python ordinal day(s) as a numpy array.
    if file_flag == 'EPIC':
        ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
        ref_time_epic = 2440000
        offset = ref_time_epic - ref_time_py
        try: #if input is an array
            python_time = [None] * len(file_time)
            for i, val in enumerate(file_time):
                pyday = file_time[i] - offset
                pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
                python_time[i] = (pyday + pyfrac)
        except: # NOTE(review): bare except - scalars fall through here, but this also hides real errors; narrow to TypeError
            pyday = file_time - offset
            pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
            python_time = (pyday + pyfrac)
    elif file_flag == 'NARR':
        """ Hours since 1800-1-1"""
        base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
    elif file_flag == 'NCEP':
        """ Hours since 1800-1-1"""
        base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
        python_time = file_time / 24. + base_date
    else:
        # NOTE(review): `sys` is never imported in this module, so this branch
        # would raise NameError; add `import sys` to the System Stack imports.
        print "time flag not recognized"
        sys.exit()
    return np.array(python_time)
def pydate2EPIC(file_time):
    """Convert python ordinal day(s) into an EPIC (time1, time2) pair.

    time1 is the true Julian day (2440000 corresponds to 1968-05-23) and
    time2 is milliseconds since 0000 GMT of that day.
    """
    epic_offset = 2440000 - datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
    whole_days = np.floor(file_time)
    time1 = whole_days + epic_offset
    time2 = (file_time - whole_days) * (1000. * 60. * 60. * 24.)
    return (time1, time2)
"---"
def triangle_smoothing(data_in):
weights=np.array([0.25,0.5,0.25])
filtered_data = np.convolve(data_in,np.array(weights),'same') #edge effects
return filtered_data
"""------------------------- Topo Modules -------------------------------------------"""
def etopo5_data():
    """Read etopo5 topography/bathymetry and return (topo, lat grid, lon grid)."""
    etopo_path = '/Volumes/WDC_internal/Users/bell/in_and_outbox/Ongoing_Analysis/MapGrids/etopo5.nc'
    dataset = Dataset(etopo_path)
    topo = dataset.variables['bath'][:]
    lon_vals = dataset.variables['X'][:]
    lat_vals = dataset.variables['Y'][:]
    dataset.close()

    # shiftgrid with start=False wraps the longitudes (original note: -360 -> 0)
    topo, lon_vals = shiftgrid(0., topo, lon_vals, start=False)
    grid_lons, grid_lats = np.meshgrid(lon_vals, lat_vals)
    return (topo, grid_lats, grid_lons)
"""------------------------- Main Modules -------------------------------------------"""
parser = argparse.ArgumentParser(description='NARR from Single Station')
parser.add_argument('MooringID', metavar='MooringID', type=str, help='MooringID Name')
parser.add_argument('latitude', metavar='latitude', type=float, help='latitude (+N)')
parser.add_argument('longitude', metavar='longitude', type=float, help='longitude (+W)')
parser.add_argument('years', nargs='+', type=int, help='start and stop year')
parser.add_argument('--DataPath', metavar='DataPath', type=str, help='full path to alternate file')
parser.add_argument("-cf",'--cf', action="store_true", help='cf conventions - primarily in time')
args = parser.parse_args()
### list of files
if args.DataPath:
NARR = args.DataPath
else:
NARR = '/Users/bell/in_and_outbox/data_sets/reanalyis_data/NARR/daily/'
infile = [NARR + 'uwnd.10m.2016.nc'] #used just to get grid sections
print infile
### Grab grid points for future slicing - assume grid is same in all model output
lat_lon = latlon_grid(infile[0])
station_name = [args.MooringID]
sta_lat = [args.latitude]
sta_long = [args.longitude]
#Find NARR nearest point to moorings - haversine formula
# station_1[3], station_1[4] index the model grid (presumably row, col of the
# nearest grid cell) -- TODO confirm against sphered.nearest_point.
station_1 = sphered.nearest_point([sta_lat[0],-1 * sta_long[0]],lat_lon['lat'],lat_lon['lon'], '2d')
station_1_modelpt = [lat_lon['lat'][station_1[3],station_1[4]],lat_lon['lon'][station_1[3],station_1[4]]]
print "station_1 nearest point to %s, %s which is lat:%s , lon:%s" \
    % (sta_lat[0], sta_long[0], station_1_modelpt[0], station_1_modelpt[1])
#loop over all requested data
years = range(args.years[0],args.years[1]+1)
for yy in years:
    # retrieve only these location's data
    # uwnd
    infile = NARR + 'uwnd.10m.'+ str(yy) + '.nc'
    print "Working on file " + infile
    station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
    #filter data
    # triangular smoothing currently disabled; raw series is passed through
    station_1u_f = station_1_data['uwnd']
    #station_1u_f = triangle_smoothing(station_1_data['uwnd'])
    # retrieve only these location's data
    # vwnd
    infile = NARR + 'vwnd.10m.'+ str(yy) + '.nc'
    print "Working on file " + infile
    station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
    #filter data
    station_1v_f = station_1_data['vwnd']
    #station_1v_f = triangle_smoothing(station_1_data['vwnd'])
    # retrieve only these location's data
    # sfc air temp
    infile = NARR + 'air.2m.'+ str(yy) + '.nc'
    print "Working on file " + infile
    station_1_data = from_netcdf_1dsplice(infile, None, station_1[3], station_1[4])
    station_1at = station_1_data['air'] -273.15 #Kelvin
    #convert to EPIC time
    pydate = date2pydate(station_1_data['time'], file_flag='NARR')
    epic_time, epic_time1 = pydate2EPIC(pydate)
    # output u,v wind components from model grid points
    save_to_nc = True
    if save_to_nc:
        # write to NetCDF
        outfile = 'data/NARR_' + station_name[0] + '_' + str(yy) + '.nc'
        print "Writing to Epic NetCDF " + outfile
        if args.cf:
            #days since 1800-1-1 00:00:0.0
            # NOTE(review): an empty time list is written in CF mode -- looks
            # unfinished; confirm intended behavior.
            date_str_cf = []
            write2epic_cf( outfile, station_name[0], date_str_cf, station_1_modelpt, [station_1u_f, station_1v_f, station_1at])
        else:
            write2epic( outfile, station_name[0], [epic_time, epic_time1], station_1_modelpt, [station_1u_f, station_1v_f, station_1at])
    output_to_screen = True
    if output_to_screen:
        if args.cf:
            #days since 1800-1-1 00:00:0.0
            date_str_cf = num2date(station_1_data['time'], "hours since 1800-01-01")
            print "Ucomp, Vcomp, airtemp"
            for i,v in enumerate(station_1u_f):
                print "{0}, {1}, {2}, {3}".format(date_str_cf[i], station_1u_f[i], station_1v_f[i], station_1at[i])
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Lint format strings: This program checks that the number of arguments passed
# to a variadic format string function matches the number of format specifiers
# in the format string.
import argparse
import doctest
import re
import sys
# Exact (filename, call string) pairs the linter must ignore: variadic
# forwarding wrappers whose argument counts cannot be checked statically.
FALSE_POSITIVES = [
    ("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"),
    ("src/index/base.cpp", "FatalError(const char *fmt, const Args &... args)"),
    ("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char *fmt, const Args &... args)"),
    ("src/util/system.cpp",
     "strprintf(_(COPYRIGHT_HOLDERS).translated, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
    ("src/tinyformat.h", "printf(const char *fmt, const Args &... args)"),
    ("src/tinyformat.h", "printf(const char *fmt, TINYFORMAT_VARARGS(n))"),
    ("src/wallet/wallet.h",
     "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"),
    ("src/logging.h", "LogPrintf(const char *fmt, const Args &... args)"),
]
# (function name, number of leading non-format arguments to skip before the
# format string) for every printf-style function the linter knows about.
FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS = [
    ("FatalError", 0),
    ("fprintf", 1),
    ("LogConnectFailure", 1),
    ("LogPrint", 1),
    ("LogPrintf", 0),
    ("printf", 0),
    ("snprintf", 2),
    ("sprintf", 1),
    ("strprintf", 0),
    ("tfm::format", 1),  # Assuming tfm::format(std::ostream&, ...
    ("vfprintf", 1),
    ("vprintf", 1),
    ("vsnprintf", 1),
    ("vsprintf", 1),
]
def parse_function_calls(function_name, source_code):
    """Return an array with all calls to function function_name in string source_code.
    Preprocessor directives and C++ style comments ("//") in source_code are removed.
    >>> len(parse_function_calls("foo", "foo();bar();foo();bar();"))
    2
    >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[0].startswith("foo(1);")
    True
    >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[1].startswith("foo(2);")
    True
    >>> len(parse_function_calls("foo", "foo();bar();// foo();bar();"))
    1
    >>> len(parse_function_calls("foo", "#define FOO foo();"))
    0
    """
    assert isinstance(function_name, str) and isinstance(
        source_code, str) and function_name
    # Drop preprocessor lines entirely and blank out "//" comments.
    stripped = [
        re.sub("// .*", " ", raw).strip()
        for raw in source_code.split("\n")
        if not raw.strip().startswith("#")
    ]
    # A lookahead capture anchored on a non-identifier character, so e.g.
    # "myfoo(" does not match while " foo(" does.
    pattern = r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name)
    return re.findall(pattern, " " + " ".join(stripped))
def normalize(s):
    """Return a normalized version of string s with newlines, tabs and C style comments ("/* ... */")
    replaced with spaces. Multiple spaces are replaced with a single space.
    >>> normalize(" /* nothing */ foo\tfoo /* bar */ foo ")
    'foo foo foo'
    """
    assert isinstance(s, str)
    flattened = s.replace("\n", " ").replace("\t", " ")
    without_comments = re.sub(r"/\*.*?\*/", " ", flattened)  # non-greedy: per-comment
    return re.sub(" {2,}", " ", without_comments).strip()
# Escape sequences (as they appear in C++ source text) mapped to unambiguous
# placeholder tokens, so later parsing never trips over quotes inside strings.
ESCAPE_MAP = {
    r"\n": "[escaped-newline]",
    r"\t": "[escaped-tab]",
    r'\"': "[escaped-quote]",
}


def escape(s):
    """Return the escaped version of string s with "\\\"", "\\n" and "\\t" escaped as
    "[escaped-backslash]", "[escaped-newline]" and "[escaped-tab]".
    >>> unescape(escape("foo")) == "foo"
    True
    >>> escape(r'foo \\t foo \\n foo \\\\ foo \\ foo \\"bar\\"')
    'foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]'
    """
    assert isinstance(s, str)
    result = s
    for sequence, placeholder in ESCAPE_MAP.items():
        result = result.replace(sequence, placeholder)
    return result


def unescape(s):
    """Return the unescaped version of escaped string s.
    Reverses the replacements made in function escape(s).
    >>> unescape(escape("bar"))
    'bar'
    >>> unescape("foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]")
    'foo \\\\t foo \\\\n foo \\\\\\\\ foo \\\\ foo \\\\"bar\\\\"'
    """
    assert isinstance(s, str)
    result = s
    for sequence, placeholder in ESCAPE_MAP.items():
        result = result.replace(placeholder, sequence)
    return result
def parse_function_call_and_arguments(function_name, function_call):
    """Split string function_call into an array of strings consisting of:
    * the string function_call followed by "("
    * the function call argument #1
    * ...
    * the function call argument #n
    * a trailing ");"
    The strings returned are in escaped form. See escape(...).
    >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
    ['foo(', '"%s",', ' "foo"', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
    ['foo(', '"%s",', ' "foo"', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("%s %s", "foo", "bar");')
    ['foo(', '"%s %s",', ' "foo",', ' "bar"', ')']
    >>> parse_function_call_and_arguments("fooprintf", 'fooprintf("%050d", i);')
    ['fooprintf(', '"%050d",', ' i', ')']
    >>> parse_function_call_and_arguments("foo", 'foo(bar(foobar(barfoo("foo"))), foobar); barfoo')
    ['foo(', 'bar(foobar(barfoo("foo"))),', ' foobar', ')']
    >>> parse_function_call_and_arguments("foo", "foo()")
    ['foo(', '', ')']
    >>> parse_function_call_and_arguments("foo", "foo(123)")
    ['foo(', '123', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("foo")')
    ['foo(', '"foo"', ')']
    """
    assert isinstance(function_name, str) and isinstance(
        function_call, str) and function_name
    # Work on the normalized, escape()-d form so escaped quotes inside string
    # literals cannot confuse the quote/bracket tracking below.
    remaining = normalize(escape(function_call))
    expected_function_call = "{}(".format(function_name)
    assert remaining.startswith(expected_function_call)
    parts = [expected_function_call]
    remaining = remaining[len(expected_function_call):]
    open_parentheses = 1
    in_string = False
    parts.append("")
    for char in remaining:
        # Append char to the current (last) part, then classify it.
        parts.append(parts.pop() + char)
        if char == "\"":
            in_string = not in_string
            continue
        if in_string:
            # Inside a string literal: parentheses and commas are plain text.
            continue
        if char == "(":
            open_parentheses += 1
            continue
        if char == ")":
            open_parentheses -= 1
        # NOTE: checked after the decrement above, so a ')' that closes a
        # nested call falls through to the comma check only at depth 1.
        if open_parentheses > 1:
            continue
        if open_parentheses == 0:
            # Closing parenthesis of the call itself: strip it from the last
            # argument and emit it as its own trailing part.
            parts.append(parts.pop()[:-1])
            parts.append(char)
            break
        if char == ",":
            # Top-level comma: start collecting the next argument.
            parts.append("")
    return parts
def parse_string_content(argument):
    """Return the text within quotes in string argument.
    >>> parse_string_content('1 "foo %d bar" 2')
    'foo %d bar'
    >>> parse_string_content('1 foobar 2')
    ''
    >>> parse_string_content('1 "bar" 2')
    'bar'
    >>> parse_string_content('1 "foo" 2 "bar" 3')
    'foobar'
    >>> parse_string_content('1 "foo" 2 " " "bar" 3')
    'foo bar'
    >>> parse_string_content('""')
    ''
    >>> parse_string_content('')
    ''
    >>> parse_string_content('1 2 3')
    ''
    """
    assert isinstance(argument, str)
    # escape() guarantees no escaped quote survives, so every '"' seen here
    # genuinely toggles string context.
    collected = []
    inside = False
    for ch in normalize(escape(argument)):
        if ch == "\"":
            inside = not inside
        elif inside:
            collected.append(ch)
    return "".join(collected)
def count_format_specifiers(format_string):
    """Return the number of format specifiers in string format_string.

    "%%" is a literal percent sign; pairs are consumed left to right so the
    second '%' can never start a specifier (the previous pairwise check
    miscounted odd runs such as "%%%d" as zero specifiers). A "*" inside a
    specifier (width or precision taken from an argument) counts as one
    extra argument.

    >>> count_format_specifiers("foo bar foo")
    0
    >>> count_format_specifiers("foo %d bar foo")
    1
    >>> count_format_specifiers("foo %d bar %i foo")
    2
    >>> count_format_specifiers("foo %d bar %i foo %% foo")
    2
    >>> count_format_specifiers("foo %d bar %i foo %% foo %d foo")
    3
    >>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
    4
    >>> count_format_specifiers("%%%d")
    1
    """
    assert isinstance(format_string, str)
    n = 0
    i = 0
    length = len(format_string)
    while i < length:
        if format_string[i] != "%":
            i += 1
            continue
        if format_string[i:i + 2] == "%%":
            i += 2  # literal percent sign: consume the pair
            continue
        n += 1  # start of a conversion specifier
        i += 1
        # Scan flags/width/precision up to the conversion character.
        while i < length:
            ch = format_string[i]
            if ch in "aAcdeEfFgGinopsuxX":
                i += 1  # conversion character terminates the specifier
                break
            if ch == "%":
                break  # unterminated specifier; let the outer loop re-examine
            if ch == "*":
                n += 1  # '*' consumes an extra width/precision argument
            i += 1
    return n
def main(args_in):
    """ Return a string output with information on string format errors
    >>> main(["test/lint/lint-format-strings-tests.txt"])
    test/lint/lint-format-strings-tests.txt: Expected 1 argument(s) after format string but found 2 argument(s): printf("%d", 1, 2)
    test/lint/lint-format-strings-tests.txt: Expected 2 argument(s) after format string but found 3 argument(s): printf("%a %b", 1, 2, "anything")
    test/lint/lint-format-strings-tests.txt: Expected 1 argument(s) after format string but found 0 argument(s): printf("%d")
    test/lint/lint-format-strings-tests.txt: Expected 3 argument(s) after format string but found 2 argument(s): printf("%a%b%z", 1, "anything")
    >>> main(["test/lint/lint-format-strings-tests-skip-arguments.txt"])
    test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 2 argument(s): fprintf(skipped, "%d", 1, 2)
    test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 0 argument(s): fprintf(skipped, "%d")
    test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 2 argument(s): snprintf(skip1, skip2, "%d", 1, 2)
    test/lint/lint-format-strings-tests-skip-arguments.txt: Expected 1 argument(s) after format string but found 0 argument(s): snprintf(skip1, skip2, "%d")
    test/lint/lint-format-strings-tests-skip-arguments.txt: Could not parse function call string "snprintf(...)": snprintf(skip1, "%d")
    """
    parser = argparse.ArgumentParser(description="This program checks that the number of arguments passed "
                                                 "to a variadic format string function matches the number of format "
                                                 "specifiers in the format string.")
    parser.add_argument("file", type=argparse.FileType(
        "r", encoding="utf-8"), nargs="*", help="C++ source code file (e.g. foo.cpp)")
    args = parser.parse_args(args_in)

    for f in args.file:
        file_content = f.read()
        for (function_name,
                skip_arguments) in FUNCTION_NAMES_AND_NUMBER_OF_LEADING_ARGUMENTS:
            for function_call_str in parse_function_calls(
                    function_name, file_content):
                parts = parse_function_call_and_arguments(
                    function_name, function_call_str)
                # Truncated to 512 chars: long enough to match FALSE_POSITIVES
                # entries and keep diagnostics readable.
                relevant_function_call_str = unescape("".join(parts))[:512]
                if (f.name, relevant_function_call_str) in FALSE_POSITIVES:
                    continue
                # parts = [name + "(", arg1, ..., argN, ")"]; fewer than
                # 3 + skip_arguments parts means no format string was found.
                if len(parts) < 3 + skip_arguments:
                    print("{}: Could not parse function call string \"{}(...)\": {}".format(
                        f.name, function_name, relevant_function_call_str))
                    continue
                argument_count = len(parts) - 3 - skip_arguments
                format_str = parse_string_content(parts[1 + skip_arguments])
                format_specifier_count = count_format_specifiers(format_str)
                if format_specifier_count != argument_count:
                    print("{}: Expected {} argument(s) after format string but found {} argument(s): {}".format(
                        f.name, format_specifier_count, argument_count, relevant_function_call_str))
                    continue
if __name__ == "__main__":
doctest.testmod()
main(sys.argv[1:])
|
|
"""Tests for the Start.ca sensor platform."""
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.startca.sensor import StartcaData
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
DATA_GIGABYTES,
HTTP_NOT_FOUND,
PERCENTAGE,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
async def test_capped_setup(hass, aioclient_mock):
    """Test the sensor setup with a finite (400 GB) bandwidth cap."""
    config = {
        "platform": "startca",
        "api_key": "NOTAKEY",
        "total_bandwidth": 400,
        "monitored_variables": [
            "usage",
            "usage_gb",
            "limit",
            "used_download",
            "used_upload",
            "used_total",
            "grace_download",
            "grace_upload",
            "grace_total",
            "total_download",
            "total_upload",
            "used_remaining",
        ],
    }
    # Canned XML response as returned by the Start.ca usage API (byte counts:
    # 304946829777 B -> 304.95 GB, 6480700153 B -> 6.48 GB).
    result = (
        '<?xml version="1.0" encoding="ISO-8859-15"?>'
        "<usage>"
        "<version>1.1</version>"
        "<total> <!-- total actual usage -->"
        "<download>304946829777</download>"
        "<upload>6480700153</upload>"
        "</total>"
        "<used> <!-- part of usage that counts against quota -->"
        "<download>304946829777</download>"
        "<upload>6480700153</upload>"
        "</used>"
        "<grace> <!-- part of usage that is free -->"
        "<download>304946829777</download>"
        "<upload>6480700153</upload>"
        "</grace>"
        "</usage>"
    )
    aioclient_mock.get(
        "https://www.start.ca/support/usage/api?key=NOTAKEY", text=result
    )
    await async_setup_component(hass, "sensor", {"sensor": config})
    await hass.async_block_till_done()
    # Usage ratio is used download relative to the 400 GB cap, in percent.
    state = hass.states.get("sensor.start_ca_usage_ratio")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
    assert state.state == "76.24"
    state = hass.states.get("sensor.start_ca_usage")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "304.95"
    # With a finite cap the limit sensor reports the configured bandwidth.
    state = hass.states.get("sensor.start_ca_data_limit")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "400"
    state = hass.states.get("sensor.start_ca_used_download")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "304.95"
    state = hass.states.get("sensor.start_ca_used_upload")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "6.48"
    state = hass.states.get("sensor.start_ca_used_total")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "311.43"
    state = hass.states.get("sensor.start_ca_grace_download")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "304.95"
    state = hass.states.get("sensor.start_ca_grace_upload")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "6.48"
    state = hass.states.get("sensor.start_ca_grace_total")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "311.43"
    state = hass.states.get("sensor.start_ca_total_download")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "304.95"
    state = hass.states.get("sensor.start_ca_total_upload")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "6.48"
    # Remaining = 400 GB cap minus used download.
    state = hass.states.get("sensor.start_ca_remaining")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "95.05"
async def test_unlimited_setup(hass, aioclient_mock):
    """Test the setup with an unlimited plan (total_bandwidth of 0)."""
    config = {
        "platform": "startca",
        "api_key": "NOTAKEY",
        "total_bandwidth": 0,
        "monitored_variables": [
            "usage",
            "usage_gb",
            "limit",
            "used_download",
            "used_upload",
            "used_total",
            "grace_download",
            "grace_upload",
            "grace_total",
            "total_download",
            "total_upload",
            "used_remaining",
        ],
    }
    # On an unlimited plan nothing counts against quota: <used> is zero and
    # all actual usage shows up in the <grace> section.
    result = (
        '<?xml version="1.0" encoding="ISO-8859-15"?>'
        "<usage>"
        "<version>1.1</version>"
        "<total> <!-- total actual usage -->"
        "<download>304946829777</download>"
        "<upload>6480700153</upload>"
        "</total>"
        "<used> <!-- part of usage that counts against quota -->"
        "<download>0</download>"
        "<upload>0</upload>"
        "</used>"
        "<grace> <!-- part of usage that is free -->"
        "<download>304946829777</download>"
        "<upload>6480700153</upload>"
        "</grace>"
        "</usage>"
    )
    aioclient_mock.get(
        "https://www.start.ca/support/usage/api?key=NOTAKEY", text=result
    )
    await async_setup_component(hass, "sensor", {"sensor": config})
    await hass.async_block_till_done()
    state = hass.states.get("sensor.start_ca_usage_ratio")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
    assert state.state == "0"
    state = hass.states.get("sensor.start_ca_usage")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "0.0"
    # No cap configured: limit and remaining report infinity.
    state = hass.states.get("sensor.start_ca_data_limit")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "inf"
    state = hass.states.get("sensor.start_ca_used_download")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "0.0"
    state = hass.states.get("sensor.start_ca_used_upload")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "0.0"
    state = hass.states.get("sensor.start_ca_used_total")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "0.0"
    state = hass.states.get("sensor.start_ca_grace_download")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "304.95"
    state = hass.states.get("sensor.start_ca_grace_upload")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "6.48"
    state = hass.states.get("sensor.start_ca_grace_total")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "311.43"
    state = hass.states.get("sensor.start_ca_total_download")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "304.95"
    state = hass.states.get("sensor.start_ca_total_upload")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "6.48"
    state = hass.states.get("sensor.start_ca_remaining")
    assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
    assert state.state == "inf"
async def test_bad_return_code(hass, aioclient_mock):
    """Test handling a return code that isn't HTTP OK."""
    aioclient_mock.get(
        "https://www.start.ca/support/usage/api?key=NOTAKEY", status=HTTP_NOT_FOUND
    )
    fetcher = StartcaData(hass.loop, async_get_clientsession(hass), "NOTAKEY", 400)
    # The update must report failure instead of raising.
    assert await fetcher.async_update() is False
async def test_bad_json_decode(hass, aioclient_mock):
    """Test decoding an invalid (non-XML) API response."""
    aioclient_mock.get(
        "https://www.start.ca/support/usage/api?key=NOTAKEY", text="this is not xml"
    )
    fetcher = StartcaData(hass.loop, async_get_clientsession(hass), "NOTAKEY", 400)
    # The update must report failure instead of raising.
    assert await fetcher.async_update() is False
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first UTXO in listunspent whose amount equals amount exactly."""
    match = next((utxo for utxo in listunspent if utxo['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def __init__(self):
        """Configure the test: four nodes, each starting from a clean chain."""
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
    def run_test(self):
        """Exercise fundrawtransaction across funding, change, watch-only,
        multisig, locked-wallet, many-input and feeRate scenarios."""
        print("Mining blocks...")

        min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)

        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.

        #            = 2 bytes * minRelayTxFeePerByte
        feeTolerance = 2 * min_relay_tx_fee/1000

        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()

        # Set up a watch-only key on node 3 funded from node 0.
        watchonly_address = self.nodes[0].getnewaddress()
        watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
        watchonly_amount = Decimal(200)
        self.nodes[3].importpubkey(watchonly_pubkey, "", True)
        watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
        self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)

        # Seed node 2 with a few coins of known sizes (used via get_unspent below).
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)

        self.nodes[0].generate(1)
        self.sync_all()

        ###############
        # simple test #
        ###############
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs

        ##############################
        # simple test with two coins #
        ##############################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.2 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs

        ##############################
        # simple test with two coins #
        ##############################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.6 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0)
        # The funded-but-unsigned inputs must carry an empty scriptSig.
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')

        ################################
        # simple test with two outputs #
        ################################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']

        assert(len(dec_tx['vin']) > 0)
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')

        #########################################################################
        # test a fundrawtransaction with a VIN greater than the required amount #
        #########################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']

        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee

        #####################################################################
        # test a fundrawtransaction with which will not get a change output #
        #####################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        # Spend (almost) everything so no change output is economical.
        outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']

        # changepos == -1 means no change output was added.
        assert_equal(rawtxfund['changepos'], -1)
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee

        ####################################################
        # test a fundrawtransaction with an invalid option #
        ####################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        try:
            self.nodes[2].fundrawtransaction(rawtx, {'foo': 'bar'})
            raise AssertionError("Accepted invalid option foo")
        except JSONRPCException as e:
            assert("Unexpected key foo" in e.error['message'])

        ############################################################
        # test a fundrawtransaction with an invalid change address #
        ############################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        try:
            self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': 'foobar'})
            raise AssertionError("Accepted invalid ion address")
        except JSONRPCException as e:
            assert("changeAddress must be a valid ion address" in e.error['message'])

        ############################################################
        # test a fundrawtransaction with a provided change address #
        ############################################################
        utx = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        change = self.nodes[2].getnewaddress()
        # changePosition 2 is out of range for a 1-output transaction.
        try:
            rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 2})
        except JSONRPCException as e:
            assert('changePosition out of bounds' == e.error['message'])
        else:
            assert(False)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        out = dec_tx['vout'][0];
        assert_equal(change, out['scriptPubKey']['addresses'][0])

        #########################################################################
        # test a fundrawtransaction with a VIN smaller than the required amount #
        #########################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 1)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)

        # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
        # Splice a 1-byte "00" scriptSig into the raw hex to make sure
        # fundrawtransaction leaves pre-set scriptSigs untouched.
        rawtx = rawtx[:82] + "0100" + rawtx[84:]

        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for i, out in enumerate(dec_tx['vout']):
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
            else:
                # Any non-requested output must be the change output.
                assert_equal(i, rawtxfund['changepos'])

        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])

        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)

        ###########################################
        # test a fundrawtransaction with two VINs #
        ###########################################
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        utx2 = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 6.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1

        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)

        matchingIns = 0
        for vinOut in dec_tx['vin']:
            for vinIn in inputs:
                if vinIn['txid'] == vinOut['txid']:
                    matchingIns+=1

        assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params

        #########################################################
        # test a fundrawtransaction with two VINs and two vOUTs #
        #########################################################
        utx = get_unspent(self.nodes[2].listunspent(), 1)
        utx2 = get_unspent(self.nodes[2].listunspent(), 5)

        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1

        assert_equal(matchingOuts, 2)
        assert_equal(len(dec_tx['vout']), 3)

        ##############################################
        # test a fundrawtransaction with invalid vin #
        ##############################################
        listunspent = self.nodes[2].listunspent()
        inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
        outputs = { self.nodes[0].getnewaddress() : 1.0}
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)

        try:
            rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
            raise AssertionError("Spent more than available")
        except JSONRPCException as e:
            assert("Insufficient" in e.error['message'])

        ############################################################
        #compare fee of a standard pubkeyhash transaction
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)

        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']

        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################

        ############################################################
        #compare fee of a standard pubkeyhash transaction with multiple outputs
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendmany("", outputs)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']

        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################

        ############################################################
        #compare fee of a 2of2 multisig p2sh transaction

        # create 2of2 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()

        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)

        mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])

        inputs = []
        outputs = {mSigObj:1.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)

        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']

        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################

        ############################################################
        #compare fee of a 4of5 multisig p2sh transaction

        # create 4of5 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr3 = self.nodes[1].getnewaddress()
        addr4 = self.nodes[1].getnewaddress()
        addr5 = self.nodes[1].getnewaddress()

        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        addr3Obj = self.nodes[1].validateaddress(addr3)
        addr4Obj = self.nodes[1].validateaddress(addr4)
        addr5Obj = self.nodes[1].validateaddress(addr5)

        mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])

        inputs = []
        outputs = {mSigObj:1.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)

        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']

        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################

        ############################################################
        # spend a 2of2 multisig transaction over fundraw

        # create 2of2 addr
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()

        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)

        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])

        # send 1.2 BTC to msig addr
        txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        oldBalance = self.nodes[1].getbalance()
        inputs = []
        outputs = {self.nodes[1].getnewaddress():1.1}
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[2].fundrawtransaction(rawTx)

        signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()

        # make sure funds are received at node1
        assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())

        ############################################################
        # locked wallet test
        self.nodes[1].encryptwallet("test")
        # encryptwallet shuts node 1 down, so drop it and restart the network.
        self.nodes.pop(1)
        stop_nodes(self.nodes)

        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)

        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()

        # drain the keypool
        self.nodes[1].getnewaddress()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        # fund a transaction that requires a new key for the change output
        # creating the key must be impossible because the wallet is locked
        try:
            fundedTx = self.nodes[1].fundrawtransaction(rawTx)
            raise AssertionError("Wallet unlocked without passphrase")
        except JSONRPCException as e:
            assert('Keypool ran out' in e.error['message'])

        #refill the keypool
        self.nodes[1].walletpassphrase("test", 100)
        self.nodes[1].walletlock()

        try:
            self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.2)
            raise AssertionError("Wallet unlocked without passphrase")
        except JSONRPCException as e:
            assert('walletpassphrase' in e.error['message'])

        oldBalance = self.nodes[0].getbalance()

        inputs = []
        outputs = {self.nodes[0].getnewaddress():1.1}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        # Funding works while locked now that the keypool has spare keys.
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)

        #now we need to unlock
        self.nodes[1].walletpassphrase("test", 100)
        signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
        self.nodes[1].generate(1)
        self.sync_all()

        # make sure funds are received at node0 (1.1 plus node1's 50 block reward)
        assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())

        ###############################################
        # multiple (~19) inputs tx test | Compare fee #
        ###############################################

        #empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.nodes[0].generate(1)
        self.sync_all()

        #fund a tx with ~20 small inputs
        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)

        #create same transaction over sendtoaddress
        txId = self.nodes[1].sendmany("", outputs)
        signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']

        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs

        #############################################
        # multiple (~19) inputs tx test | sign/send #
        #############################################

        #again, empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()

        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
        self.nodes[0].generate(1)
        self.sync_all()

        #fund a tx with ~20 small inputs
        oldBalance = self.nodes[0].getbalance()

        inputs = []
        outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward

        #####################################################
        # test fundrawtransaction with OP_RETURN and no vin #
        #####################################################
        rawtx = "0100000000010000000000000000066a047465737400000000"
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)

        assert_equal(len(dec_tx['vin']), 0)
        assert_equal(len(dec_tx['vout']), 1)

        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])

        assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
        assert_equal(len(dec_tx['vout']), 2) # one change output added

        ##################################################
        # test a fundrawtransaction using only watchonly #
        ##################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 1)
        assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)

        assert("fee" in result.keys())
        assert_greater_than(result["changepos"], -1)

        ###############################################################
        # test fundrawtransaction using the entirety of watched funds #
        ###############################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)

        # Backward compatibility test (2nd param is includeWatching)
        result = self.nodes[3].fundrawtransaction(rawtx, True)
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 2)
        assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)

        assert_greater_than(result["fee"], 0)
        assert_greater_than(result["changepos"], -1)
        assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)

        # Node 3 holds only the watch-only key, so it cannot fully sign.
        signedtx = self.nodes[3].signrawtransaction(result["hex"])
        assert(not signedtx["complete"])
        signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
        assert(signedtx["complete"])
        self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()

        #######################
        # Test feeRate option #
        #######################

        # Make sure there is exactly one input so coin selection can't skew the result
        assert_equal(len(self.nodes[3].listunspent(1)), 1)

        inputs = []
        outputs = {self.nodes[2].getnewaddress() : 1}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
        result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
        result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
        result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
        assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
        assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
if __name__ == '__main__':
    # Script entry point: run the test through the framework harness.
    RawTransactionsTest().main()
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
import tensorflow as tf
from tensorboard.backend.event_processing import io_wrapper
class IoWrapperTest(tf.test.TestCase):
  """Tests for the path/listing helpers in
  tensorboard.backend.event_processing.io_wrapper."""

  def testIsGcsPathIsTrue(self):
    self.assertTrue(io_wrapper.IsGCSPath('gs://bucket/foo'))

  def testIsGcsPathIsFalse(self):
    self.assertFalse(io_wrapper.IsGCSPath('/tmp/foo'))

  def testIsCnsPathTrue(self):
    self.assertTrue(io_wrapper.IsCnsPath('/cns/foo/bar'))

  def testIsCnsPathFalse(self):
    self.assertFalse(io_wrapper.IsCnsPath('/tmp/foo'))

  def testIsIsTensorFlowEventsFileTrue(self):
    self.assertTrue(
        io_wrapper.IsTensorFlowEventsFile(
            '/logdir/events.out.tfevents.1473720042.com'))

  def testIsIsTensorFlowEventsFileFalse(self):
    self.assertFalse(
        io_wrapper.IsTensorFlowEventsFile('/logdir/model.ckpt'))

  def testIsIsTensorFlowEventsFileWithEmptyInput(self):
    with six.assertRaisesRegex(self,
                               ValueError,
                               r'Path must be a nonempty string'):
      io_wrapper.IsTensorFlowEventsFile('')

  def testListDirectoryAbsolute(self):
    """Top-level listing yields absolute paths for files and directories."""
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self._CreateDeepDirectoryStructure(temp_dir)
    expected_files = (
        'foo',
        'bar',
        'quuz',
        'a.tfevents.1',
        'model.ckpt',
        'waldo',
    )
    self.assertItemsEqual(
        (os.path.join(temp_dir, f) for f in expected_files),
        io_wrapper.ListDirectoryAbsolute(temp_dir))

  def testListRecursivelyViaGlobbing(self):
    """Glob-based recursion lists every subdirectory with its contents."""
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self._CreateDeepDirectoryStructure(temp_dir)
    # Each entry pairs a (relative) subdirectory with its direct children.
    expected = [
        ['', [
            'foo',
            'bar',
            'a.tfevents.1',
            'model.ckpt',
            'quuz',
            'waldo',
        ]],
        ['bar', [
            'b.tfevents.1',
            'red_herring.txt',
            'baz',
            'quux',
        ]],
        ['bar/baz', [
            'c.tfevents.1',
            'd.tfevents.1',
        ]],
        ['bar/quux', [
            'some_flume_output.txt',
            'some_more_flume_output.txt',
        ]],
        ['quuz', [
            'e.tfevents.1',
            'garply',
        ]],
        ['quuz/garply', [
            'f.tfevents.1',
            'corge',
            'grault',
        ]],
        ['quuz/garply/corge', [
            'g.tfevents.1'
        ]],
        ['quuz/garply/grault', [
            'h.tfevents.1',
        ]],
        ['waldo', [
            'fred',
        ]],
        ['waldo/fred', [
            'i.tfevents.1',
        ]],
    ]
    for pair in expected:
      # If this is not the top-level directory, prepend the high-level
      # directory.
      pair[0] = os.path.join(temp_dir, pair[0]) if pair[0] else temp_dir
      pair[1] = [os.path.join(pair[0], f) for f in pair[1]]
    self._CompareFilesPerSubdirectory(
        expected, io_wrapper.ListRecursivelyViaGlobbing(temp_dir))

  def testListRecursivelyViaGlobbingForPathWithGlobCharacters(self):
    """Glob metacharacters in the start path must be escaped, not expanded."""
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    directory_names = (
        'ba*',
        'ba*/subdirectory',
        'bar',
    )
    for directory_name in directory_names:
      os.makedirs(os.path.join(temp_dir, directory_name))
    file_names = (
        'ba*/a.tfevents.1',
        'ba*/subdirectory/b.tfevents.1',
        'bar/c.tfevents.1',
    )
    for file_name in file_names:
      open(os.path.join(temp_dir, file_name), 'w').close()
    expected = [
        ['', [
            'a.tfevents.1',
            'subdirectory',
        ]],
        ['subdirectory', [
            'b.tfevents.1',
        ]],
        # The contents of the bar subdirectory should be excluded from
        # this listing because the * character should have been escaped.
    ]
    top = os.path.join(temp_dir, 'ba*')
    for pair in expected:
      # If this is not the top-level directory, prepend the high-level
      # directory.
      pair[0] = os.path.join(top, pair[0]) if pair[0] else top
      pair[1] = [os.path.join(pair[0], f) for f in pair[1]]
    self._CompareFilesPerSubdirectory(
        expected, io_wrapper.ListRecursivelyViaGlobbing(top))

  def testListRecursivelyViaWalking(self):
    """Walk-based recursion lists only files, including in empty dirs."""
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self._CreateDeepDirectoryStructure(temp_dir)
    expected = [
        ['', [
            'a.tfevents.1',
            'model.ckpt',
        ]],
        ['foo', []],
        ['bar', [
            'b.tfevents.1',
            'red_herring.txt',
        ]],
        ['bar/baz', [
            'c.tfevents.1',
            'd.tfevents.1',
        ]],
        ['bar/quux', [
            'some_flume_output.txt',
            'some_more_flume_output.txt',
        ]],
        ['quuz', [
            'e.tfevents.1',
        ]],
        ['quuz/garply', [
            'f.tfevents.1',
        ]],
        ['quuz/garply/corge', [
            'g.tfevents.1',
        ]],
        ['quuz/garply/grault', [
            'h.tfevents.1',
        ]],
        ['waldo', []],
        ['waldo/fred', [
            'i.tfevents.1',
        ]],
    ]
    for pair in expected:
      # If this is not the top-level directory, prepend the high-level
      # directory.
      pair[0] = os.path.join(temp_dir, pair[0]) if pair[0] else temp_dir
      pair[1] = [os.path.join(pair[0], f) for f in pair[1]]
    self._CompareFilesPerSubdirectory(
        expected, io_wrapper.ListRecursivelyViaWalking(temp_dir))

  def testGetLogdirSubdirectories(self):
    temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
    self._CreateDeepDirectoryStructure(temp_dir)
    # Only subdirectories that immediately contain at least 1 events
    # file should be listed.
    expected = [
        '',
        'bar',
        'bar/baz',
        'quuz',
        'quuz/garply',
        'quuz/garply/corge',
        'quuz/garply/grault',
        'waldo/fred',
    ]
    self.assertItemsEqual(
        [(os.path.join(temp_dir, subdir) if subdir else temp_dir)
         for subdir in expected],
        io_wrapper.GetLogdirSubdirectories(temp_dir))

  def _CreateDeepDirectoryStructure(self, top_directory):
    """Creates a reasonable deep structure of subdirectories with files.

    Args:
      top_directory: The absolute path of the top level directory in
        which to create the directory structure.
    """
    # Add a few subdirectories.
    directory_names = (
        # An empty directory.
        'foo',
        # A directory with an events file (and a text file).
        'bar',
        # A deeper directory with events files.
        'bar/baz',
        # A non-empty subdirectory that lacks event files (should be ignored).
        'bar/quux',
        # This 3-level deep set of subdirectories tests logic that replaces the
        # full glob string with an absolute path prefix if there is only 1
        # subdirectory in the final mapping.
        'quuz/garply',
        'quuz/garply/corge',
        'quuz/garply/grault',
        # A directory that lacks events files, but contains a subdirectory
        # with events files (first level should be ignored, second level should
        # be included).
        'waldo',
        'waldo/fred',
    )
    for directory_name in directory_names:
      os.makedirs(os.path.join(top_directory, directory_name))

    # Add a few files to the directory.
    file_names = (
        'a.tfevents.1',
        'model.ckpt',
        'bar/b.tfevents.1',
        'bar/red_herring.txt',
        'bar/baz/c.tfevents.1',
        'bar/baz/d.tfevents.1',
        'bar/quux/some_flume_output.txt',
        'bar/quux/some_more_flume_output.txt',
        'quuz/e.tfevents.1',
        'quuz/garply/f.tfevents.1',
        'quuz/garply/corge/g.tfevents.1',
        'quuz/garply/grault/h.tfevents.1',
        'waldo/fred/i.tfevents.1',
    )
    for file_name in file_names:
      open(os.path.join(top_directory, file_name), 'w').close()

  def _CompareFilesPerSubdirectory(self, expected, gotten):
    """Compares iterables of (subdirectory path, list of absolute paths).

    Order is ignored both across subdirectories and within a listing.

    Args:
      expected: The expected iterable of 2-tuples.
      gotten: The gotten iterable of 2-tuples.
    """
    expected_directory_to_listing = {
        result[0]: list(result[1]) for result in expected}
    gotten_directory_to_listing = {
        result[0]: list(result[1]) for result in gotten}
    self.assertItemsEqual(
        expected_directory_to_listing.keys(),
        gotten_directory_to_listing.keys())
    for subdirectory, expected_listing in expected_directory_to_listing.items():
      gotten_listing = gotten_directory_to_listing[subdirectory]
      self.assertItemsEqual(
          expected_listing,
          gotten_listing,
          'Files for subdirectory %r must match. Expected %r. Got %r.' % (
              subdirectory, expected_listing, gotten_listing))
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner.
  tf.test.main()
|
|
# Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides stubs for os, sys and subprocess for testing
This test allows one to test code that itself uses os, sys, and subprocess.
"""
import os
import re
import shlex
import sys
class Override(object):
  """Temporarily replaces attributes of *base_module* with test stubs.

  For each name in module_list, the original attribute (if any) is saved
  and replaced by a freshly constructed stub from the table below.
  Restore() must be called to undo the replacement before the Override is
  garbage collected.
  """

  def __init__(self, base_module, module_list):
    # Maps an overridable name to the stub class that impersonates it.
    stubs = {'adb_commands': AdbCommandsModuleStub,
             'cloud_storage': CloudStorageModuleStub,
             'open': OpenFunctionStub,
             'os': OsModuleStub,
             'perf_control': PerfControlModuleStub,
             'raw_input': RawInputFunctionStub,
             'subprocess': SubprocessModuleStub,
             'sys': SysModuleStub,
             'thermal_throttle': ThermalThrottleModuleStub,
             'logging': LoggingStub,
             'certutils': CertUtilsStub,
             'adb_install_cert': AdbInstallCertStub,
             'platformsettings': PlatformSettingsStub,
             }
    self.adb_commands = None
    self.os = None
    self.subprocess = None
    self.sys = None

    self._base_module = base_module
    self._overrides = {}

    for module_name in module_list:
      # Save the original (None when overriding a builtin like 'open'
      # that is not an attribute of base_module).
      self._overrides[module_name] = getattr(base_module, module_name, None)
      setattr(self, module_name, stubs[module_name]())
      setattr(base_module, module_name, getattr(self, module_name))

    if self.os and self.sys:
      self.os.path.sys = self.sys

  def __del__(self):
    # Restore() must have been called; otherwise stubs leak into the
    # overridden module past the test's lifetime.
    assert not len(self._overrides)

  def Restore(self):
    """Puts the original attributes back on the base module."""
    # Use items() rather than iteritems(): iteritems() does not exist on
    # Python 3, and items() behaves identically here on both versions.
    for module_name, original_module in self._overrides.items():
      if original_module is None:
        # This will happen when we override built-in functions, like open.
        # If we don't delete the attribute, we will shadow the built-in
        # function with an attribute set to None.
        delattr(self._base_module, module_name)
      else:
        setattr(self._base_module, module_name, original_module)
    self._overrides = {}
class AdbDevice(object):
  """In-memory fake of a single attached Android device."""

  def __init__(self):
    self.has_root = False
    self.needs_su = False
    self.shell_command_handlers = {}
    self.mock_content = []
    self.system_properties = {}
    # Make sure callers always see a plausible ABI property.
    if self.system_properties.get('ro.product.cpu.abi') is None:
      self.system_properties['ro.product.cpu.abi'] = 'armeabi-v7a'

  def HasRoot(self):
    return self.has_root

  def NeedsSU(self):
    return self.needs_su

  def RunShellCommand(self, args, **_kwargs):
    # Accept either a pre-split argv list or a single command string;
    # dispatch on the command name to a test-registered handler.
    if isinstance(args, basestring):
      args = shlex.split(args)
    handler = self.shell_command_handlers[args[0]]
    return handler(args)

  def FileExists(self, _):
    return False

  def ReadFile(self, device_path, as_root=False):  # pylint: disable=W0613
    return self.mock_content

  def GetProp(self, property_name):
    return self.system_properties[property_name]

  def SetProp(self, property_name, property_value):
    self.system_properties[property_name] = property_value
class AdbCommandsModuleStub(object):
  """Stub for the adb_commands module."""

  class AdbCommandsStub(object):
    """Stub for one AdbCommands connection to a single device."""

    def __init__(self, module, device):
      self._module = module
      self._device = device
      self.is_root_enabled = True
      self._adb_device = module.adb_device

    def IsRootEnabled(self):
      return self.is_root_enabled

    def RestartAdbdOnDevice(self):
      pass

    def IsUserBuild(self):
      return False

    def WaitForDevicePm(self):
      pass

    def device(self):
      return self._adb_device

    def device_serial(self):
      return self._device

  def __init__(self):
    self.attached_devices = []
    self.apk_package_name = None
    self.adb_device = AdbDevice()

    # Bind the module instance into the constructor so callers can keep
    # using the real module's AdbCommands(device) calling convention.
    def MakeAdbCommandsStub(device=None):
      return AdbCommandsModuleStub.AdbCommandsStub(self, device)
    self.AdbCommands = MakeAdbCommandsStub

  @staticmethod
  def IsAndroidSupported():
    return True

  def GetPackageName(self, _):
    return self.apk_package_name

  def GetAttachedDevices(self):
    return self.attached_devices

  def CleanupLeftoverProcesses(self):
    pass
class CloudStorageModuleStub(object):
  """Stub for the cloud_storage module backed by in-memory dictionaries.

  Remote storage is modelled as {bucket: {remote_path: hash}} and local
  files as {local_path: hash}; no network or disk access ever happens.
  """
  PUBLIC_BUCKET = 'chromium-telemetry'
  PARTNER_BUCKET = 'chrome-partner-telemetry'
  INTERNAL_BUCKET = 'chrome-telemetry'
  BUCKET_ALIASES = {
      'public': PUBLIC_BUCKET,
      'partner': PARTNER_BUCKET,
      'internal': INTERNAL_BUCKET,
  }

  # These are used to test for CloudStorage errors.
  # Higher levels include access to the lower-level buckets.
  INTERNAL_PERMISSION = 2
  PARTNER_PERMISSION = 1
  PUBLIC_PERMISSION = 0
  # Not logged in.
  CREDENTIALS_ERROR_PERMISSION = -1

  class NotFoundError(Exception):
    pass

  class CloudStorageError(Exception):
    pass

  class PermissionError(CloudStorageError):
    pass

  class CredentialsError(CloudStorageError):
    pass

  def __init__(self):
    # Each known bucket starts out with no remote files.
    self.default_remote_paths = {CloudStorageModuleStub.INTERNAL_BUCKET:{},
                                 CloudStorageModuleStub.PARTNER_BUCKET:{},
                                 CloudStorageModuleStub.PUBLIC_BUCKET:{}}
    self.remote_paths = self.default_remote_paths
    # local_file_hashes: {local file path: content hash}
    self.local_file_hashes = {}
    # local_hash_files: {<path>.sha1 file: hash it contains}
    self.local_hash_files = {}
    self.permission_level = CloudStorageModuleStub.INTERNAL_PERMISSION
    # Record of remote paths fetched via Get/GetIfChanged, for assertions.
    self.downloaded_files = []

  def SetPermissionLevelForTesting(self, permission_level):
    self.permission_level = permission_level

  def CheckPermissionLevelForBucket(self, bucket):
    # Raises the same errors the real cloud_storage module would raise
    # given the configured permission level.
    if bucket == CloudStorageModuleStub.PUBLIC_BUCKET:
      return
    elif (self.permission_level ==
          CloudStorageModuleStub.CREDENTIALS_ERROR_PERMISSION):
      raise CloudStorageModuleStub.CredentialsError()
    elif bucket == CloudStorageModuleStub.PARTNER_BUCKET:
      if self.permission_level < CloudStorageModuleStub.PARTNER_PERMISSION:
        raise CloudStorageModuleStub.PermissionError()
    elif bucket == CloudStorageModuleStub.INTERNAL_BUCKET:
      if self.permission_level < CloudStorageModuleStub.INTERNAL_PERMISSION:
        raise CloudStorageModuleStub.PermissionError()
    elif bucket not in self.remote_paths:
      raise CloudStorageModuleStub.NotFoundError()

  def SetRemotePathsForTesting(self, remote_path_dict=None):
    # Passing no dict (or an empty one) resets to the default empty buckets.
    if not remote_path_dict:
      self.remote_paths = self.default_remote_paths
      return
    self.remote_paths = remote_path_dict

  def GetRemotePathsForTesting(self):
    if not self.remote_paths:
      self.remote_paths = self.default_remote_paths
    return self.remote_paths

  # Set a dictionary of data files and their "calculated" hashes.
  def SetCalculatedHashesForTesting(self, calculated_hash_dictionary):
    self.local_file_hashes = calculated_hash_dictionary

  def GetLocalDataFiles(self):
    return self.local_file_hashes.keys()

  # Set a dictionary of hash files and the hashes they should contain.
  def SetHashFileContentsForTesting(self, hash_file_dictionary):
    self.local_hash_files = hash_file_dictionary

  def GetLocalHashFiles(self):
    return self.local_hash_files.keys()

  def ChangeRemoteHashForTesting(self, bucket, remote_path, new_hash):
    self.remote_paths[bucket][remote_path] = new_hash

  def List(self, bucket):
    if not bucket or not bucket in self.remote_paths:
      bucket_error = ('Incorrect bucket specified, correct buckets:' +
                      str(self.remote_paths))
      raise CloudStorageModuleStub.CloudStorageError(bucket_error)
    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
    return list(self.remote_paths[bucket].keys())

  def Exists(self, bucket, remote_path):
    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
    return remote_path in self.remote_paths[bucket]

  def Insert(self, bucket, remote_path, local_path):
    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
    if not local_path in self.GetLocalDataFiles():
      file_path_error = 'Local file path does not exist'
      raise CloudStorageModuleStub.CloudStorageError(file_path_error)
    self.remote_paths[bucket][remote_path] = (
        CloudStorageModuleStub.CalculateHash(self, local_path))
    return remote_path

  def GetHelper(self, bucket, remote_path, local_path, only_if_changed):
    """Simulate a download; returns the remote hash, or False when
    only_if_changed is set and no transfer would have happened."""
    CloudStorageModuleStub.CheckPermissionLevelForBucket(self, bucket)
    if not remote_path in self.remote_paths[bucket]:
      if only_if_changed:
        return False
      raise CloudStorageModuleStub.NotFoundError('Remote file does not exist.')
    remote_hash = self.remote_paths[bucket][remote_path]
    local_hash = self.local_file_hashes[local_path]
    if only_if_changed and remote_hash == local_hash:
      return False
    # "Download": record the fetch and make the local hashes match remote.
    self.downloaded_files.append(remote_path)
    self.local_file_hashes[local_path] = remote_hash
    self.local_hash_files[local_path + '.sha1'] = remote_hash
    return remote_hash

  def Get(self, bucket, remote_path, local_path):
    return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
                                            local_path, False)

  def GetIfChanged(self, local_path, bucket=None):
    # Without an explicit bucket, search public -> partner -> internal.
    remote_path = os.path.basename(local_path)
    if bucket:
      return CloudStorageModuleStub.GetHelper(self, bucket, remote_path,
                                              local_path, True)
    result = CloudStorageModuleStub.GetHelper(
        self, self.PUBLIC_BUCKET, remote_path, local_path, True)
    if not result:
      result = CloudStorageModuleStub.GetHelper(
          self, self.PARTNER_BUCKET, remote_path, local_path, True)
    if not result:
      result = CloudStorageModuleStub.GetHelper(
          self, self.INTERNAL_BUCKET, remote_path, local_path, True)
    return result

  def GetFilesInDirectoryIfChanged(self, directory, bucket):
    if os.path.dirname(directory) == directory:  # If in the root dir.
      raise ValueError('Trying to serve root directory from HTTP server.')
    for dirpath, _, filenames in os.walk(directory):
      for filename in filenames:
        # Only .sha1 marker files identify cloud-storage-backed data.
        path, extension = os.path.splitext(
            os.path.join(dirpath, filename))
        if extension != '.sha1':
          continue
        self.GetIfChanged(path, bucket)

  def CalculateHash(self, file_path):
    return self.local_file_hashes[file_path]

  def ReadHash(self, hash_path):
    return self.local_hash_files[hash_path]
class LoggingStub(object):
  """Stub for the logging module that records warnings and errors."""

  def __init__(self):
    self.warnings = []
    self.errors = []

  def info(self, msg, *args):
    # Info-level output is intentionally discarded.
    pass

  def error(self, msg, *args):
    self.errors.append(msg % args)

  def warning(self, msg, *args):
    self.warnings.append(msg % args)

  def warn(self, msg, *args):
    # logging.warn is a deprecated alias for logging.warning.
    self.warning(msg, *args)
class OpenFunctionStub(object):
  """Stub for the built-in open(); maps file names to canned data."""

  class FileStub(object):
    """File-like object over the canned data; usable as a context manager."""

    def __init__(self, data):
      self._data = data

    def __enter__(self):
      return self

    def __exit__(self, *args):
      pass

    def read(self, size=None):
      # Fix: the original tested `if size:`, so read(0) returned the whole
      # data instead of '' like a real file. Test against None instead.
      if size is not None:
        return self._data[:size]
      else:
        return self._data

    def write(self, data):
      # Delegates to the backing object, so _data must itself be writable
      # (e.g. a StringIO) for write() to work.
      self._data.write(data)

    def close(self):
      pass

  def __init__(self):
    # Maps file name -> data handed to the FileStub.
    self.files = {}

  def __call__(self, name, *args, **kwargs):
    # Raises KeyError if the test did not register `name` in self.files.
    return OpenFunctionStub.FileStub(self.files[name])
class OsModuleStub(object):
  """Stub for the os module, including os.path and os.environ."""

  class OsEnvironModuleStub(object):
    def get(self, _):
      return None

  class OsPathModuleStub(object):
    """Stub for os.path driven by explicit `files` and `dirs` lists."""

    def __init__(self, sys_module):
      self.sys = sys_module
      self.files = []
      self.dirs = []

    def exists(self, path):
      return path in self.files

    def isfile(self, path):
      return path in self.files

    def isdir(self, path):
      return path in self.dirs

    def join(self, *paths):
      on_windows = self.sys.platform.startswith('win')

      def _IsAbsolute(path):
        if on_windows:
          return re.match('[a-zA-Z]:\\\\', path)
        return path.startswith('/')

      # Per Python specification, if any component is an absolute path,
      # discard previous components.
      for index, path in reversed(list(enumerate(paths))):
        if _IsAbsolute(path):
          paths = paths[index:]
          break
      # Normalize the separator to the stubbed platform's convention,
      # regardless of the host platform.
      joined = os.path.join(*paths)
      if on_windows:
        return joined.replace('/', '\\')
      return joined.replace('\\', '/')

    @staticmethod
    def abspath(path):
      return os.path.abspath(path)

    @staticmethod
    def expanduser(path):
      return os.path.expanduser(path)

    @staticmethod
    def dirname(path):
      return os.path.dirname(path)

    @staticmethod
    def splitext(path):
      return os.path.splitext(path)

    @staticmethod
    def splitdrive(path):
      return os.path.splitdrive(path)

    X_OK = os.X_OK

    sep = os.sep
    pathsep = os.pathsep

  def __init__(self, sys_module=sys):
    self.path = OsModuleStub.OsPathModuleStub(sys_module)
    self.environ = OsModuleStub.OsEnvironModuleStub()
    self.display = ':0'
    self.local_app_data = None
    self.sys_path = None
    self.program_files = None
    self.program_files_x86 = None
    self.devnull = os.devnull
    self._directory = {}

  def access(self, path, _):
    return path in self.path.files

  def getenv(self, name, value=None):
    # Only the handful of variables the tests care about are supported;
    # anything else is a programming error in the test.
    env_by_name = {
        'DISPLAY': self.display,
        'LOCALAPPDATA': self.local_app_data,
        'PATH': self.sys_path,
        'PROGRAMFILES': self.program_files,
        'PROGRAMFILES(X86)': self.program_files_x86,
    }
    if name not in env_by_name:
      raise NotImplementedError('Unsupported getenv')
    env = env_by_name[name]
    return env if env else value

  def chdir(self, path):
    pass

  def walk(self, top):
    for dir_name in self._directory:
      yield top, dir_name, self._directory[dir_name]
class PerfControlModuleStub(object):
  """Stub for the perf_control module."""

  class PerfControlStub(object):
    def __init__(self, adb):
      pass

  def __init__(self):
    # Expose the stub class under the real module's attribute name.
    self.PerfControl = PerfControlModuleStub.PerfControlStub
class RawInputFunctionStub(object):
  """Stub for the raw_input built-in; always answers with self.input."""

  def __init__(self):
    self.input = ''

  def __call__(self, name, *args, **kwargs):
    # The prompt and any extra arguments are ignored.
    return self.input
class SubprocessModuleStub(object):
  """Stub for the subprocess module."""

  class PopenStub(object):
    """Callable stand-in for subprocess.Popen that returns itself."""

    def __init__(self):
      # Tests override these to script communicate()/returncode.
      self.communicate_result = ('', '')
      self.returncode_result = 0

    def __call__(self, args, **kwargs):
      return self

    def communicate(self):
      return self.communicate_result

    @property
    def returncode(self):
      return self.returncode_result

  def __init__(self):
    self.Popen = SubprocessModuleStub.PopenStub()
    self.PIPE = None

  def call(self, *args, **kwargs):
    pass
class SysModuleStub(object):
  """Stub for the sys module; tests assign .platform as needed."""

  def __init__(self):
    self.platform = ''
class ThermalThrottleModuleStub(object):
  """Stub for the thermal_throttle module."""

  class ThermalThrottleStub(object):
    def __init__(self, adb):
      pass

  def __init__(self):
    # Expose the stub class under the real module's attribute name.
    self.ThermalThrottle = ThermalThrottleModuleStub.ThermalThrottleStub
class CertUtilsStub(object):
  """Stub for the certutils module."""

  # Mirrors certutils.openssl_import_error; None simulates OpenSSL present.
  openssl_import_error = None

  @staticmethod
  def write_dummy_ca_cert(_ca_cert_str, _key_str, cert_path):
    pass

  @staticmethod
  def generate_dummy_ca_cert():
    # Returns placeholder (cert, key) strings.
    return '-', '-'
class AdbInstallCertStub(object):
  """Stub for the adb_install_cert module."""

  class AndroidCertInstaller(object):
    def __init__(self, device_id, _cert_name, _cert_path):
      # device_id 'failure' simulates a device error at construction time;
      # every other id (including 'success') constructs silently.
      if device_id == 'failure':
        raise Exception('Test exception.')

    def install_cert(self, overwrite_cert=False):
      pass
class PlatformSettingsStub(object):
  """Stub for the platformsettings module."""

  @staticmethod
  def HasSniSupport():
    # Pretend SNI is always available.
    return True
|
|
"""Nearly exact trust-region optimization subproblem."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.linalg import (norm, get_lapack_funcs, solve_triangular,
cho_solve)
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
__all__ = ['_minimize_trustregion_exact',
'estimate_smallest_singular_value',
'singular_leading_submatrix',
'IterativeSubproblem']
def _minimize_trustregion_exact(fun, x0, args=(), jac=None, hess=None,
**trust_region_options):
"""
Minimization of scalar function of one or more variables using
a nearly exact trust-region algorithm.
Options
-------
initial_tr_radius : float
Initial trust-region radius.
max_tr_radius : float
Maximum value of the trust-region radius. No steps that are longer
than this value will be proposed.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than ``gtol`` before successful
termination.
"""
if jac is None:
raise ValueError('Jacobian is required for trust region '
'exact minimization.')
if hess is None:
raise ValueError('Hessian matrix is required for trust region '
'exact minimization.')
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
subproblem=IterativeSubproblem,
**trust_region_options)
def estimate_smallest_singular_value(U):
    """Given upper triangular matrix ``U`` estimate the smallest singular
    value and the corresponding right singular vector in O(n**2) operations.

    Parameters
    ----------
    U : ndarray
        Square upper triangular matrix.

    Returns
    -------
    s_min : float
        Estimated smallest singular value of the provided matrix.
    z_min : ndarray
        Estimated right singular vector.

    Notes
    -----
    The procedure is based on [1]_ and is done in two steps. First it finds
    a vector ``e`` with components selected from {+1, -1} such that the
    solution ``w`` from the system ``U.T w = e`` is as large as possible.
    Next it estimates ``U v = w``. The smallest singular value is close
    to ``norm(w)/norm(v)`` and the right singular vector is close
    to ``v/norm(v)``. The estimate is better the more ill-conditioned
    the matrix is.

    References
    ----------
    .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H.
           An estimate for the condition number of a matrix. 1979.
           SIAM Journal on Numerical Analysis, 16(2), 368-375.
    """
    U = np.atleast_2d(U)
    m, n = U.shape
    if m != n:
        raise ValueError("A square triangular matrix should be provided.")
    # A vector `e` with components selected from {+1, -1}
    # is selected so that the solution `w` to the system
    # `U.T w = e` is as large as possible. Implementation
    # based on algorithm 3.5.1, p. 142, from reference [2]
    # adapted for lower triangular matrix.
    p = np.zeros(n)
    w = np.empty(n)
    # Implemented according to: Golub, G. H., Van Loan, C. F. (2013).
    # "Matrix computations". Forth Edition. JHU press. pp. 140-142.
    for k in range(n):
        # Pick the sign of e[k] that maximizes |w[k]| plus the 1-norm
        # growth potential of the remaining partial sums `p`.
        wp = (1-p[k]) / U.T[k, k]
        wm = (-1-p[k]) / U.T[k, k]
        pp = p[k+1:] + U.T[k+1:, k]*wp
        pm = p[k+1:] + U.T[k+1:, k]*wm
        if abs(wp) + norm(pp, 1) >= abs(wm) + norm(pm, 1):
            w[k] = wp
            p[k+1:] = pp
        else:
            w[k] = wm
            p[k+1:] = pm
    # The system `U v = w` is solved using backward substitution.
    v = solve_triangular(U, w)
    v_norm = norm(v)
    w_norm = norm(w)
    # Smallest singular value
    s_min = w_norm / v_norm
    # Associated vector
    z_min = v / v_norm
    return s_min, z_min
def gershgorin_bounds(H):
    """
    Given a square matrix ``H`` compute upper
    and lower bounds for its eigenvalues (Gershgorin Bounds).
    Defined ref. [1].

    References
    ----------
    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
           Trust region methods. 2000. Siam. pp. 19.
    """
    diag = np.diag(H)
    # 1-norm of the off-diagonal part of each row.
    off_diag_sums = np.sum(np.abs(H), axis=1) - np.abs(diag)
    # Every eigenvalue lies in some disc centered at a diagonal entry
    # with radius equal to that row's off-diagonal absolute sum.
    lower_bound = np.min(diag - off_diag_sums)
    upper_bound = np.max(diag + off_diag_sums)
    return lower_bound, upper_bound
def singular_leading_submatrix(A, U, k):
    """
    Compute the term that makes the leading ``k`` by ``k``
    submatrix from ``A`` singular.

    Parameters
    ----------
    A : ndarray
        Symmetric matrix that is not positive definite.
    U : ndarray
        Upper triangular matrix resulting of an incomplete
        Cholesky decomposition of matrix ``A``.
    k : int
        Positive integer such that the leading k by k submatrix from
        `A` is the first non-positive definite leading submatrix.

    Returns
    -------
    delta : float
        Amount that should be added to the element (k, k) of the
        leading k by k submatrix of ``A`` to make it singular.
    v : ndarray
        A vector such that ``v.T B v = 0``. Where B is the matrix A after
        ``delta`` is added to its element (k, k).
    """
    idx = k - 1
    # Perturbation that zeroes out the k-th pivot of the factorization.
    delta = np.sum(U[:idx, idx] ** 2) - A[idx, idx]
    # Build the null vector of the perturbed leading submatrix.
    v = np.zeros(len(A))
    v[idx] = 1
    # Remaining entries come from solving a triangular system.
    if k != 1:
        v[:idx] = solve_triangular(U[:idx, :idx], -U[:idx, idx])
    return delta, v
class IterativeSubproblem(BaseQuadraticSubproblem):
    """Quadratic subproblem solved by nearly exact iterative method.

    Notes
    -----
    This subproblem solver was based on [1]_, [2]_ and [3]_,
    which implement similar algorithms. The algorithm is basically
    that of [1]_ but ideas from [2]_ and [3]_ were also used.

    References
    ----------
    .. [1] A.R. Conn, N.I. Gould, and P.L. Toint, "Trust region methods",
           Siam, pp. 169-200, 2000.
    .. [2] J. Nocedal and S. Wright, "Numerical optimization",
           Springer Science & Business Media. pp. 83-91, 2006.
    .. [3] J.J. More and D.C. Sorensen, "Computing a trust region step",
           SIAM Journal on Scientific and Statistical Computing, vol. 4(3),
           pp. 553-572, 1983.
    """

    # UPDATE_COEFF appears in reference [1]_
    # in formula 7.3.14 (p. 190) named as "theta".
    # As recommended there it value is fixed in 0.01.
    UPDATE_COEFF = 0.01

    EPS = np.finfo(float).eps

    def __init__(self, x, fun, jac, hess, hessp=None,
                 k_easy=0.1, k_hard=0.2):
        # ``k_easy``/``k_hard`` are the relative-error tolerances used by
        # the boundary and hard-case stopping tests in ``solve``.
        super(IterativeSubproblem, self).__init__(x, fun, jac, hess)
        # When the trust-region shrinks in two consecutive
        # calculations (``tr_radius < previous_tr_radius``)
        # the lower bound ``lambda_lb`` may be reused,
        # facilitating  the convergence.  To indicate no
        # previous value is known at first ``previous_tr_radius``
        # is set to -1  and ``lambda_lb`` to None.
        self.previous_tr_radius = -1
        self.lambda_lb = None
        self.niter = 0
        # ``k_easy`` and ``k_hard`` are parameters used
        # to detemine the stop criteria to the iterative
        # subproblem solver. Take a look at pp. 194-197
        # from reference _[1] for a more detailed description.
        self.k_easy = k_easy
        self.k_hard = k_hard
        # Get Lapack function for cholesky decomposition.
        # The implemented Scipy wrapper does not return
        # the incomplete factorization needed by the method.
        self.cholesky, = get_lapack_funcs(('potrf',), (self.hess,))
        # Get info about Hessian
        self.dimension = len(self.hess)
        self.hess_gershgorin_lb,\
            self.hess_gershgorin_ub = gershgorin_bounds(self.hess)
        self.hess_inf = norm(self.hess, np.Inf)
        self.hess_fro = norm(self.hess, 'fro')
        # A constant such that for vectors smaler than that
        # backward substituition is not reliable. It was stabilished
        # based on Golub, G. H., Van Loan, C. F. (2013).
        # "Matrix computations". Forth Edition. JHU press., p.165.
        self.CLOSE_TO_ZERO = self.dimension * self.EPS * self.hess_inf

    def _initial_values(self, tr_radius):
        """Given a trust radius, return a good initial guess for
        the damping factor, the lower bound and the upper bound.
        The values were chosen accordingly to the guidelines on
        section 7.3.8 (p. 192) from [1]_.
        """
        # Upper bound for the damping factor
        lambda_ub = max(0, self.jac_mag/tr_radius + min(-self.hess_gershgorin_lb,
                                                        self.hess_fro,
                                                        self.hess_inf))
        # Lower bound for the damping factor
        lambda_lb = max(0, -min(self.hess.diagonal()),
                        self.jac_mag/tr_radius - min(self.hess_gershgorin_ub,
                                                     self.hess_fro,
                                                     self.hess_inf))
        # Improve bounds with previous info
        if tr_radius < self.previous_tr_radius:
            lambda_lb = max(self.lambda_lb, lambda_lb)
        # Initial guess for the damping factor
        if lambda_lb == 0:
            lambda_initial = 0
        else:
            # Geometric mean of the bounds, kept at least UPDATE_COEFF
            # of the way into the interval (formula 7.3.14 of [1]_).
            lambda_initial = max(np.sqrt(lambda_lb * lambda_ub),
                                 lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
        return lambda_initial, lambda_lb, lambda_ub

    def solve(self, tr_radius):
        """Solve quadratic subproblem"""
        lambda_current, lambda_lb, lambda_ub = self._initial_values(tr_radius)
        n = self.dimension
        hits_boundary = True
        already_factorized = False
        self.niter = 0
        while True:
            # Compute Cholesky factorization
            if already_factorized:
                already_factorized = False
            else:
                H = self.hess+lambda_current*np.eye(n)
                U, info = self.cholesky(H, lower=False,
                                        overwrite_a=False,
                                        clean=True)
            self.niter += 1
            # Check if factorization succeded
            if info == 0 and self.jac_mag > self.CLOSE_TO_ZERO:
                # Successfull factorization
                # Solve `U.T U p = s`
                p = cho_solve((U, False), -self.jac)
                p_norm = norm(p)
                # Check for interior convergence
                if p_norm <= tr_radius and lambda_current == 0:
                    hits_boundary = False
                    break
                # Solve `U.T w = p`
                w = solve_triangular(U, p, trans='T')
                w_norm = norm(w)
                # Compute Newton step accordingly to
                # formula (4.44) p.87 from ref [2]_.
                delta_lambda = (p_norm/w_norm)**2 * (p_norm-tr_radius)/tr_radius
                lambda_new = lambda_current + delta_lambda
                if p_norm < tr_radius:  # Inside boundary
                    # Possible hard case: correct the step along the
                    # direction of the estimated smallest singular vector.
                    s_min, z_min = estimate_smallest_singular_value(U)
                    ta, tb = self.get_boundaries_intersections(p, z_min,
                                                               tr_radius)
                    # Choose `step_len` with the smallest magnitude.
                    # The reason for this choice is explained at
                    # ref [3]_, p. 6 (Immediately before the formula
                    # for `tau`).
                    step_len = min([ta, tb], key=abs)
                    # Compute the quadratic term  (p.T*H*p)
                    quadratic_term = np.dot(p, np.dot(H, p))
                    # Check stop criteria
                    relative_error = (step_len**2 * s_min**2) / (quadratic_term + lambda_current*tr_radius**2)
                    if relative_error <= self.k_hard:
                        p += step_len * z_min
                        break
                    # Update uncertanty bounds
                    lambda_ub = lambda_current
                    lambda_lb = max(lambda_lb, lambda_current - s_min**2)
                    # Compute Cholesky factorization
                    H = self.hess + lambda_new*np.eye(n)
                    c, info = self.cholesky(H, lower=False,
                                            overwrite_a=False,
                                            clean=True)
                    # Check if the factorization have succeded
                    #
                    if info == 0:  # Successfull factorization
                        # Update damping factor
                        lambda_current = lambda_new
                        already_factorized = True
                    else:  # Unsuccessfull factorization
                        # Update uncertanty bounds
                        lambda_lb = max(lambda_lb, lambda_new)
                        # Update damping factor
                        lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                             lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
                else:  # Outside boundary
                    # Check stop criteria
                    relative_error = abs(p_norm - tr_radius) / tr_radius
                    if relative_error <= self.k_easy:
                        break
                    # Update uncertanty bounds
                    lambda_lb = lambda_current
                    # Update damping factor
                    lambda_current = lambda_new
            elif info == 0 and self.jac_mag <= self.CLOSE_TO_ZERO:
                # jac_mag very close to zero
                # Check for interior convergence
                if lambda_current == 0:
                    p = np.zeros(n)
                    hits_boundary = False
                    break
                s_min, z_min = estimate_smallest_singular_value(U)
                step_len = tr_radius
                # Check stop criteria
                if step_len**2 * s_min**2 <= self.k_hard * lambda_current * tr_radius**2:
                    p = step_len * z_min
                    break
                # Update uncertanty bounds
                lambda_ub = lambda_current
                lambda_lb = max(lambda_lb, lambda_current - s_min**2)
                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
            else:  # Unsuccessfull factorization
                # Compute auxiliar terms
                delta, v = singular_leading_submatrix(H, U, info)
                v_norm = norm(v)
                # Update uncertanty interval
                lambda_lb = max(lambda_lb, lambda_current + delta/v_norm**2)
                # Update damping factor
                lambda_current = max(np.sqrt(lambda_lb * lambda_ub),
                                     lambda_lb + self.UPDATE_COEFF*(lambda_ub-lambda_lb))
        # Remember bounds/radius so the next call can warm-start.
        self.lambda_lb = lambda_lb
        self.lambda_current = lambda_current
        self.previous_tr_radius = tr_radius
        return p, hits_boundary
|
|
import re
import sys
from config import panda_config
# logger
from pandalogger.PandaLogger import PandaLogger
_logger = PandaLogger().getLogger('SiteMapper')
# PandaIDs
from PandaSiteIDs import PandaSiteIDs
# default site
from taskbuffer.SiteSpec import SiteSpec
# Default SiteSpec returned whenever a queried site is unknown;
# all fields come from the panda_config def_* settings.
defSite = SiteSpec()
defSite.sitename   = panda_config.def_sitename
defSite.nickname   = panda_config.def_nickname
defSite.dq2url     = panda_config.def_dq2url
defSite.ddm        = panda_config.def_ddm
defSite.type       = panda_config.def_type
defSite.gatekeeper = panda_config.def_gatekeeper
defSite.status     = panda_config.def_status
# No space tokens configured for the default site.
defSite.setokens   = {}
########################################################################
class SiteMapper:
    """Builds and serves in-memory maps of Panda sites and clouds.

    The maps are loaded once, in the constructor, from the given
    taskBuffer (assumed to provide getCloudList/getSiteList/getSiteInfo
    -- TODO confirm against the taskbuffer interface). Lookups fall back
    to the module-level default site/cloud when an entry is missing.
    """
    # constructor
    def __init__(self,taskBuffer,verbose=False):
        _logger.debug('__init__ SiteMapper')
        # Any failure during the load is logged and swallowed so that a
        # broken DB read leaves the mapper usable (with partial maps).
        try:
            # site list
            self.siteSpecList = {}
            # sites not belonging to a cloud
            self.defCloudSites = []
            # cloud specification
            self.cloudSpec = {}
            # create CloudSpec list
            tmpCloudListDB = taskBuffer.getCloudList()
            for tmpName,tmpCloudSpec in tmpCloudListDB.iteritems():
                self.cloudSpec[tmpName] = {}
                # copy attributes from CloudSepc
                for tmpAttr in tmpCloudSpec._attributes:
                    self.cloudSpec[tmpName][tmpAttr] = getattr(tmpCloudSpec,tmpAttr)
                # append additional attributes
                #  source : Panda siteID for source
                #  dest : Panda siteID for dest
                #  sites : Panda siteIDs in the cloud
                self.cloudSpec[tmpName]['source'] = self.cloudSpec[tmpName]['tier1']
                self.cloudSpec[tmpName]['dest'] = self.cloudSpec[tmpName]['tier1']
                self.cloudSpec[tmpName]['sites'] = []
                _logger.debug('Cloud->%s %s' % (tmpName,str(self.cloudSpec[tmpName])))
            # get list of PandaIDs
            siteIDsList = taskBuffer.getSiteList()
            firstDefault = True
            # read full list from DB
            siteFullList = taskBuffer.getSiteInfo()
            # read DB to produce paramters in siteinfo dynamically
            for tmpID,tmpNicknameList in siteIDsList.iteritems():
                for tmpNickname in tmpNicknameList:
                    # invalid nickname
                    if not siteFullList.has_key(tmpNickname):
                        continue
                    # get full spec
                    ret = siteFullList[tmpNickname]
                    # append
                    if ret == None:
                        _logger.error('Could not read site info for %s:%s' % (tmpID,tmpNickname))
                    elif (firstDefault and tmpID == defSite.sitename) or (not self.siteSpecList.has_key(tmpID)) \
                             or (self.siteSpecList.has_key(tmpID) and self.siteSpecList[tmpID].status in ['offline','']):
                        # overwrite default or remove existing offline
                        if firstDefault and tmpID == defSite.sitename:
                            del self.siteSpecList[tmpID]
                            firstDefault = False
                        elif self.siteSpecList.has_key(tmpID) and self.siteSpecList[tmpID].status in ['offline','']:
                            del self.siteSpecList[tmpID]
                        # append
                        if not self.siteSpecList.has_key(tmpID):
                            # determine type following a convention
                            tmpType = 'production'
                            if tmpID.startswith('ANALY_'):
                                tmpType = 'analysis'
                            elif re.search('test',tmpID,re.I) or \
                                     (PandaSiteIDs.has_key(tmpID) and PandaSiteIDs[tmpID]['status']!='OK'):
                                tmpType = 'test'
                            # set type
                            ret.sitename = tmpID
                            ret.type = tmpType
                            # don't use site for production when cloud is undefined
                            if ret.type == 'production' and ret.cloud == '':
                                _logger.error('Empty cloud for %s:%s' % (tmpID,tmpNickname))
                            else:
                                self.siteSpecList[tmpID] = ret
                    else:
                        # overwrite status
                        if not ret.status in ['offline','']:
                            if self.siteSpecList[tmpID].status != 'online':
                                self.siteSpecList[tmpID].status = ret.status
                            # use larger maxinputsize and memory
                            try:
                                if ret.status in ['online']:
                                    if self.siteSpecList[tmpID].maxinputsize < ret.maxinputsize or \
                                           ret.maxinputsize == 0:
                                        self.siteSpecList[tmpID].maxinputsize = ret.maxinputsize
                                    if (self.siteSpecList[tmpID].memory != 0 and self.siteSpecList[tmpID].memory < ret.memory) or \
                                           ret.memory == 0:
                                        self.siteSpecList[tmpID].memory = ret.memory
                            except:
                                errtype, errvalue = sys.exc_info()[:2]
                                _logger.error("%s memory/inputsize failuer : %s %s" % (tmpID,errtype,errvalue))
            # make cloudSpec
            for siteSpec in self.siteSpecList.values():
                # choose only prod sites
                if siteSpec.type != 'production':
                    continue
                # append prod site in cloud
                for tmpCloud in siteSpec.cloudlist:
                    if self.cloudSpec.has_key(tmpCloud):
                        if not siteSpec.sitename in self.cloudSpec[tmpCloud]['sites']:
                            # append
                            self.cloudSpec[tmpCloud]['sites'].append(siteSpec.sitename)
                    else:
                        # append to the default cloud
                        if not siteSpec.sitename in self.defCloudSites:
                            # append
                            self.defCloudSites.append(siteSpec.sitename)
            # set defCloudSites for backward compatibility
            if self.cloudSpec.has_key('US'):
                # use US sites
                self.defCloudSites = self.cloudSpec['US']['sites']
            else:
                # add def site as a protection if defCloudSites is empty
                self.defCloudSites.append(defSite.sitename)
            # dump sites
            if verbose:
                _logger.debug('========= dump =========')
                for tmpSite,tmpSiteSpec in self.siteSpecList.iteritems():
                    _logger.debug('Site->%s' % str(tmpSiteSpec))
            # check
            for tmpCloud,tmpVals in self.cloudSpec.iteritems():
                # set T1
                # move the destination site to the front of the site list
                try:
                    tmpVals['sites'].remove(tmpVals['dest'])
                except:
                    pass
                tmpVals['sites'].insert(0,tmpVals['dest'])
                # dump
                _logger.debug('Cloud:%s has %s' % (tmpCloud,tmpVals['sites']))
                for tmpSite in tmpVals['sites']:
                    if not self.siteSpecList.has_key(tmpSite):
                        _logger.debug(" '%s' doesn't exist" % tmpSite)
                        continue
                    tmpSiteSpec = self.siteSpecList[tmpSite]
                    if tmpSiteSpec.status in ['offline']:
                        _logger.debug(' %s:%s' % (tmpSite,tmpSiteSpec.status))
            _logger.debug('Cloud:XX has %s' % self.defCloudSites)
        except:
            type, value, traceBack = sys.exc_info()
            _logger.error("__init__ SiteMapper : %s %s" % (type,value))
        _logger.debug('__init__ SiteMapper done')

    # accessor for site
    def getSite(self,site):
        """Return the SiteSpec for `site`, or the module default if unknown."""
        if self.siteSpecList.has_key(site):
            return self.siteSpecList[site]
        else:
            # return default site
            return defSite

    # check if site exists
    def checkSite(self,site):
        return self.siteSpecList.has_key(site)

    # accessor for cloud
    def getCloud(self,cloud):
        """Return the cloud dict for `cloud`, or a synthetic default cloud."""
        if self.cloudSpec.has_key(cloud):
            return self.cloudSpec[cloud]
        else:
            # return sites in default cloud
            ret = { 'source' : 'default',
                    'dest' : 'default',
                    'sites' : self.defCloudSites,
                    'transtimelo' : 2,
                    'transtimehi' : 1,
                    }
            return ret

    # accessor for cloud
    def checkCloud(self,cloud):
        if self.cloudSpec.has_key(cloud):
            return True
        else:
            return False

    # accessor for cloud list
    def getCloudList(self):
        return self.cloudSpec.keys()

    # get ddm point
    def getDdmEndpoint(self,siteID,storageToken):
        """Return the DDM endpoint for a storage token at a site.

        Falls back to the site's default ddm when the token is unknown;
        returns None for an unknown site.
        """
        if not self.checkSite(siteID):
            return None
        siteSpec = self.getSite(siteID)
        if siteSpec.setokens.has_key(storageToken):
            return siteSpec.setokens[storageToken]
        return siteSpec.ddm
|
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=relative-import
"""Blink IDL Intermediate Representation (IR) classes.
Classes are primarily constructors, which build an IdlDefinitions object
(and various contained objects) from an AST (produced by blink_idl_parser).
IR stores typedefs and they are resolved by the code generator.
Typedef resolution uses some auxiliary classes and OOP techniques to make this
a generic call. See TypedefResolver class in code_generator_v8.py.
Class hierarchy (mostly containment, '<' for inheritance):
IdlDefinitions
IdlCallbackFunction < TypedObject
IdlEnum :: FIXME: remove, just use a dict for enums
IdlInterface
IdlAttribute < TypedObject
IdlConstant < TypedObject
IdlLiteral
IdlOperation < TypedObject
IdlArgument < TypedObject
IdlSerializer
IdlStringifier
IdlIterable < IdlIterableOrMaplikeOrSetlike
IdlMaplike < IdlIterableOrMaplikeOrSetlike
IdlSetlike < IdlIterableOrMaplikeOrSetlike
IdlException < IdlInterface
(same contents as IdlInterface)
TypedObject :: Object with one or more attributes that is a type.
IdlArgument is 'picklable', as it is stored in interfaces_info.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
from idl_types import IdlArrayType
from idl_types import IdlFrozenArrayType
from idl_types import IdlNullableType
from idl_types import IdlPromiseType
from idl_types import IdlRecordType
from idl_types import IdlSequenceType
from idl_types import IdlType
from idl_types import IdlUnionType
# Property names the parser sets on Operation nodes for special operations;
# IdlOperation stores any that are present, lowercased, in |specials|.
SPECIAL_KEYWORD_LIST = ['LEGACYCALLER', 'GETTER', 'SETTER', 'DELETER']
################################################################################
# TypedObject
################################################################################
class TypedObject(object):
    """Object with a type, such as an Attribute or Operation (return value).
    The type can be an actual type, or can be a typedef, which must be resolved
    by the TypedefResolver before passing data to the code generator.
    """
    # Python 2 style abstract-base-class declaration.
    __metaclass__ = abc.ABCMeta
    # Names of the attributes on this object that hold types. Subclasses with
    # several type slots (e.g. IdlIterable) override this tuple; presumably it
    # is consumed by the TypedefResolver mentioned in the module docstring.
    idl_type_attributes = ('idl_type',)
################################################################################
# Definitions (main container class)
################################################################################
class IdlDefinitions(object):
    """Top-level container for all definitions parsed from one IDL file.

    Holds name-keyed dictionaries of interfaces (exceptions are folded in),
    dictionaries, enumerations, callback functions and typedefs, plus a list
    of implements statements.
    """

    def __init__(self, node):
        """Args: node: AST root node, class == 'File'"""
        self.callback_functions = {}
        self.dictionaries = {}
        self.enumerations = {}
        self.implements = []
        self.interfaces = {}
        self.typedefs = {}

        node_class = node.GetClass()
        if node_class != 'File':
            raise ValueError('Unrecognized node class: %s' % node_class)

        children = node.GetChildren()
        for child in children:
            child_class = child.GetClass()
            if child_class == 'Interface':
                interface = IdlInterface(child)
                self.interfaces[interface.name] = interface
            elif child_class == 'Exception':
                exception = IdlException(child)
                # For simplicity, treat exceptions as interfaces
                self.interfaces[exception.name] = exception
            elif child_class == 'Typedef':
                typedef = IdlTypedef(child)
                self.typedefs[typedef.name] = typedef
            elif child_class == 'Enum':
                enumeration = IdlEnum(child)
                self.enumerations[enumeration.name] = enumeration
            elif child_class == 'Callback':
                callback_function = IdlCallbackFunction(child)
                self.callback_functions[callback_function.name] = callback_function
            elif child_class == 'Implements':
                self.implements.append(IdlImplement(child))
            elif child_class == 'Dictionary':
                dictionary = IdlDictionary(child)
                self.dictionaries[dictionary.name] = dictionary
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)

    def accept(self, visitor):
        """Dispatch |visitor| over this container and every held definition."""
        visitor.visit_definitions(self)
        # .values()/.items() rather than the Python-2-only itervalues()/
        # iteritems(), so this module also runs under Python 3.
        for interface in self.interfaces.values():
            interface.accept(visitor)
        for callback_function in self.callback_functions.values():
            callback_function.accept(visitor)
        for dictionary in self.dictionaries.values():
            dictionary.accept(visitor)
        for enumeration in self.enumerations.values():
            enumeration.accept(visitor)
        for implement in self.implements:
            implement.accept(visitor)
        for typedef in self.typedefs.values():
            typedef.accept(visitor)

    def update(self, other):
        """Update with additional IdlDefinitions.

        Non-partial interfaces replace existing entries; partial interfaces
        are merged into the interface they extend (which must already exist).
        """
        for interface_name, new_interface in other.interfaces.items():
            if not new_interface.is_partial:
                # Add as new interface
                self.interfaces[interface_name] = new_interface
            else:
                # Merge partial to existing interface
                try:
                    self.interfaces[interface_name].merge(new_interface)
                except KeyError:
                    raise Exception('Tried to merge partial interface for {0}, '
                                    'but no existing interface by that name'
                                    .format(interface_name))
        # Merge callbacks and enumerations
        self.enumerations.update(other.enumerations)
        self.callback_functions.update(other.callback_functions)
################################################################################
# Callback Functions
################################################################################
class IdlCallbackFunction(TypedObject):
    """IR for a callback function declaration.

    The AST node carries two or three children: a return Type node, an
    Arguments node, and optionally a trailing ExtAttributes node.
    """

    def __init__(self, node):
        children = node.GetChildren()
        if not 2 <= len(children) <= 3:
            raise ValueError('Expected 2 or 3 children, got %s' % len(children))
        type_node, arguments_node = children[0], children[1]
        # The third child, when present, holds extended attributes.
        if len(children) == 3:
            self.extended_attributes = (
                ext_attributes_node_to_extended_attributes(children[2]))
        else:
            self.extended_attributes = {}
        arguments_node_class = arguments_node.GetClass()
        if arguments_node_class != 'Arguments':
            raise ValueError('Expected Arguments node, got %s' % arguments_node_class)
        self.name = node.GetName()
        self.idl_type = type_node_to_type(type_node)
        self.arguments = arguments_node_to_arguments(arguments_node)

    def accept(self, visitor):
        """Visit this callback function, then each of its arguments."""
        visitor.visit_callback_function(self)
        for argument in self.arguments:
            argument.accept(visitor)
################################################################################
# Dictionary
################################################################################
class IdlDictionary(object):
    """IR for a WebIDL dictionary definition (possibly partial)."""

    def __init__(self, node):
        self.name = node.GetName()
        self.is_partial = bool(node.GetProperty('Partial'))
        self.parent = None
        self.members = []
        self.extended_attributes = {}
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Inherit':
                # Single inheritance: record the parent dictionary's name.
                self.parent = child.GetName()
            elif kind == 'Key':
                self.members.append(IdlDictionaryMember(child))
            elif kind == 'ExtAttributes':
                self.extended_attributes = (
                    ext_attributes_node_to_extended_attributes(child))
            else:
                raise ValueError('Unrecognized node class: %s' % kind)

    def accept(self, visitor):
        """Visit this dictionary, then each of its members."""
        visitor.visit_dictionary(self)
        for member in self.members:
            member.accept(visitor)
class IdlDictionaryMember(TypedObject):
    """IR for one dictionary member (a 'Key' AST node)."""

    def __init__(self, node):
        self.name = node.GetName()
        self.is_required = bool(node.GetProperty('REQUIRED'))
        self.idl_type = None
        self.default_value = None
        self.extended_attributes = {}
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Type':
                self.idl_type = type_node_to_type(child)
            elif kind == 'Default':
                self.default_value = default_node_to_idl_literal(child)
            elif kind == 'ExtAttributes':
                self.extended_attributes = (
                    ext_attributes_node_to_extended_attributes(child))
            else:
                raise ValueError('Unrecognized node class: %s' % kind)

    def accept(self, visitor):
        visitor.visit_dictionary_member(self)
################################################################################
# Enumerations
################################################################################
class IdlEnum(object):
    """IR for an enumeration: a name plus its string values in source order."""

    def __init__(self, node):
        self.name = node.GetName()
        # Each child node's name is one enumeration value.
        self.values = [child.GetName() for child in node.GetChildren()]

    def accept(self, visitor):
        visitor.visit_enumeration(self)
################################################################################
# Typedefs
################################################################################
class IdlTypedef(object):
    """IR for a typedef declaration.

    The stored idl_type is later rewritten by the TypedefResolver (see the
    module docstring).
    """
    idl_type_attributes = ('idl_type',)

    def __init__(self, node):
        # The aliased type and the new name both come from the same node.
        self.idl_type = typedef_node_to_type(node)
        self.name = node.GetName()

    def accept(self, visitor):
        visitor.visit_typedef(self)
################################################################################
# Interfaces and Exceptions
################################################################################
class IdlInterface(object):
    """IR for an interface; IdlException reuses it for exceptions.

    Collects attributes, constants, operations, constructors and the optional
    serializer / stringifier / iterable / maplike / setlike declarations from
    the interface's AST node. Constructed with node=None it only initializes
    the member containers (used by IdlException.__init__).
    """

    def __init__(self, node=None):
        self.attributes = []
        self.constants = []
        self.constructors = []
        self.custom_constructors = []
        self.extended_attributes = {}
        self.operations = []
        self.parent = None
        self.serializer = None
        self.stringifier = None
        self.iterable = None
        self.has_indexed_elements = False
        self.maplike = None
        self.setlike = None
        self.original_interface = None
        self.partial_interfaces = []
        if not node:  # Early exit for IdlException.__init__
            return

        self.is_callback = bool(node.GetProperty('CALLBACK'))
        self.is_exception = False
        # FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
        self.is_partial = bool(node.GetProperty('Partial'))
        self.name = node.GetName()
        self.idl_type = IdlType(self.name)

        # Both are required for a value iterator (checked below).
        has_indexed_property_getter = False
        has_integer_typed_length = False

        children = node.GetChildren()
        for child in children:
            child_class = child.GetClass()
            if child_class == 'Attribute':
                attr = IdlAttribute(child)
                if attr.idl_type.is_integer_type and attr.name == 'length':
                    has_integer_typed_length = True
                self.attributes.append(attr)
            elif child_class == 'Const':
                self.constants.append(IdlConstant(child))
            elif child_class == 'ExtAttributes':
                extended_attributes = ext_attributes_node_to_extended_attributes(child)
                # Constructor-related entries are pulled out into
                # self.constructors / self.custom_constructors.
                self.constructors, self.custom_constructors = (
                    extended_attributes_to_constructors(extended_attributes))
                clear_constructor_attributes(extended_attributes)
                self.extended_attributes = extended_attributes
            elif child_class == 'Operation':
                op = IdlOperation(child)
                if 'getter' in op.specials and str(op.arguments[0].idl_type) == 'unsigned long':
                    has_indexed_property_getter = True
                self.operations.append(op)
            elif child_class == 'Inherit':
                self.parent = child.GetName()
            elif child_class == 'Serializer':
                self.serializer = IdlSerializer(child)
                self.process_serializer()
            elif child_class == 'Stringifier':
                self.stringifier = IdlStringifier(child)
                self.process_stringifier()
            elif child_class == 'Iterable':
                self.iterable = IdlIterable(child)
            elif child_class == 'Maplike':
                self.maplike = IdlMaplike(child)
            elif child_class == 'Setlike':
                self.setlike = IdlSetlike(child)
            else:
                raise ValueError('Unrecognized node class: %s' % child_class)

        # Count with a list comprehension, not len(filter(...)): filter()
        # returns a lazy iterator on Python 3, so len() would raise TypeError
        # there. The comprehension is equivalent on Python 2.
        if len([x for x in [self.iterable, self.maplike, self.setlike] if x]) > 1:
            raise ValueError('Interface can only have one of iterable<>, maplike<> and setlike<>.')

        if has_integer_typed_length and has_indexed_property_getter:
            self.has_indexed_elements = True
        else:
            if self.iterable is not None and self.iterable.key_type is None:
                raise ValueError('Value iterators (iterable<V>) must be accompanied by an indexed '
                                 'property getter and an integer-typed length attribute.')

    def accept(self, visitor):
        """Visit this interface and every member it holds."""
        visitor.visit_interface(self)
        for attribute in self.attributes:
            attribute.accept(visitor)
        for constant in self.constants:
            constant.accept(visitor)
        for constructor in self.constructors:
            constructor.accept(visitor)
        for custom_constructor in self.custom_constructors:
            custom_constructor.accept(visitor)
        for operation in self.operations:
            operation.accept(visitor)
        # At most one of these is set (enforced in __init__).
        if self.iterable:
            self.iterable.accept(visitor)
        elif self.maplike:
            self.maplike.accept(visitor)
        elif self.setlike:
            self.setlike.accept(visitor)

    def process_serializer(self):
        """Add the serializer's named operation child, if it has one, as a regular
        operation of this interface."""
        if self.serializer.operation:
            self.operations.append(self.serializer.operation)

    def process_stringifier(self):
        """Add the stringifier's attribute or named operation child, if it has
        one, as a regular attribute/operation of this interface."""
        if self.stringifier.attribute:
            self.attributes.append(self.stringifier.attribute)
        elif self.stringifier.operation:
            self.operations.append(self.stringifier.operation)

    def merge(self, other):
        """Merge in another interface's members (e.g., partial interface)"""
        self.attributes.extend(other.attributes)
        self.constants.extend(other.constants)
        self.operations.extend(other.operations)
        if self.serializer is None:
            self.serializer = other.serializer
        if self.stringifier is None:
            self.stringifier = other.stringifier
class IdlException(IdlInterface):
    # Properly exceptions and interfaces are distinct, and thus should inherit a
    # common base class (say, "IdlExceptionOrInterface").
    # However, there is only one exception (DOMException), and new exceptions
    # are not expected. Thus it is easier to implement exceptions as a
    # restricted subclass of interfaces.
    # http://www.w3.org/TR/WebIDL/#idl-exceptions
    def __init__(self, node):
        """Build an exception IR: similar to an interface, but simpler."""
        # Initialize all the member containers without parsing a node.
        IdlInterface.__init__(self)
        self.is_callback = False
        self.is_exception = True
        self.is_partial = False
        self.name = node.GetName()
        self.idl_type = IdlType(self.name)
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Attribute':
                self.attributes.append(IdlAttribute(child))
            elif kind == 'Const':
                self.constants.append(IdlConstant(child))
            elif kind == 'ExtAttributes':
                ext_attrs = ext_attributes_node_to_extended_attributes(child)
                self.constructors, self.custom_constructors = (
                    extended_attributes_to_constructors(ext_attrs))
                clear_constructor_attributes(ext_attrs)
                self.extended_attributes = ext_attrs
            elif kind == 'ExceptionOperation':
                self.operations.append(
                    IdlOperation.from_exception_operation_node(child))
            else:
                raise ValueError('Unrecognized node class: %s' % kind)
################################################################################
# Attributes
################################################################################
class IdlAttribute(TypedObject):
    """IR for an interface attribute."""

    def __init__(self, node=None):
        # Start from blank defaults so construction without a node works.
        self.is_read_only = False
        self.is_static = False
        self.name = None
        self.idl_type = None
        self.extended_attributes = {}
        if not node:
            return
        self.is_read_only = bool(node.GetProperty('READONLY'))
        self.is_static = bool(node.GetProperty('STATIC'))
        self.name = node.GetName()
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Type':
                self.idl_type = type_node_to_type(child)
            elif kind == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(child)
            else:
                raise ValueError('Unrecognized node class: %s' % kind)

    def accept(self, visitor):
        visitor.visit_attribute(self)
################################################################################
# Constants
################################################################################
class IdlConstant(TypedObject):
    """IR for an interface constant: type, name, value, extended attributes."""

    def __init__(self, node):
        children = node.GetChildren()
        if not 2 <= len(children) <= 3:
            raise ValueError('Expected 2 or 3 children, got %s' % len(children))
        type_node, value_node = children[0], children[1]
        value_node_class = value_node.GetClass()
        if value_node_class != 'Value':
            raise ValueError('Expected Value node, got %s' % value_node_class)
        self.name = node.GetName()
        # ConstType is more limited than Type, so subtree is smaller and
        # we don't use the full type_node_to_type function.
        self.idl_type = type_node_inner_to_type(type_node)
        # FIXME: This code is unnecessarily complicated due to the rather
        # inconsistent way the upstream IDL parser outputs default values.
        # http://crbug.com/374178
        if value_node.GetProperty('TYPE') == 'float':
            self.value = value_node.GetProperty('VALUE')
        else:
            self.value = value_node.GetName()
        # The optional third child carries extended attributes.
        if len(children) == 3:
            self.extended_attributes = (
                ext_attributes_node_to_extended_attributes(children[2]))
        else:
            self.extended_attributes = {}

    def accept(self, visitor):
        visitor.visit_constant(self)
################################################################################
# Literals
################################################################################
class IdlLiteral(object):
    """A literal value appearing in IDL (e.g. a member's default value)."""

    def __init__(self, idl_type, value):
        self.idl_type = idl_type
        self.value = value
        self.is_null = False

    def __str__(self):
        """Render the literal as source text; raises for unknown types."""
        kind = self.idl_type
        if kind == 'DOMString':
            # Falsy values (empty string, None) render as an empty string.
            return '"%s"' % self.value if self.value else '""'
        if kind == 'integer':
            return '%d' % self.value
        if kind == 'float':
            return '%g' % self.value
        if kind == 'boolean':
            return 'true' if self.value else 'false'
        raise ValueError('Unsupported literal type: %s' % kind)
class IdlLiteralNull(IdlLiteral):
    """The IDL 'null' literal (default value of nullable types)."""

    def __init__(self):
        # Deliberately does not call IdlLiteral.__init__; sets the same three
        # attributes directly.
        self.idl_type = 'NULL'
        self.value = None
        self.is_null = True

    def __str__(self):
        return 'NULL'
def default_node_to_idl_literal(node):
    """Convert a 'Default' AST node into an IdlLiteral (or IdlLiteralNull).

    Raises ValueError for unsupported string contents or unknown types.
    """
    # FIXME: This code is unnecessarily complicated due to the rather
    # inconsistent way the upstream IDL parser outputs default values.
    # http://crbug.com/374178
    idl_type = node.GetProperty('TYPE')
    if idl_type == 'DOMString':
        value = node.GetProperty('NAME')
        # Quotes and backslashes would need escaping; reject them outright.
        if '"' in value or '\\' in value:
            raise ValueError('Unsupported string value: %r' % value)
        return IdlLiteral(idl_type, value)
    if idl_type == 'integer':
        # base=0 lets int() honor the literal's own base prefix (0x..., ...).
        return IdlLiteral(idl_type, int(node.GetProperty('NAME'), base=0))
    if idl_type == 'float':
        return IdlLiteral(idl_type, float(node.GetProperty('VALUE')))
    if idl_type in ('boolean', 'sequence'):
        return IdlLiteral(idl_type, node.GetProperty('VALUE'))
    if idl_type == 'NULL':
        return IdlLiteralNull()
    raise ValueError('Unrecognized default value type: %s' % idl_type)
################################################################################
# Operations
################################################################################
class IdlOperation(TypedObject):
    """IR for an operation (method), including constructors and specials."""

    def __init__(self, node=None):
        # Blank defaults; the classmethod constructors below fill these in.
        self.arguments = []
        self.extended_attributes = {}
        self.specials = []
        self.is_constructor = False
        self.idl_type = None
        self.is_static = False
        if not node:
            return

        self.name = node.GetName()  # FIXME: should just be: or ''
        # FIXME: AST should use None internally
        if self.name == '_unnamed_':
            self.name = ''

        self.is_static = bool(node.GetProperty('STATIC'))
        properties = node.GetProperties()
        # Record any special-operation keywords present, lowercased.
        self.specials = [keyword.lower()
                         for keyword in SPECIAL_KEYWORD_LIST
                         if keyword in properties]

        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Arguments':
                self.arguments = arguments_node_to_arguments(child)
            elif kind == 'Type':
                self.idl_type = type_node_to_type(child)
            elif kind == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(child)
            else:
                raise ValueError('Unrecognized node class: %s' % kind)

    @classmethod
    def from_exception_operation_node(cls, node):
        """Build an operation from a legacy ExceptionOperation node.

        Needed to handle one case in DOMException.idl:
            // Override in a Mozilla compatible format
            [NotEnumerable] DOMString toString();
        FIXME: can we remove this? replace with a stringifier?
        """
        operation = cls()
        operation.name = node.GetName()
        children = node.GetChildren()
        if not 1 <= len(children) <= 2:
            raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
        operation.idl_type = type_node_to_type(children[0])
        if len(children) > 1:
            operation.extended_attributes = (
                ext_attributes_node_to_extended_attributes(children[1]))
        return operation

    @classmethod
    def constructor_from_arguments_node(cls, name, arguments_node):
        """Build a constructor operation named |name| from an Arguments node."""
        constructor = cls()
        constructor.name = name
        constructor.arguments = arguments_node_to_arguments(arguments_node)
        constructor.is_constructor = True
        return constructor

    def accept(self, visitor):
        visitor.visit_operation(self)
        for argument in self.arguments:
            argument.accept(visitor)
################################################################################
# Arguments
################################################################################
class IdlArgument(TypedObject):
    """IR for one operation argument; picklable (stored in interfaces_info)."""

    def __init__(self, node=None):
        self.extended_attributes = {}
        self.idl_type = None
        self.is_optional = False  # syntax: (optional T)
        self.is_variadic = False  # syntax: (T...)
        self.default_value = None
        if not node:
            return

        self.is_optional = node.GetProperty('OPTIONAL')
        self.name = node.GetName()
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Type':
                self.idl_type = type_node_to_type(child)
            elif kind == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(child)
            elif kind == 'Argument':
                # A nested Argument child is only used for the '...' ellipsis.
                child_name = child.GetName()
                if child_name != '...':
                    raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
                self.is_variadic = bool(child.GetProperty('ELLIPSIS'))
            elif kind == 'Default':
                self.default_value = default_node_to_idl_literal(child)
            else:
                raise ValueError('Unrecognized node class: %s' % kind)

    def accept(self, visitor):
        visitor.visit_argument(self)
def arguments_node_to_arguments(node):
    """Convert an Arguments AST node (or None) into a list of IdlArguments.

    [Constructor] and [CustomConstructor] without arguments (the bare form)
    have None instead of an arguments node, but have the same meaning as using
    an empty argument list, [Constructor()], so special-case this.
    http://www.w3.org/TR/WebIDL/#Constructor
    """
    if node is None:
        return []
    arguments = []
    for argument_node in node.GetChildren():
        arguments.append(IdlArgument(argument_node))
    return arguments
################################################################################
# Serializers
################################################################################
class IdlSerializer(object):
    """IR for a 'serializer' declaration on an interface."""

    def __init__(self, node):
        self.attribute_name = node.GetProperty('ATTRIBUTE')
        self.attribute_names = None
        self.operation = None
        self.extended_attributes = {}
        self.is_attribute = False
        self.is_getter = False
        self.is_inherit = False
        self.is_list = False
        self.is_map = False
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Operation':
                # Named operation form; IdlInterface.process_serializer also
                # registers it as a regular operation.
                self.operation = IdlOperation(child)
            elif kind == 'List':
                self.is_list = True
                self.is_getter = bool(child.GetProperty('GETTER'))
                self.attributes = child.GetProperty('ATTRIBUTES')
            elif kind == 'Map':
                self.is_map = True
                self.is_attribute = bool(child.GetProperty('ATTRIBUTE'))
                self.is_getter = bool(child.GetProperty('GETTER'))
                self.is_inherit = bool(child.GetProperty('INHERIT'))
                self.attributes = child.GetProperty('ATTRIBUTES')
            elif kind == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(child)
            else:
                raise ValueError('Unrecognized node class: %s' % kind)
################################################################################
# Stringifiers
################################################################################
class IdlStringifier(object):
    """IR for a 'stringifier' declaration on an interface."""

    def __init__(self, node):
        self.attribute = None
        self.operation = None
        self.extended_attributes = {}
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'Attribute':
                self.attribute = IdlAttribute(child)
            elif kind == 'Operation':
                # Keep only named operations (name == '' is ignored).
                operation = IdlOperation(child)
                if operation.name:
                    self.operation = operation
            elif kind == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(child)
            else:
                raise ValueError('Unrecognized node class: %s' % kind)
        # Copy the stringifier's extended attributes (such as [Unforgeable])
        # onto the underlying attribute or operation, if there is one.
        member = self.attribute or self.operation
        if member:
            member.extended_attributes.update(self.extended_attributes)
################################################################################
# Iterable, Maplike, Setlike
################################################################################
class IdlIterableOrMaplikeOrSetlike(TypedObject):
    """Shared parsing for iterable<>, maplike<> and setlike<> declarations.

    Collects the declaration's Type children into self.type_children; each
    subclass interprets them and then deletes the list.
    """

    def __init__(self, node):
        self.extended_attributes = {}
        self.type_children = []
        for child in node.GetChildren():
            kind = child.GetClass()
            if kind == 'ExtAttributes':
                self.extended_attributes = ext_attributes_node_to_extended_attributes(child)
            elif kind == 'Type':
                self.type_children.append(child)
            else:
                raise ValueError('Unrecognized node class: %s' % kind)
class IdlIterable(IdlIterableOrMaplikeOrSetlike):
    """IR for iterable<V> (value iterator) or iterable<K, V> (pair iterator)."""
    idl_type_attributes = ('key_type', 'value_type')

    def __init__(self, node):
        super(IdlIterable, self).__init__(node)
        types = self.type_children
        if len(types) == 1:
            # iterable<V>: no key type.
            self.key_type = None
            self.value_type = type_node_to_type(types[0])
        elif len(types) == 2:
            # iterable<K, V>.
            self.key_type = type_node_to_type(types[0])
            self.value_type = type_node_to_type(types[1])
        else:
            raise ValueError('Unexpected number of type children: %d' % len(types))
        del self.type_children

    def accept(self, visitor):
        visitor.visit_iterable(self)
class IdlMaplike(IdlIterableOrMaplikeOrSetlike):
    """IR for maplike<K, V> (optionally readonly)."""
    idl_type_attributes = ('key_type', 'value_type')

    def __init__(self, node):
        super(IdlMaplike, self).__init__(node)
        self.is_read_only = bool(node.GetProperty('READONLY'))
        types = self.type_children
        # maplike<> requires exactly a key type and a value type.
        if len(types) != 2:
            raise ValueError('Unexpected number of children: %d' % len(types))
        self.key_type = type_node_to_type(types[0])
        self.value_type = type_node_to_type(types[1])
        del self.type_children

    def accept(self, visitor):
        visitor.visit_maplike(self)
class IdlSetlike(IdlIterableOrMaplikeOrSetlike):
    """IR for setlike<V> (optionally readonly)."""
    idl_type_attributes = ('value_type',)

    def __init__(self, node):
        super(IdlSetlike, self).__init__(node)
        self.is_read_only = bool(node.GetProperty('READONLY'))
        types = self.type_children
        # setlike<> takes exactly one element type.
        if len(types) != 1:
            raise ValueError('Unexpected number of children: %d' % len(types))
        self.value_type = type_node_to_type(types[0])
        del self.type_children

    def accept(self, visitor):
        visitor.visit_setlike(self)
################################################################################
# Implement statements
################################################################################
class IdlImplement(object):
    """IR for an 'A implements B;' statement."""

    def __init__(self, node):
        # The node's own name is the left-hand interface; the REFERENCE
        # property names the right-hand interface.
        self.left_interface = node.GetName()
        self.right_interface = node.GetProperty('REFERENCE')

    def accept(self, visitor):
        visitor.visit_implement(self)
################################################################################
# Extended attributes
################################################################################
class Exposure:
    """One Exposed or RuntimeEnabled condition.

    Exposure(e, r) corresponds to [Exposed(e r)]; Exposure(e) corresponds to
    [Exposed=e]. Stored as the two attributes exposed and runtime_enabled.
    """
    def __init__(self, exposed, runtime_enabled=None):
        self.runtime_enabled = runtime_enabled
        self.exposed = exposed
def ext_attributes_node_to_extended_attributes(node):
    """
    Returns:
        Dictionary of {ExtAttributeName: ExtAttributeValue}.
        Value is usually a string, with these exceptions:
        Constructors: value is a list of Arguments nodes, corresponding to
        possible signatures of the constructor.
        CustomConstructors: value is a list of Arguments nodes, corresponding to
        possible signatures of the custom constructor.
        NamedConstructor: value is a Call node, corresponding to the single
        signature of the named constructor.
        SetWrapperReferenceTo: value is an Arguments node.
    """
    # Primarily just make a dictionary from the children.
    # The only complexity is handling various types of constructors:
    # Constructors and Custom Constructors can have duplicate entries due to
    # overloading, and thus are stored in temporary lists.
    # However, Named Constructors cannot be overloaded, and thus do not have
    # a list.
    # FIXME: move Constructor logic into separate function, instead of modifying
    # extended attributes in-place.
    constructors = []
    custom_constructors = []
    extended_attributes = {}

    def child_node(extended_attribute_node):
        # Returns the attribute's single child node, or None if it has none.
        children = extended_attribute_node.GetChildren()
        if not children:
            return None
        if len(children) > 1:
            raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
        return children[0]

    extended_attribute_node_list = node.GetChildren()
    for extended_attribute_node in extended_attribute_node_list:
        name = extended_attribute_node.GetName()
        child = child_node(extended_attribute_node)
        # child_class is None when the attribute has no child node.
        child_class = child and child.GetClass()
        if name == 'Constructor':
            if child_class and child_class != 'Arguments':
                raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
            # child may be None for the bare [Constructor] form; see
            # arguments_node_to_arguments, which treats None as no arguments.
            constructors.append(child)
        elif name == 'CustomConstructor':
            if child_class and child_class != 'Arguments':
                raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
            custom_constructors.append(child)
        elif name == 'NamedConstructor':
            if child_class and child_class != 'Call':
                raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
            extended_attributes[name] = child
        elif name == 'SetWrapperReferenceTo':
            if not child:
                raise ValueError('[SetWrapperReferenceTo] requires a child, but has none.')
            children = child.GetChildren()
            if len(children) != 1:
                raise ValueError('[SetWrapperReferenceTo] supports only one child.')
            if child_class != 'Arguments':
                raise ValueError('[SetWrapperReferenceTo] only supports Arguments as child, but has child of class: %s' % child_class)
            extended_attributes[name] = IdlArgument(children[0])
        elif name == 'Exposed':
            if child_class and child_class != 'Arguments':
                raise ValueError('[Exposed] only supports Arguments as child, but has child of class: %s' % child_class)
            exposures = []
            if child_class == 'Arguments':
                # [Exposed(e1 r1, e2 r2, ...)] form: each argument pairs an
                # exposed target with a runtime-enabled feature name.
                exposures = [Exposure(exposed=str(arg.idl_type),
                                      runtime_enabled=arg.name)
                             for arg in arguments_node_to_arguments(child)]
            else:
                # [Exposed=e] form: VALUE is a single string, or an iterable
                # of strings for multiple exposure targets.
                value = extended_attribute_node.GetProperty('VALUE')
                if type(value) is str:
                    exposures = [Exposure(exposed=value)]
                else:
                    exposures = [Exposure(exposed=v) for v in value]
            extended_attributes[name] = exposures
        elif child:
            raise ValueError('ExtAttributes node with unexpected children: %s' % name)
        else:
            # Plain attribute: store its VALUE property (may be None).
            value = extended_attribute_node.GetProperty('VALUE')
            extended_attributes[name] = value

    # Store constructors and custom constructors in special list attributes,
    # which are deleted later. Note plural in key.
    if constructors:
        extended_attributes['Constructors'] = constructors
    if custom_constructors:
        extended_attributes['CustomConstructors'] = custom_constructors

    return extended_attributes
def extended_attributes_to_constructors(extended_attributes):
    """Returns constructors and custom_constructors (lists of IdlOperations).

    Auxiliary function for IdlInterface.__init__. Reads the temporary
    'Constructors'/'CustomConstructors' list entries produced by
    ext_attributes_node_to_extended_attributes; for [NamedConstructor], the
    stored Call node is replaced in-place by the constructor's name.
    """
    constructor_list = extended_attributes.get('Constructors', [])
    constructors = [
        IdlOperation.constructor_from_arguments_node('Constructor', arguments_node)
        for arguments_node in constructor_list]

    custom_constructor_list = extended_attributes.get('CustomConstructors', [])
    custom_constructors = [
        IdlOperation.constructor_from_arguments_node('CustomConstructor', arguments_node)
        for arguments_node in custom_constructor_list]

    if 'NamedConstructor' in extended_attributes:
        # FIXME: support overloaded named constructors, and make homogeneous
        call_node = extended_attributes['NamedConstructor']
        # Replace the Call node with just the constructor's name.
        # (Removed an unused local that previously shadowed this literal.)
        extended_attributes['NamedConstructor'] = call_node.GetName()
        children = call_node.GetChildren()
        if len(children) != 1:
            raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
        arguments_node = children[0]
        named_constructor = IdlOperation.constructor_from_arguments_node('NamedConstructor', arguments_node)
        # FIXME: should return named_constructor separately; appended for Perl
        constructors.append(named_constructor)

    return constructors, custom_constructors
def clear_constructor_attributes(extended_attributes):
    """Replace temporary plural constructor keys with singular placeholders.

    Deletes Constructor*s* (plural) and sets Constructor (singular) to None;
    likewise for CustomConstructors/CustomConstructor. Mutates the dict
    in-place.
    """
    for plural, singular in (('Constructors', 'Constructor'),
                             ('CustomConstructors', 'CustomConstructor')):
        if plural in extended_attributes:
            del extended_attributes[plural]
            extended_attributes[singular] = None
################################################################################
# Types
################################################################################
def type_node_to_type(node):
    """Convert a 'Type' AST node into an IdlType.

    The node has one or two children: the inner type, plus an optional
    trailing Array suffix node for 'T[]'. Nullability of the base type and of
    the array are tracked independently.
    """
    children = node.GetChildren()
    if len(children) not in (1, 2):
        raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))

    base_type = type_node_inner_to_type(children[0])
    if node.GetProperty('NULLABLE'):
        base_type = IdlNullableType(base_type)
    if len(children) != 2:
        return base_type

    # Second child present: this is an array type 'T[]' (possibly 'T[]?').
    array_node = children[1]
    array_node_class = array_node.GetClass()
    if array_node_class != 'Array':
        raise ValueError('Expected Array node as TypeSuffix, got %s node.' % array_node_class)
    array_type = IdlArrayType(base_type)
    if array_node.GetProperty('NULLABLE'):
        return IdlNullableType(array_type)
    return array_type
def type_node_inner_to_type(node):
    """Map an inner type AST node onto the corresponding IDL type object."""
    node_class = node.GetClass()
    # Note Type*r*ef, not Typedef: an identifier type, i.e. either a typedef
    # shorthand (but not a Typedef declaration itself) or an interface type.
    # These are not distinguished; the type name is used as-is.
    if node_class in ('PrimitiveType', 'StringType', 'Typeref'):
        # unrestricted syntax: unrestricted double | unrestricted float
        return IdlType(node.GetName(),
                       is_unrestricted=bool(node.GetProperty('UNRESTRICTED')))
    if node_class == 'Any':
        return IdlType('any')
    if node_class in ('Sequence', 'FrozenArray'):
        return sequence_node_to_type(node)
    if node_class == 'UnionType':
        return union_type_node_to_idl_union_type(node)
    if node_class == 'Promise':
        return promise_node_to_type(node)
    if node_class == 'Record':
        return record_node_to_type(node)
    raise ValueError('Unrecognized node class: %s' % node_class)
def record_node_to_type(node):
    """Convert a record<K,V> AST node into an IdlRecordType.

    Keys must be string types; the value must be a Type node.
    """
    children = node.GetChildren()
    if len(children) != 2:
        raise ValueError('record<K,V> node expects exactly 2 children, got %d' % (len(children)))
    key_node, value_node = children
    if key_node.GetClass() != 'StringType':
        raise ValueError('Keys in record<K,V> nodes must be string types.')
    if value_node.GetClass() != 'Type':
        raise ValueError('Unrecognized node class for record<K,V> value: %s' % value_node.GetClass())
    key_type = IdlType(key_node.GetName())
    return IdlRecordType(key_type, type_node_to_type(value_node))
def sequence_node_to_type(node):
    """Convert a Sequence or FrozenArray AST node into the matching IDL type."""
    children = node.GetChildren()
    class_name = node.GetClass()
    if len(children) != 1:
        raise ValueError('%s node expects exactly 1 child, got %s' % (class_name, len(children)))
    element_node = children[0]
    element_node_class = element_node.GetClass()
    if element_node_class != 'Type':
        raise ValueError('Unrecognized node class: %s' % element_node_class)
    element_type = type_node_to_type(element_node)
    if class_name == 'Sequence':
        result = IdlSequenceType(element_type)
    elif class_name == 'FrozenArray':
        result = IdlFrozenArrayType(element_type)
    else:
        raise ValueError('Unexpected node: %s' % class_name)
    if node.GetProperty('NULLABLE'):
        result = IdlNullableType(result)
    return result
def typedef_node_to_type(node):
    """Resolve a Typedef AST node to the IDL type it aliases."""
    children = node.GetChildren()
    if len(children) != 1:
        raise ValueError('Typedef node with %s children, expected 1' % len(children))
    type_node = children[0]
    type_node_class = type_node.GetClass()
    if type_node_class != 'Type':
        raise ValueError('Unrecognized node class: %s' % type_node_class)
    return type_node_to_type(type_node)
def union_type_node_to_idl_union_type(node):
    """Build an IdlUnionType from a UnionType AST node's member types."""
    member_types = []
    for member_type_node in node.GetChildren():
        member_types.append(type_node_to_type(member_type_node))
    return IdlUnionType(member_types)
def promise_node_to_type(node):
    """Convert a Promise<T> AST node into an IdlPromiseType.

    Raises ValueError if the node does not have exactly one 'Type' child,
    or if the node class is not 'Promise'.
    """
    children = node.GetChildren()
    class_name = node.GetClass()
    if len(children) != 1:
        raise ValueError('%s node expects exactly 1 child, got %s' % (class_name, len(children)))
    promise_child = children[0]
    promise_child_class = promise_child.GetClass()
    if promise_child_class != 'Type':
        # Bug fix: this previously referenced the undefined name
        # 'sequence_child_class' (copy-paste from sequence_node_to_type),
        # which raised NameError instead of the intended ValueError.
        raise ValueError('Unrecognized node class: %s' % promise_child_class)
    element_type = type_node_to_type(promise_child)
    if class_name == 'Promise':
        promise_type = IdlPromiseType(element_type)
    else:
        raise ValueError('Unexpected node: %s' % class_name)
    return promise_type
################################################################################
# Visitor
################################################################################
class Visitor(object):
    """Abstract visitor for traversing IDL definitions.

    Subclasses override only the visit_* hooks they need; every hook is a
    no-op by default, so traversal code can call them unconditionally.
    Hooks for nodes that carry an IDL type forward to visit_typed_object,
    so a subclass can handle all typed nodes by overriding that one method.
    """
    def visit_definitions(self, definitions):
        pass
    def visit_typed_object(self, typed_object):
        # Generic hook for any object that carries an IDL type.
        pass
    def visit_callback_function(self, callback_function):
        self.visit_typed_object(callback_function)
    def visit_dictionary(self, dictionary):
        pass
    def visit_dictionary_member(self, member):
        self.visit_typed_object(member)
    def visit_enumeration(self, enumeration):
        pass
    def visit_implement(self, implement):
        pass
    def visit_interface(self, interface):
        pass
    def visit_typedef(self, typedef):
        self.visit_typed_object(typedef)
    def visit_attribute(self, attribute):
        self.visit_typed_object(attribute)
    def visit_constant(self, constant):
        self.visit_typed_object(constant)
    def visit_operation(self, operation):
        self.visit_typed_object(operation)
    def visit_argument(self, argument):
        self.visit_typed_object(argument)
    def visit_iterable(self, iterable):
        self.visit_typed_object(iterable)
    def visit_maplike(self, maplike):
        self.visit_typed_object(maplike)
    def visit_setlike(self, setlike):
        self.visit_typed_object(setlike)
|
|
# -*- coding: utf-8 -*-
""" Sahana Eden Scenario Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3ScenarioModel",
"S3ScenarioAssetModel",
"S3ScenarioHRModel",
"S3ScenarioMapModel",
"S3ScenarioOrganisationModel",
"S3ScenarioSiteModel",
"S3ScenarioTaskModel",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3ScenarioModel(S3Model):
    """
        Scenario Model

        http://eden.sahanafoundation.org/wiki/BluePrintScenario

        Link tables are in separate classes to increase performance & allow
        the system to be more modular
    """

    names = ["scenario_scenario",
             "scenario_scenario_id",
             ]

    def model(self):
        """ Define the scenario_scenario table, its components & reusable FK """

        T = current.T
        db = current.db

        add_component = self.add_component

        # ---------------------------------------------------------------------
        # Scenarios
        #
        #  Scenarios are Templates for Incidents to plan what resources are required
        #
        tablename = "scenario_scenario"
        table = self.define_table(tablename,
                                  self.event_incident_type_id(),
                                  Field("name", notnull=True,
                                        length=64, # Mayon compatibility
                                        label=T("Name")),
                                  s3_comments(),
                                  *s3_meta_fields())

        self.configure(tablename,
                       # Open Map Config to set the default Location
                       create_next=URL(args=["[id]", "config"]),
                       deduplicate=self.scenario_duplicate,
                       )

        # CRUD strings
        ADD_SCENARIO = T("New Scenario")
        current.response.s3.crud_strings[tablename] = Storage(
            title_create = ADD_SCENARIO,
            title_display = T("Scenario Details"),
            title_list = T("Scenarios"),
            title_update = T("Edit Scenario"),
            title_search = T("Search Scenarios"),
            title_upload = T("Import Scenarios"),
            subtitle_create = T("Add New Scenario"),
            label_list_button = T("List Scenarios"),
            label_create_button = ADD_SCENARIO,
            label_delete_button = T("Delete Scenario"),
            msg_record_created = T("Scenario added"),
            msg_record_modified = T("Scenario updated"),
            msg_record_deleted = T("Scenario deleted"),
            msg_list_empty = T("No Scenarios currently registered"))

        # Components
        # Tasks
        add_component("project_task",
                      scenario_scenario=Storage(
                                link="scenario_task",
                                joinby="scenario_id",
                                key="task_id",
                                # @ToDo: Widget to handle embedded LocationSelector
                                #actuate="embed",
                                actuate="link",
                                autocomplete="name",
                                autodelete=False))

        # Human Resources
        add_component("hrm_human_resource",
                      scenario_scenario=Storage(
                                link="scenario_human_resource",
                                joinby="scenario_id",
                                key="human_resource_id",
                                # @ToDo: Widget to handle embedded AddPersonWidget
                                #actuate="embed",
                                actuate="link",
                                autocomplete="name",
                                autodelete=False))

        # Assets
        add_component("asset_asset",
                      scenario_scenario=Storage(
                                link="scenario_asset",
                                joinby="scenario_id",
                                key="asset_id",
                                actuate="embed",
                                autocomplete="name",
                                autodelete=False))

        # Facilities
        add_component("scenario_site",
                      scenario_scenario="scenario_id")

        # Organisations
        add_component("org_organisation",
                      scenario_scenario=Storage(
                                link="scenario_organisation",
                                joinby="scenario_id",
                                key="organisation_id",
                                actuate="embed",
                                autocomplete="name",
                                autodelete=False))

        # Map Config as a component of Scenarios
        add_component("gis_config",
                      scenario_scenario=Storage(
                                link="scenario_config",
                                joinby="scenario_id",
                                multiple=False,
                                key="config_id",
                                actuate="replace",
                                autocomplete="name",
                                autodelete=True))

        # Reusable FK for other tables to link to a Scenario
        scenario_id = S3ReusableField("scenario_id", table,
                                      sortby="name",
                                      requires = IS_NULL_OR(
                                                    IS_ONE_OF(db, "scenario_scenario.id",
                                                              self.scenario_represent,
                                                              orderby="scenario_scenario.name",
                                                              sort=True)),
                                      represent = self.scenario_represent,
                                      label = T("Scenario"),
                                      ondelete = "SET NULL",
                                      # Comment these to use a Dropdown & not an Autocomplete
                                      #widget = S3AutocompleteWidget()
                                      #comment = DIV(_class="tooltip",
                                      #              _title="%s|%s" % (T("Scenario"),
                                      #                                T("Enter some characters to bring up a list of possible matches")))
                                      )

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage(
                scenario_scenario_id = scenario_id,
            )

    # -------------------------------------------------------------------------
    @staticmethod
    def defaults():
        """
            Return safe defaults in case the model has been deactivated.
        """

        return Storage(
            scenario_scenario_id = S3ReusableField("scenario_id",
                                                   "integer",
                                                   readable=False,
                                                   writable=False),
        )

    # ---------------------------------------------------------------------
    @staticmethod
    def scenario_represent(id, row=None):
        """ FK representation """

        if row:
            return row.name
        elif not id:
            return current.messages.NONE

        db = current.db
        table = db.scenario_scenario
        r = db(table.id == id).select(table.name,
                                      limitby = (0, 1)).first()
        try:
            return r.name
        except AttributeError:
            # No row with this id: r is None
            # (previously a bare except, which also swallowed e.g.
            #  KeyboardInterrupt)
            return current.messages.UNKNOWN_OPT

    # ---------------------------------------------------------------------
    @staticmethod
    def scenario_duplicate(item):
        """
            Deduplication of Scenarios
            - by matching the name

            @param item: the import item to check for duplicates
        """

        if item.tablename != "scenario_scenario":
            return

        data = item.data
        name = data.get("name", None)
        table = item.table
        query = (table.name == name)
        _duplicate = current.db(query).select(table.id,
                                              limitby=(0, 1)).first()
        if _duplicate:
            # Resolve as an update of the existing record
            item.id = _duplicate.id
            item.data.id = _duplicate.id
            item.method = item.METHOD.UPDATE
# =============================================================================
class S3ScenarioAssetModel(S3Model):
    """
        Link Assets to Scenarios
    """

    names = ["scenario_asset"]

    def model(self):
        """ Define the scenario<>asset link table """

        T = current.T

        # ---------------------------------------------------------------------
        # Assets
        # @ToDo: Use generic Supply Items not Asset instances? (Typed resources)
        #        Depends on the scale of the scenario!
        #        So support both...
        # @ToDo: Search Widget
        tablename = "scenario_asset"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.asset_asset_id(),
                          *s3_meta_fields())

        # CRUD strings
        crud_strings = Storage(
            title_create = T("Add Asset"),
            title_display = T("Asset Details"),
            title_list = T("Assets"),
            title_update = T("Edit Asset"),
            title_search = T("Search Assets"),
            subtitle_create = T("Add New Asset"),
            label_list_button = T("List Assets"),
            label_create_button = T("Add Asset"),
            label_delete_button = T("Remove Asset from this scenario"),
            msg_record_created = T("Asset added"),
            msg_record_modified = T("Asset updated"),
            msg_record_deleted = T("Asset removed"),
            msg_list_empty = T("No assets currently registered in this scenario"))
        current.response.s3.crud_strings[tablename] = crud_strings

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage()
# =============================================================================
class S3ScenarioHRModel(S3Model):
    """
        Link Human Resources (Staff/Volunteers) to Scenarios
    """

    names = ["scenario_human_resource"]

    def model(self):
        """ Define the scenario<>human resource link table """

        T = current.T

        # ---------------------------------------------------------------------
        # Staff/Volunteers
        # @ToDo: Use Positions, not individual HRs (Typed resources?)
        # @ToDo: Search Widget
        tablename = "scenario_human_resource"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.hrm_human_resource_id(),
                          *s3_meta_fields())

        # CRUD strings
        crud_strings = Storage(
            title_create = T("Add Human Resource"),
            title_display = T("Human Resource Details"),
            title_list = T("Human Resources"),
            title_update = T("Edit Human Resource"),
            title_search = T("Search Human Resources"),
            subtitle_create = T("Add New Human Resource"),
            label_list_button = T("List Human Resources"),
            label_create_button = T("Add Human Resource"),
            label_delete_button = T("Remove Human Resource from this scenario"),
            msg_record_created = T("Human Resource added"),
            msg_record_modified = T("Human Resource updated"),
            msg_record_deleted = T("Human Resource removed"),
            msg_list_empty = T("No Human Resources currently registered in this scenario"))
        current.response.s3.crud_strings[tablename] = crud_strings

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage()
# =============================================================================
class S3ScenarioMapModel(S3Model):
    """
        Link Map Configs to Scenarios
    """

    names = ["scenario_config"]

    def model(self):
        """ Define the scenario<>map config link table """

        T = current.T

        # ---------------------------------------------------------------------
        # Link Table for Map Config used in this Scenario
        # @ToDo: Widget suitable for a 1-1 relationship where we can assume
        #        that the Config is pre-created
        tablename = "scenario_config"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.gis_config_id(),
                          *s3_meta_fields())

        # CRUD strings
        crud_strings = Storage(
            title_create = T("Add Map Configuration"),
            title_display = T("Map Configuration Details"),
            title_list = T("Map Configurations"),
            title_update = T("Edit Map Configuration"),
            title_search = T("Search Map Configurations"),
            subtitle_create = T("Add New Map Configuration"),
            label_list_button = T("List Map Configurations"),
            label_create_button = T("Add Map Configuration"),
            label_delete_button = T("Remove Map Configuration from this scenario"),
            msg_record_created = T("Map Configuration added"),
            msg_record_modified = T("Map Configuration updated"),
            msg_record_deleted = T("Map Configuration removed"),
            msg_list_empty = T("No Map Configurations currently registered in this scenario"))
        current.response.s3.crud_strings[tablename] = crud_strings

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage()
# =============================================================================
class S3ScenarioOrganisationModel(S3Model):
    """
        Link Organisations to Scenarios
        - people to keep informed
        - people to mobilise
    """

    names = ["scenario_organisation"]

    def model(self):
        """ Define the scenario<>organisation link table """

        T = current.T

        # ---------------------------------------------------------------------
        # Organisations
        # @ToDo: Search Widget
        tablename = "scenario_organisation"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.org_organisation_id(),
                          *s3_meta_fields())

        # CRUD strings
        crud_strings = Storage(
            title_create = T("Add Organization"),
            title_display = T("Organization Details"),
            title_list = T("Organizations"),
            title_update = T("Edit Organization"),
            title_search = T("Search Organizations"),
            subtitle_create = T("Add New Organization"),
            label_list_button = T("List Organizations"),
            label_create_button = T("Add Organization"),
            label_delete_button = T("Remove Organization from this scenario"),
            msg_record_created = T("Organization added"),
            msg_record_modified = T("Organization updated"),
            msg_record_deleted = T("Organization removed"),
            msg_list_empty = T("No organizations currently registered in this scenario"))
        current.response.s3.crud_strings[tablename] = crud_strings

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage()
# =============================================================================
class S3ScenarioSiteModel(S3Model):
    """
        Link Sites (Facilities) to Scenarios
    """

    names = ["scenario_site"]

    def model(self):
        """ Define the scenario<>site link table """

        T = current.T

        # ---------------------------------------------------------------------
        # Facilities
        # @ToDo: Search Widget
        tablename = "scenario_site"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          # NB: org_site_id is a field, not a method
                          self.org_site_id,
                          *s3_meta_fields())

        # CRUD strings
        crud_strings = Storage(
            title_create = T("Add Facility"),
            title_display = T("Facility Details"),
            title_list = T("Facilities"),
            title_update = T("Edit Facility"),
            title_search = T("Search Facilities"),
            subtitle_create = T("Add New Facility"),
            label_list_button = T("List Facilities"),
            label_create_button = T("Add Facility"),
            label_delete_button = T("Remove Facility from this scenario"),
            msg_record_created = T("Facility added"),
            msg_record_modified = T("Facility updated"),
            msg_record_deleted = T("Facility removed"),
            msg_list_empty = T("No facilities currently registered in this scenario"))
        current.response.s3.crud_strings[tablename] = crud_strings

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage()
# =============================================================================
class S3ScenarioTaskModel(S3Model):
    """
        Link Tasks to Scenarios

        @ToDo: Task Templates (like CAP Templates)
    """

    names = ["scenario_task"]

    def model(self):
        """ Define the scenario<>task link table """

        T = current.T

        # ---------------------------------------------------------------------
        # Tasks
        # Standing Tasks required for this Scenario
        # @ToDo: Search Widget
        tablename = "scenario_task"
        self.define_table(tablename,
                          self.scenario_scenario_id(),
                          self.project_task_id(),
                          *s3_meta_fields())

        # CRUD strings
        crud_strings = Storage(
            title_create = T("Add Task"),
            title_display = T("Task Details"),
            title_list = T("Tasks"),
            title_update = T("Edit Task"),
            title_search = T("Search Tasks"),
            subtitle_create = T("Add New Task"),
            label_list_button = T("List Tasks"),
            label_create_button = T("Add Task"),
            label_delete_button = T("Remove Task from this scenario"),
            msg_record_created = T("Task added"),
            msg_record_modified = T("Task updated"),
            msg_record_deleted = T("Task removed"),
            msg_list_empty = T("No tasks currently registered in this scenario"))
        current.response.s3.crud_strings[tablename] = crud_strings

        # ---------------------------------------------------------------------
        # Pass variables back to global scope (s3db.*)
        #
        return Storage()
# END =========================================================================
|
|
"""
Vector Autoregressive Moving Average with eXogenous regressors model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
from statsmodels.compat.collections import OrderedDict
import pandas as pd
import numpy as np
from .kalman_filter import (
KalmanFilter, FilterResults, INVERT_UNIVARIATE, SOLVE_LU
)
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible,
constrain_stationary_multivariate, unconstrain_stationary_multivariate
)
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.vector_ar import var_model
import statsmodels.base.wrapper as wrap
from statsmodels.tools.sm_exceptions import (EstimationWarning, ValueWarning)
class VARMAX(MLEModel):
r"""
Vector Autoregressive Moving Average with eXogenous regressors model
Parameters
----------
endog : array_like
        The observed time-series process :math:`y`, shaped nobs x k_endog.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable
The (p,q) order of the model for the number of AR and MA parameters to
use.
trend : {'nc', 'c'}, optional
Parameter controlling the deterministic trend polynomial.
Can be specified as a string where 'c' indicates a constant intercept
and 'nc' indicates no intercept term.
error_cov_type : {'diagonal', 'unstructured'}, optional
The structure of the covariance matrix of the error term, where
"unstructured" puts no restrictions on the matrix and "diagonal"
requires it to be a diagonal matrix (uncorrelated errors). Default is
"unstructured".
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
order : iterable
The (p,q) order of the model for the number of AR and MA parameters to
use.
trend : {'nc', 'c'}, optional
Parameter controlling the deterministic trend polynomial.
Can be specified as a string where 'c' indicates a constant intercept
and 'nc' indicates no intercept term.
error_cov_type : {'diagonal', 'unstructured'}, optional
The structure of the covariance matrix of the error term, where
"unstructured" puts no restrictions on the matrix and "diagonal"
requires it to be a diagonal matrix (uncorrelated errors). Default is
"unstructured".
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
Notes
-----
Generically, the VARMAX model is specified (see for example chapter 18 of
[1]_):
.. math::
y_t = \nu + A_1 y_{t-1} + \dots + A_p y_{t-p} + B x_t + \epsilon_t +
M_1 \epsilon_{t-1} + \dots M_q \epsilon_{t-q}
where :math:`\epsilon_t \sim N(0, \Omega)`, and where :math:`y_t` is a
`k_endog x 1` vector. Additionally, this model allows considering the case
where the variables are measured with error.
Note that in the full VARMA(p,q) case there is a fundamental identification
problem in that the coefficient matrices :math:`\{A_i, M_j\}` are not
generally unique, meaning that for a given time series process there may
be multiple sets of matrices that equivalently represent it. See Chapter 12
    of [1]_ for more information. Although this class can be used to estimate
VARMA(p,q) models, a warning is issued to remind users that no steps have
been taken to ensure identification in this case.
References
----------
.. [1] Lutkepohl, Helmut. 2007.
New Introduction to Multiple Time Series Analysis.
Berlin: Springer.
"""
    def __init__(self, endog, exog=None, order=(1, 0), trend='c',
                 error_cov_type='unstructured', measurement_error=False,
                 enforce_stationarity=True, enforce_invertibility=True,
                 **kwargs):
        """
        Construct the VARMAX state space representation.

        Validates the (p, q) order / trend / error covariance options,
        reshapes `exog` to 2-d, sizes the state vector, initializes the
        parent `MLEModel`, and pre-fills all time-invariant entries of the
        state space matrices. See the class docstring for parameters.
        """
        # Model parameters
        self.error_cov_type = error_cov_type
        self.measurement_error = measurement_error
        self.enforce_stationarity = enforce_stationarity
        self.enforce_invertibility = enforce_invertibility
        # Save the given orders
        self.order = order
        self.trend = trend
        # Model orders
        # k_trend is 1 for a constant intercept ('c'), 0 for none ('nc')
        self.k_ar = int(order[0])
        self.k_ma = int(order[1])
        self.k_trend = int(self.trend == 'c')
        # Check for valid model
        if trend not in ['c', 'nc']:
            raise ValueError('Invalid trend specification.')
        if error_cov_type not in ['diagonal', 'unstructured']:
            raise ValueError('Invalid error covariance matrix type'
                             ' specification.')
        if self.k_ar == 0 and self.k_ma == 0:
            raise ValueError('Invalid VARMAX(p,q) specification; at least one'
                             ' p,q must be greater than zero.')
        # Warn for VARMA model
        # (coefficient matrices of a full VARMA(p,q) are not generally
        # identified -- see class docstring)
        if self.k_ar > 0 and self.k_ma > 0:
            warn('Estimation of VARMA(p,q) models is not generically robust,'
                 ' due especially to identification issues.',
                 EstimationWarning)
        # Exogenous data
        self.k_exog = 0
        if exog is not None:
            exog_is_using_pandas = _is_using_pandas(exog, None)
            if not exog_is_using_pandas:
                exog = np.asarray(exog)
            # Make sure we have 2-dimensional array
            if exog.ndim == 1:
                if not exog_is_using_pandas:
                    exog = exog[:, None]
                else:
                    exog = pd.DataFrame(exog)
            self.k_exog = exog.shape[1]
        # Note: at some point in the future might add state regression, as in
        # SARIMAX.
        self.mle_regression = self.k_exog > 0
        # We need to have an array or pandas at this point
        if not _is_using_pandas(endog, None):
            endog = np.asanyarray(endog)
        # Model order
        # Used internally in various places
        # (the AR companion block always occupies at least one block-row,
        #  even for pure MA models)
        _min_k_ar = max(self.k_ar, 1)
        self._k_order = _min_k_ar + self.k_ma
        # Number of states
        k_endog = endog.shape[1]
        k_posdef = k_endog
        k_states = k_endog * self._k_order
        # By default, initialize as stationary
        kwargs.setdefault('initialization', 'stationary')
        # By default, use LU decomposition
        kwargs.setdefault('inversion_method', INVERT_UNIVARIATE | SOLVE_LU)
        # Initialize the state space model
        super(VARMAX, self).__init__(
            endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
        )
        # Set as time-varying model if we have time-trend or exog
        # NOTE(review): k_trend is 0 or 1 here, so `self.k_trend > 1` is never
        # true -- exog is the only effective trigger; confirm intent.
        if self.k_exog > 0 or self.k_trend > 1:
            self.ssm._time_invariant = False
        # Initialize the parameters
        # (ordered mapping of parameter-group name -> number of parameters;
        #  the ordering defines the layout of the params vector)
        self.parameters = OrderedDict()
        self.parameters['trend'] = self.k_endog * self.k_trend
        self.parameters['ar'] = self.k_endog**2 * self.k_ar
        self.parameters['ma'] = self.k_endog**2 * self.k_ma
        self.parameters['regression'] = self.k_endog * self.k_exog
        if self.error_cov_type == 'diagonal':
            self.parameters['state_cov'] = self.k_endog
        # These parameters fill in a lower-triangular matrix which is then
        # dotted with itself to get a positive definite matrix.
        elif self.error_cov_type == 'unstructured':
            self.parameters['state_cov'] = (
                int(self.k_endog * (self.k_endog + 1) / 2)
            )
        # bool multiplies as 0/1: no obs_cov params unless measurement_error
        self.parameters['obs_cov'] = self.k_endog * self.measurement_error
        self.k_params = sum(self.parameters.values())
        # Initialize known elements of the state space matrices
        # If we have exog effects, then the state intercept needs to be
        # time-varying
        if self.k_exog > 0:
            self.ssm['state_intercept'] = np.zeros((self.k_states, self.nobs))
        # The design matrix is just an identity for the first k_endog states
        idx = np.diag_indices(self.k_endog)
        self.ssm[('design',) + idx] = 1
        # The transition matrix is described in four blocks, where the upper
        # left block is in companion form with the autoregressive coefficient
        # matrices (so it is shaped k_endog * k_ar x k_endog * k_ar) ...
        if self.k_ar > 0:
            idx = np.diag_indices((self.k_ar - 1) * self.k_endog)
            idx = idx[0] + self.k_endog, idx[1]
            self.ssm[('transition',) + idx] = 1
        # ... and the lower right block is in companion form with zeros as the
        # coefficient matrices (it is shaped k_endog * k_ma x k_endog * k_ma).
        idx = np.diag_indices((self.k_ma - 1) * self.k_endog)
        idx = (idx[0] + (_min_k_ar + 1) * self.k_endog,
               idx[1] + _min_k_ar * self.k_endog)
        self.ssm[('transition',) + idx] = 1
        # The selection matrix is described in two blocks, where the upper
        # block selects the all k_posdef errors in the first k_endog rows
        # (the upper block is shaped k_endog * k_ar x k) and the lower block
        # also selects all k_posdef errors in the first k_endog rows (the lower
        # block is shaped k_endog * k_ma x k).
        idx = np.diag_indices(self.k_endog)
        self.ssm[('selection',) + idx] = 1
        idx = idx[0] + _min_k_ar * self.k_endog, idx[1]
        if self.k_ma > 0:
            self.ssm[('selection',) + idx] = 1
        # Cache some indices
        # (pre-built numpy index expressions used by `update` to write
        #  parameter values into the state space matrices)
        if self.trend == 'c' and self.k_exog == 0:
            self._idx_state_intercept = np.s_['state_intercept', :k_endog]
        elif self.k_exog > 0:
            self._idx_state_intercept = np.s_['state_intercept', :k_endog, :]
        if self.k_ar > 0:
            self._idx_transition = np.s_['transition', :k_endog, :]
        else:
            self._idx_transition = np.s_['transition', :k_endog, k_endog:]
        if self.error_cov_type == 'diagonal':
            self._idx_state_cov = (
                ('state_cov',) + np.diag_indices(self.k_endog))
        elif self.error_cov_type == 'unstructured':
            self._idx_lower_state_cov = np.tril_indices(self.k_endog)
        if self.measurement_error:
            self._idx_obs_cov = ('obs_cov',) + np.diag_indices(self.k_endog)
        # Cache some slices
        def _slice(key, offset):
            # Return the params-vector slice for group `key` starting at
            # `offset`, plus the offset for the next group.
            length = self.parameters[key]
            param_slice = np.s_[offset:offset + length]
            offset += length
            return param_slice, offset
        offset = 0
        self._params_trend, offset = _slice('trend', offset)
        self._params_ar, offset = _slice('ar', offset)
        self._params_ma, offset = _slice('ma', offset)
        self._params_regression, offset = _slice('regression', offset)
        self._params_state_cov, offset = _slice('state_cov', offset)
        self._params_obs_cov, offset = _slice('obs_cov', offset)
def filter(self, params, **kwargs):
kwargs.setdefault('results_class', VARMAXResults)
kwargs.setdefault('results_wrapper_class', VARMAXResultsWrapper)
return super(VARMAX, self).filter(params, **kwargs)
def smooth(self, params, **kwargs):
kwargs.setdefault('results_class', VARMAXResults)
kwargs.setdefault('results_wrapper_class', VARMAXResultsWrapper)
return super(VARMAX, self).smooth(params, **kwargs)
@property
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
# A. Run a multivariate regression to get beta estimates
endog = pd.DataFrame(self.endog.copy())
# Pandas < 0.13 didn't support the same type of DataFrame interpolation
try:
endog = endog.interpolate()
except TypeError:
pass
endog = endog.fillna(method='backfill').values
exog = self.exog.copy() if self.k_exog > 0 else None
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
if np.any(np.isnan(endog)):
mask = ~np.any(np.isnan(endog), axis=1)
endog = endog[mask]
if exog is not None:
exog = exog[mask]
# Regression effects via OLS
exog_params = np.zeros(0)
if self.k_exog > 0:
exog_params = np.linalg.pinv(exog).dot(endog).T
endog -= np.dot(exog, exog_params.T)
# B. Run a VAR model on endog to get trend, AR parameters
ar_params = []
k_ar = self.k_ar if self.k_ar > 0 else 1
mod_ar = var_model.VAR(endog)
res_ar = mod_ar.fit(maxlags=k_ar, ic=None, trend=self.trend)
ar_params = np.array(res_ar.params.T)
if self.trend == 'c':
trend_params = ar_params[:, 0]
if self.k_ar > 0:
ar_params = ar_params[:, 1:].ravel()
else:
ar_params = []
elif self.k_ar > 0:
ar_params = ar_params.ravel()
else:
ar_params = []
endog = res_ar.resid
# Test for stationarity
if self.k_ar > 0 and self.enforce_stationarity:
coefficient_matrices = (
ar_params.reshape(
self.k_endog * self.k_ar, self.k_endog
).T
).reshape(self.k_endog, self.k_endog, self.k_ar).T
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# C. Run a VAR model on the residuals to get MA parameters
ma_params = []
if self.k_ma > 0:
mod_ma = var_model.VAR(endog)
res_ma = mod_ma.fit(maxlags=self.k_ma, ic=None, trend='nc')
ma_params = np.array(res_ma.params.T).ravel()
# Test for invertibility
if self.enforce_invertibility:
coefficient_matrices = (
ma_params.reshape(
self.k_endog * self.k_ma, self.k_endog
).T
).reshape(self.k_endog, self.k_endog, self.k_ma).T
invertible = is_invertible([1] + list(-coefficient_matrices))
if not invertible:
raise ValueError('Non-invertible starting moving-average'
' parameters found with `enforce_stationarity`'
' set to True.')
# 1. Intercept terms
if self.trend == 'c':
params[self._params_trend] = trend_params
# 2. AR terms
params[self._params_ar] = ar_params
# 3. MA terms
params[self._params_ma] = ma_params
# 4. Regression terms
if self.mle_regression:
params[self._params_regression] = exog_params.ravel()
# 5. State covariance terms
if self.error_cov_type == 'diagonal':
params[self._params_state_cov] = res_ar.sigma_u.diagonal()
elif self.error_cov_type == 'unstructured':
cov_factor = np.linalg.cholesky(res_ar.sigma_u)
params[self._params_state_cov] = (
cov_factor[self._idx_lower_state_cov].ravel())
# 5. Measurement error variance terms
if self.measurement_error:
if self.k_ma > 0:
params[self._params_obs_cov] = res_ma.sigma_u.diagonal()
else:
params[self._params_obs_cov] = res_ar.sigma_u.diagonal()
return params
@property
def param_names(self):
param_names = []
# 1. Intercept terms
if self.trend == 'c':
param_names += [
'const.%s' % self.endog_names[i]
for i in range(self.k_endog)
]
# 2. AR terms
param_names += [
'L%d.%s.%s' % (i+1, self.endog_names[k], self.endog_names[j])
for j in range(self.k_endog)
for i in range(self.k_ar)
for k in range(self.k_endog)
]
# 3. MA terms
param_names += [
'L%d.e(%s).%s' % (i+1, self.endog_names[k], self.endog_names[j])
for j in range(self.k_endog)
for i in range(self.k_ma)
for k in range(self.k_endog)
]
# 4. Regression terms
param_names += [
'beta.%s.%s' % (self.exog_names[j], self.endog_names[i])
for i in range(self.k_endog)
for j in range(self.k_exog)
]
# 5. State covariance terms
if self.error_cov_type == 'diagonal':
param_names += [
'sigma2.%s' % self.endog_names[i]
for i in range(self.k_endog)
]
elif self.error_cov_type == 'unstructured':
param_names += [
('sqrt.var.%s' % self.endog_names[i] if i == j else
'sqrt.cov.%s.%s' % (self.endog_names[j], self.endog_names[i]))
for i in range(self.k_endog)
for j in range(i+1)
]
# 5. Measurement error variance terms
if self.measurement_error:
param_names += [
'measurement_variance.%s' % self.endog_names[i]
for i in range(self.k_endog)
]
return param_names
    def transform_params(self, unconstrained):
        """
        Transform unconstrained parameters used by the optimizer to constrained
        parameters used in likelihood evaluation
        Parameters
        ----------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer, to be
            transformed.
        Returns
        -------
        constrained : array_like
            Array of constrained parameters which may be used in likelihood
            evaluation.
        Notes
        -----
        Constrains the factor transition to be stationary and variances to be
        positive.
        """
        unconstrained = np.array(unconstrained, ndmin=1)
        constrained = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)
        # 1. Intercept terms: nothing to do
        constrained[self._params_trend] = unconstrained[self._params_trend]
        # 2. AR terms: optionally force to be stationary
        if self.k_ar > 0 and self.enforce_stationarity:
            # Create the state covariance matrix
            if self.error_cov_type == 'diagonal':
                # Unconstrained diagonal parameters are square roots of the
                # variances (they are squared in section 5 below as well).
                state_cov = np.diag(unconstrained[self._params_state_cov]**2)
            elif self.error_cov_type == 'unstructured':
                # Rebuild the lower-triangular Cholesky factor L, then form
                # the full covariance L L'.
                state_cov_lower = np.zeros(self.ssm['state_cov'].shape,
                                           dtype=unconstrained.dtype)
                state_cov_lower[self._idx_lower_state_cov] = (
                    unconstrained[self._params_state_cov])
                state_cov = np.dot(state_cov_lower, state_cov_lower.T)
            # Transform the parameters
            coefficients = unconstrained[self._params_ar].reshape(
                self.k_endog, self.k_endog * self.k_ar)
            # NOTE(review): the adjusted `variance` returned by the constraint
            # transform is discarded; the state covariance parameters are
            # handled separately in section 5 below — confirm intentional.
            coefficient_matrices, variance = (
                constrain_stationary_multivariate(coefficients, state_cov))
            constrained[self._params_ar] = coefficient_matrices.ravel()
        else:
            constrained[self._params_ar] = unconstrained[self._params_ar]
        # 3. MA terms: optionally force to be invertible
        if self.k_ma > 0 and self.enforce_invertibility:
            # Transform the parameters, using an identity variance matrix
            state_cov = np.eye(self.k_endog, dtype=unconstrained.dtype)
            coefficients = unconstrained[self._params_ma].reshape(
                self.k_endog, self.k_endog * self.k_ma)
            coefficient_matrices, variance = (
                constrain_stationary_multivariate(coefficients, state_cov))
            constrained[self._params_ma] = coefficient_matrices.ravel()
        else:
            constrained[self._params_ma] = unconstrained[self._params_ma]
        # 4. Regression terms: nothing to do
        constrained[self._params_regression] = (
            unconstrained[self._params_regression])
        # 5. State covariance terms
        # If we have variances, force them to be positive
        if self.error_cov_type == 'diagonal':
            constrained[self._params_state_cov] = (
                unconstrained[self._params_state_cov]**2)
        # Otherwise, nothing needs to be done
        elif self.error_cov_type == 'unstructured':
            constrained[self._params_state_cov] = (
                unconstrained[self._params_state_cov])
        # 6. Measurement error variance terms (was mislabeled "5" before)
        if self.measurement_error:
            # Force these to be positive
            constrained[self._params_obs_cov] = (
                unconstrained[self._params_obs_cov]**2)
        return constrained
    def untransform_params(self, constrained):
        """
        Transform constrained parameters used in likelihood evaluation
        to unconstrained parameters used by the optimizer.
        Parameters
        ----------
        constrained : array_like
            Array of constrained parameters used in likelihood evaluation, to
            be transformed.
        Returns
        -------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer.
        """
        constrained = np.array(constrained, ndmin=1)
        unconstrained = np.zeros(constrained.shape, dtype=constrained.dtype)
        # 1. Intercept terms: nothing to do
        unconstrained[self._params_trend] = constrained[self._params_trend]
        # 2. AR terms: optionally were forced to be stationary
        if self.k_ar > 0 and self.enforce_stationarity:
            # Create the state covariance matrix
            if self.error_cov_type == 'diagonal':
                # Constrained parameters are already variances (they were
                # squared in `transform_params`), so no squaring here.
                state_cov = np.diag(constrained[self._params_state_cov])
            elif self.error_cov_type == 'unstructured':
                # Rebuild L L' from the lower-triangular Cholesky entries.
                state_cov_lower = np.zeros(self.ssm['state_cov'].shape,
                                           dtype=constrained.dtype)
                state_cov_lower[self._idx_lower_state_cov] = (
                    constrained[self._params_state_cov])
                state_cov = np.dot(state_cov_lower, state_cov_lower.T)
            # Transform the parameters
            coefficients = constrained[self._params_ar].reshape(
                self.k_endog, self.k_endog * self.k_ar)
            unconstrained_matrices, variance = (
                unconstrain_stationary_multivariate(coefficients, state_cov))
            unconstrained[self._params_ar] = unconstrained_matrices.ravel()
        else:
            unconstrained[self._params_ar] = constrained[self._params_ar]
        # 3. MA terms: optionally were forced to be invertible
        if self.k_ma > 0 and self.enforce_invertibility:
            # Transform the parameters, using an identity variance matrix
            state_cov = np.eye(self.k_endog, dtype=constrained.dtype)
            coefficients = constrained[self._params_ma].reshape(
                self.k_endog, self.k_endog * self.k_ma)
            unconstrained_matrices, variance = (
                unconstrain_stationary_multivariate(coefficients, state_cov))
            unconstrained[self._params_ma] = unconstrained_matrices.ravel()
        else:
            unconstrained[self._params_ma] = constrained[self._params_ma]
        # 4. Regression terms: nothing to do
        unconstrained[self._params_regression] = (
            constrained[self._params_regression])
        # 5. State covariance terms
        # If we have variances, then these were forced to be positive
        if self.error_cov_type == 'diagonal':
            # Invert the squaring applied in `transform_params`.
            unconstrained[self._params_state_cov] = (
                constrained[self._params_state_cov]**0.5)
        # Otherwise, nothing needs to be done
        elif self.error_cov_type == 'unstructured':
            unconstrained[self._params_state_cov] = (
                constrained[self._params_state_cov])
        # 6. Measurement error variance terms (was mislabeled "5" before)
        if self.measurement_error:
            # These were forced to be positive
            unconstrained[self._params_obs_cov] = (
                constrained[self._params_obs_cov]**0.5)
        return unconstrained
    def update(self, params, **kwargs):
        """
        Update the state space representation matrices in-place to reflect
        the given parameter vector.
        Parameters
        ----------
        params : array_like
            Array of parameters, in the order described by `param_names`.
        **kwargs
            Passed through to the base class `update` (e.g. `transformed`).
        """
        params = super(VARMAX, self).update(params, **kwargs)
        # 1. State intercept
        if self.mle_regression:
            # Time-varying intercept: per-period exog regression effects,
            # plus the constant trend term when present.
            exog_params = params[self._params_regression].reshape(
                self.k_endog, self.k_exog).T
            intercept = np.dot(self.exog, exog_params)
            if self.trend == 'c':
                intercept += params[self._params_trend]
            self.ssm[self._idx_state_intercept] = intercept.T
        elif self.trend == 'c':
            self.ssm[self._idx_state_intercept] = params[self._params_trend]
        # NOTE(review): with no trend and no regression, the state intercept
        # is left untouched — presumably zero from construction; confirm.
        # 2. Transition: AR coefficient block followed by MA block
        ar = params[self._params_ar].reshape(
            self.k_endog, self.k_endog * self.k_ar)
        ma = params[self._params_ma].reshape(
            self.k_endog, self.k_endog * self.k_ma)
        self.ssm[self._idx_transition] = np.c_[ar, ma]
        # 3. State covariance
        if self.error_cov_type == 'diagonal':
            self.ssm[self._idx_state_cov] = (
                params[self._params_state_cov]
            )
        elif self.error_cov_type == 'unstructured':
            # Parameters are lower-triangular Cholesky entries; rebuild L L'.
            state_cov_lower = np.zeros(self.ssm['state_cov'].shape,
                                       dtype=params.dtype)
            state_cov_lower[self._idx_lower_state_cov] = (
                params[self._params_state_cov])
            self.ssm['state_cov'] = np.dot(state_cov_lower, state_cov_lower.T)
        # 4. Observation covariance (measurement error variances)
        if self.measurement_error:
            self.ssm[self._idx_obs_cov] = params[self._params_obs_cov]
class VARMAXResults(MLEResults):
    """
    Class to hold results from fitting a VARMAX model.
    Parameters
    ----------
    model : VARMAX instance
        The fitted model instance
    Attributes
    ----------
    specification : dictionary
        Dictionary including all attributes from the VARMAX model instance.
    coefficient_matrices_var : array
        Array containing autoregressive lag polynomial coefficient matrices,
        ordered from lowest degree to highest.
    coefficient_matrices_vma : array
        Array containing moving average lag polynomial coefficients,
        ordered from lowest degree to highest.
    See Also
    --------
    statsmodels.tsa.statespace.kalman_filter.FilterResults
    statsmodels.tsa.statespace.mlemodel.MLEResults
    """
    def __init__(self, model, params, filter_results, cov_type='opg',
                 **kwargs):
        super(VARMAXResults, self).__init__(model, params, filter_results,
                                            cov_type, **kwargs)
        self.df_resid = np.inf  # attribute required for wald tests
        # Snapshot of the model's specification attributes, kept on the
        # results object so they remain available independently of the model.
        self.specification = Bunch(**{
            # Set additional model parameters
            'error_cov_type': self.model.error_cov_type,
            'measurement_error': self.model.measurement_error,
            'enforce_stationarity': self.model.enforce_stationarity,
            'enforce_invertibility': self.model.enforce_invertibility,
            'order': self.model.order,
            # Model order
            'k_ar': self.model.k_ar,
            'k_ma': self.model.k_ma,
            # Trend / Regression
            'trend': self.model.trend,
            'k_trend': self.model.k_trend,
            'k_exog': self.model.k_exog,
        })
        # Polynomials / coefficient matrices
        self.coefficient_matrices_var = None
        self.coefficient_matrices_vma = None
        if self.model.k_ar > 0:
            ar_params = np.array(self.params[self.model._params_ar])
            k_endog = self.model.k_endog
            k_ar = self.model.k_ar
            # Reshape the flat parameter slice into k_ar stacked
            # (k_endog x k_endog) lag coefficient matrices.
            self.coefficient_matrices_var = (
                ar_params.reshape(k_endog * k_ar, k_endog).T
            ).reshape(k_endog, k_endog, k_ar).T
        if self.model.k_ma > 0:
            ma_params = np.array(self.params[self.model._params_ma])
            k_endog = self.model.k_endog
            k_ma = self.model.k_ma
            # Same reshaping as above, for the MA lag polynomial.
            self.coefficient_matrices_vma = (
                ma_params.reshape(k_endog * k_ma, k_endog).T
            ).reshape(k_endog, k_endog, k_ma).T
    def get_prediction(self, start=None, end=None, dynamic=False, exog=None,
                       **kwargs):
        """
        In-sample prediction and out-of-sample forecasting
        Parameters
        ----------
        start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. However, if the dates index does not
            have a fixed frequency, end must be an integer index if you
            want out of sample prediction. Default is the last observation in
            the sample.
        exog : array_like, optional
            If the model includes exogenous regressors, you must provide
            exactly enough out-of-sample values for the exogenous variables if
            end is beyond the last observation in the sample.
        dynamic : boolean, int, str, or datetime, optional
            Integer offset relative to `start` at which to begin dynamic
            prediction. Can also be an absolute date string to parse or a
            datetime type (these are not interpreted as offsets).
            Prior to this observation, true endogenous values will be used for
            prediction; starting with this observation and continuing through
            the end of prediction, forecasted endogenous values will be used
            instead.
        **kwargs
            Additional arguments may be required for forecasting beyond the
            end of the sample. See `FilterResults.predict` for more details.
        Returns
        -------
        forecast : array
            Array of out of sample forecasts.
        """
        if start is None:
            start = 0
        # Handle end (e.g. date)
        _start = self.model._get_predict_start(start)
        _end, _out_of_sample = self.model._get_predict_end(end)
        # Handle exogenous parameters
        if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
            # Create a new faux VARMAX model for the extended dataset
            # (endog is filled with zeros; only the time-varying system
            # matrices produced by `update` below are used).
            nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
            endog = np.zeros((nobs, self.model.k_endog))
            if self.model.k_exog > 0:
                if exog is None:
                    raise ValueError('Out-of-sample forecasting in a model'
                                     ' with a regression component requires'
                                     ' additional exogenous values via the'
                                     ' `exog` argument.')
                exog = np.array(exog)
                required_exog_shape = (_out_of_sample, self.model.k_exog)
                if not exog.shape == required_exog_shape:
                    raise ValueError('Provided exogenous values are not of the'
                                     ' appropriate shape. Required %s, got %s.'
                                     % (str(required_exog_shape),
                                        str(exog.shape)))
                # Append the new exog values to the in-sample ones.
                exog = np.c_[self.model.data.orig_exog.T, exog.T].T
            # TODO replace with init_kwds or specification or similar
            model = VARMAX(
                endog,
                exog=exog,
                order=self.model.order,
                trend=self.model.trend,
                error_cov_type=self.model.error_cov_type,
                measurement_error=self.model.measurement_error,
                enforce_stationarity=self.model.enforce_stationarity,
                enforce_invertibility=self.model.enforce_invertibility
            )
            # Fill the faux model's system matrices using the fitted params.
            model.update(self.params)
            # Set the kwargs with the update time-varying state space
            # representation matrices
            for name in self.filter_results.shapes.keys():
                if name == 'obs':
                    continue
                mat = getattr(model.ssm, name)
                # Only time-varying matrices (last dimension > 1) need to be
                # passed forward for the out-of-sample period.
                if mat.shape[-1] > 1:
                    if len(mat.shape) == 2:
                        kwargs[name] = mat[:, -_out_of_sample:]
                    else:
                        kwargs[name] = mat[:, :, -_out_of_sample:]
        elif self.model.k_exog == 0 and exog is not None:
            warn('Exogenous array provided to predict, but additional data not'
                 ' required. `exog` argument ignored.', ValueWarning)
        return super(VARMAXResults, self).get_prediction(
            start=start, end=end, dynamic=dynamic, exog=exog, **kwargs
        )
    def summary(self, alpha=.05, start=None, separate_params=True):
        from statsmodels.iolib.summary import summary_params
        # Create the model name (e.g. ['VARMAX(1,1)', 'intercept'])
        spec = self.specification
        if spec.k_ar > 0 and spec.k_ma > 0:
            model_name = 'VARMA'
            order = '(%s,%s)' % (spec.k_ar, spec.k_ma)
        elif spec.k_ar > 0:
            model_name = 'VAR'
            order = '(%s)' % (spec.k_ar)
        else:
            model_name = 'VMA'
            order = '(%s)' % (spec.k_ma)
        if spec.k_exog > 0:
            model_name += 'X'
        model_name = [model_name + order]
        if spec.trend == 'c':
            model_name.append('intercept')
        if spec.measurement_error:
            model_name.append('measurement error')
        summary = super(VARMAXResults, self).summary(
            alpha=alpha, start=start, model_name=model_name,
            display_params=not separate_params
        )
        if separate_params:
            indices = np.arange(len(self.params))
            # Nested helper (not a method; `self` is passed explicitly) that
            # builds one coefficient table for the parameters under `mask`.
            def make_table(self, mask, title, strip_end=True):
                res = (self, self.params[mask], self.bse[mask],
                       self.zvalues[mask], self.pvalues[mask],
                       self.conf_int(alpha)[mask])
                param_names = [
                    '.'.join(name.split('.')[:-1]) if strip_end else name
                    for name in
                    np.array(self.data.param_names)[mask].tolist()
                ]
                return summary_params(res, yname=None, xname=param_names,
                                      alpha=alpha, use_t=False, title=title)
            # Add parameter tables for each endogenous variable
            k_endog = self.model.k_endog
            k_ar = self.model.k_ar
            k_ma = self.model.k_ma
            k_exog = self.model.k_exog
            endog_masks = []
            for i in range(k_endog):
                masks = []
                # `offset` walks the params vector section by section, in the
                # same order as `param_names`.
                offset = 0
                # 1. Intercept terms
                if self.model.trend == 'c':
                    masks.append(np.array(i, ndmin=1))
                    offset += k_endog
                # 2. AR terms
                if k_ar > 0:
                    start = i * k_endog * k_ar
                    end = (i + 1) * k_endog * k_ar
                    masks.append(
                        offset + np.arange(start, end))
                    offset += k_ar * k_endog**2
                # 3. MA terms
                if k_ma > 0:
                    start = i * k_endog * k_ma
                    end = (i + 1) * k_endog * k_ma
                    masks.append(
                        offset + np.arange(start, end))
                    offset += k_ma * k_endog**2
                # 4. Regression terms
                if k_exog > 0:
                    masks.append(
                        offset + np.arange(i * k_exog, (i + 1) * k_exog))
                    offset += k_endog * k_exog
                # 5. Measurement error variance terms (stored at the end of
                # the params vector, hence indexed from k_params backwards)
                if self.model.measurement_error:
                    masks.append(np.array(self.model.k_params - i - 1, ndmin=1))
                # Create the table
                mask = np.concatenate(masks)
                endog_masks.append(mask)
                title = "Results for equation %s" % self.model.endog_names[i]
                table = make_table(self, mask, title)
                summary.tables.append(table)
            # State covariance terms
            state_cov_mask = (
                np.arange(len(self.params))[self.model._params_state_cov])
            table = make_table(self, state_cov_mask, "Error covariance matrix",
                               strip_end=False)
            summary.tables.append(table)
            # Add a table for all other parameters (whatever was not covered
            # by the per-equation or state-covariance masks above)
            masks = []
            for m in (endog_masks, [state_cov_mask]):
                m = np.array(m).flatten()
                if len(m) > 0:
                    masks.append(m)
            masks = np.concatenate(masks)
            inverse_mask = np.array(list(set(indices).difference(set(masks))))
            if len(inverse_mask) > 0:
                table = make_table(self, inverse_mask, "Other parameters",
                                   strip_end=False)
                summary.tables.append(table)
        return summary
    # Re-use the base class docstring (any docstring written above would be
    # overwritten by this assignment anyway).
    summary.__doc__ = MLEResults.summary.__doc__
class VARMAXResultsWrapper(MLEResultsWrapper):
    # No VARMAX-specific attributes or methods need wrapping beyond the
    # MLEResultsWrapper defaults; the empty dicts are merged for consistency
    # with the other statespace results wrappers.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
                                     _methods)
# Attach the wrapper to the results class.
wrap.populate_wrapper(VARMAXResultsWrapper, VARMAXResults)
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Parameter sets for training of model-based RL agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensor2tensor.data_generators import gym_env
from tensor2tensor.utils import registry
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# Command-line flags selecting which hparams set drives the training loop and
# where evaluation inputs/outputs live.
flags.DEFINE_string("loop_hparams_set", "rlmb_base",
                    "Which RL hparams set to use.")
flags.DEFINE_string("loop_hparams", "", "Overrides for overall loop HParams.")
flags.DEFINE_string("job_dir_to_evaluate", "",
                    "Directory of a job to be evaluated.")
flags.DEFINE_string("eval_results_dir", "/tmp",
                    "Directory to store result of evaluation")
# Valid scopes for ranged (tuning) hparams defined later in this file.
HP_SCOPES = ["loop", "model", "ppo"]
def _rlmb_base():
  """Base hparams shared by all model-based RL (rlmb) parameter sets."""
  return tf.contrib.training.HParams(
      epochs=15,
      # Total frames used for training. This will be distributed evenly across
      # hparams.epochs.
      # This number should be divisible by real_ppo_epoch_length*epochs
      # for our frame accounting to be precise.
      num_real_env_frames=96000,
      generative_model="next_frame_basic_deterministic",
      generative_model_params="next_frame_pixel_noise",
      autoencoder_train_steps=0,
      autoencoder_train_steps_initial_multiplier=10,
      autoencoder_hparams_set="autoencoder_discrete_pong",
      model_train_steps=15000,
      initial_epoch_train_steps_multiplier=3,
      # Use random starts when learning agent on simulated env.
      simulation_random_starts=True,
      # Flip the first random frame in PPO batch for the true beginning.
      simulation_flip_first_random_for_beginning=False,
      intrinsic_reward_scale=0.,
      # Resizing.
      resize_height_factor=2,
      resize_width_factor=2,
      grayscale=False,
      # Maximum number of noops to make on environment reset.
      max_num_noops=8,
      # Bump learning rate after first epoch by 3x.
      # We picked 3x because our default learning rate schedule decreases with
      # 1/square root of step; 1/sqrt(10k) = 0.01 and 1/sqrt(100k) ~ 0.0032
      # so by bumping it up 3x we about "go back" from 100k steps to 10k, which
      # is approximately as much as "going back 1 epoch" would be.
      # In your experiments, you want to optimize this rate to your schedule.
      learning_rate_bump=3.0,
      # Batch size during evaluation. Metrics are averaged over this number of
      # rollouts.
      eval_max_num_noops=8,
      game="pong",
      # Whether to evaluate the world model in each iteration of the loop to get
      # the model_reward_accuracy metric.
      eval_world_model=True,
      # Number of concurrent rollouts in world model evaluation.
      wm_eval_batch_size=16,
      # Number of batches to run for world model evaluation.
      wm_eval_num_batches=8,
      # Ratios of ppo_epoch_length to report reward_accuracy on.
      wm_eval_rollout_ratios=[0.25, 0.5, 1, 2],
      stop_loop_early=False,  # To speed-up tests.
      env_timesteps_limit=-1,  # Use default from gym.make()
      # Number of last observations to feed to the agent and world model.
      frame_stack_size=4,
      # This is only used for world-model evaluation currently, PolicyLearner
      # uses algorithm specific hparams to set this during training.
      simulated_rollout_length=50,
      # To be overridden by the algorithm-specific sets below.
      base_algo="",
      base_algo_params="",
      # Number of real environments to train on simultaneously.
      real_batch_size=-1,
      # Number of simulated environments to train on simultaneously.
      simulated_batch_size=-1,
      eval_batch_size=-1,
  )
def update_hparams(hparams, other):
  """Set (or add) on `hparams` every key/value pair from the dict `other`.

  Keys already present on `hparams` are overwritten via `set_hparam`; unknown
  keys are registered via `add_hparam`.
  """
  # Snapshot the existing names once: HParams.values() builds a fresh dict on
  # every call, so querying it inside the loop would be quadratic. Plain
  # .items() iterates identically on Python 2 and 3, so six is not needed.
  existing = hparams.values()
  for key, value in other.items():
    if key in existing:
      hparams.set_hparam(key, value)
    else:
      hparams.add_hparam(key, value)
@registry.register_hparams
def rlmb_ppo_base():
  """HParams for PPO base."""
  # PPO-specific overrides applied on top of the shared _rlmb_base() set.
  hparams = _rlmb_base()
  ppo_params = dict(
      base_algo="ppo",
      base_algo_params="ppo_original_params",
      # Number of real environments to train on simultaneously.
      real_batch_size=1,
      # Number of simulated environments to train on simultaneously.
      simulated_batch_size=16,
      eval_batch_size=30,
      # Unused; number of PPO epochs is calculated from the real frame limit.
      real_ppo_epochs_num=0,
      # Number of frames that can be taken from the simulated environment before
      # it diverges, used for training the agent.
      ppo_epochs_num=1000,  # This should be enough to see something
      # Should be equal to simulated_rollout_length.
      # TODO(koz4k): Uncouple this by outputing done from SimulatedBatchEnv.
      ppo_epoch_length=hparams.simulated_rollout_length,
      # Do not eval since simulated batch env does not produce dones
      ppo_eval_every_epochs=0,
      ppo_learning_rate=1e-4,  # Will be changed, just so it exists.
      # This needs to be divisible by real_ppo_effective_num_agents.
      real_ppo_epoch_length=16 * 200,
      real_ppo_learning_rate=1e-4,
      real_ppo_effective_num_agents=16,
      real_ppo_eval_every_epochs=0,
      simulation_flip_first_random_for_beginning=True,
  )
  update_hparams(hparams, ppo_params)
  return hparams
@registry.register_hparams
def rlmb_base():
  """Default model-based RL hparams: an alias for the PPO-based set."""
  hparams = rlmb_ppo_base()
  return hparams
@registry.register_hparams
def rlmb_dqn_base():
  """rlmb_dqn_base params."""
  # DQN-specific overrides applied on top of the shared _rlmb_base() set.
  hparams = _rlmb_base()
  simulated_rollout_length = 10
  dqn_params = dict(
      base_algo="dqn",
      base_algo_params="dqn_original_params",
      real_batch_size=1,
      simulated_batch_size=1,
      dqn_agent_generates_trainable_dones=False,
      eval_batch_size=1,
      # Must be equal to dqn_time_limit for now
      simulated_rollout_length=simulated_rollout_length,
      dqn_time_limit=simulated_rollout_length,
      simulation_flip_first_random_for_beginning=False,
  )
  update_hparams(hparams, dqn_params)
  return hparams
@registry.register_hparams
def rlmb_basetest():
  """Base setting but quicker with only 2 epochs."""
  hp = rlmb_base()
  hp.game = "pong"
  hp.epochs = 2
  hp.num_real_env_frames = 3200
  hp.model_train_steps = 100
  hp.ppo_epochs_num = 2
  return hp
@registry.register_hparams
def rlmb_noresize():
  """Base setting with frame resizing disabled."""
  hp = rlmb_base()
  hp.resize_width_factor = 1
  hp.resize_height_factor = 1
  return hp
@registry.register_hparams
def rlmb_quick():
  """Base setting but quicker with only 2 epochs."""
  hp = rlmb_base()
  overrides = [
      ("epochs", 2),
      ("model_train_steps", 25000),
      ("ppo_epochs_num", 700),
      ("ppo_epoch_length", 50),
  ]
  for name, value in overrides:
    setattr(hp, name, value)
  return hp
@registry.register_hparams
def rlmb_quick_noresize():
  """Quick setting without frame resizing."""
  # Derive from rlmb_quick() so this set actually carries the "quick"
  # overrides (2 epochs, shorter PPO schedule). Previously it started from
  # rlmb_base() and was therefore an exact duplicate of rlmb_noresize.
  hparams = rlmb_quick()
  hparams.resize_height_factor = 1
  hparams.resize_width_factor = 1
  return hparams
# Stochastic-discrete and sampling world-model variants of the quick/test sets.
@registry.register_hparams
def rlmb_quick_sd():
  """Quick setting with stochastic discrete model."""
  hparams = rlmb_quick()
  hparams.generative_model = "next_frame_basic_stochastic_discrete"
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
  return hparams
@registry.register_hparams
def rlmb_sdtest():
  """Test setting with stochastic discrete model."""
  hparams = rlmb_basetest()
  hparams.generative_model = "next_frame_basic_stochastic_discrete"
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
  return hparams
@registry.register_hparams
def rlmb_quick_sm():
  """Quick setting with sampling."""
  hparams = rlmb_quick()
  hparams.generative_model_params = "next_frame_sampling"
  return hparams
# Variants of the base set that swap in stochastic / recurrent world models.
@registry.register_hparams
def rlmb_base_stochastic():
  """Base setting with a stochastic next-frame model."""
  hparams = rlmb_base()
  hparams.initial_epoch_train_steps_multiplier = 5
  hparams.generative_model = "next_frame_basic_stochastic"
  hparams.generative_model_params = "next_frame_basic_stochastic"
  return hparams
@registry.register_hparams
def rlmb_base_sampling_stochastic():
  """Base setting with a stochastic next-frame model (sampling hparams)."""
  hparams = rlmb_base()
  hparams.generative_model = "next_frame_basic_stochastic"
  hparams.generative_model_params = "next_frame_sampling_stochastic"
  return hparams
@registry.register_hparams
def rlmb_base_stochastic_discrete():
  """Base setting with stochastic discrete model."""
  hparams = rlmb_base()
  hparams.learning_rate_bump = 1.0
  hparams.grayscale = False
  hparams.generative_model = "next_frame_basic_stochastic_discrete"
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
  return hparams
@registry.register_hparams
def rlmb_long_stochastic_discrete():
  """Long setting with stochastic discrete model."""
  hparams = rlmb_base()
  hparams.learning_rate_bump = 1.0
  hparams.grayscale = False
  hparams.generative_model = "next_frame_basic_stochastic_discrete"
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long"
  # Longer PPO training than the base 1000 epochs.
  hparams.ppo_epochs_num = 2000
  return hparams
@registry.register_hparams
def rlmb_base_stochastic_recurrent():
  """Base setting with recurrent model."""
  hparams = rlmb_base()
  hparams.generative_model = "next_frame_basic_recurrent"
  hparams.generative_model_params = "next_frame_basic_recurrent"
  return hparams
@registry.register_hparams
def rlmb_base_stochastic_discrete_noresize():
  """Base setting with stochastic discrete model, without frame resizing."""
  hparams = rlmb_base()
  hparams.generative_model = "next_frame_basic_stochastic_discrete"
  hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
  hparams.resize_height_factor = 1
  hparams.resize_width_factor = 1
  return hparams
# SV2P world-model family: stochastic/deterministic x softmax x flippy30.
@registry.register_hparams
def rlmb_base_sv2p():
  """Base setting with sv2p as world model."""
  hparams = rlmb_base()
  hparams.learning_rate_bump = 1.0
  hparams.generative_model = "next_frame_sv2p"
  hparams.generative_model_params = "next_frame_sv2p_atari"
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_softmax():
  """Base setting with sv2p as world model with softmax."""
  hparams = rlmb_base_sv2p()
  hparams.generative_model_params = "next_frame_sv2p_atari_softmax"
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_deterministic():
  """Base setting with deterministic sv2p as world model."""
  hparams = rlmb_base_sv2p()
  hparams.generative_model_params = "next_frame_sv2p_atari_deterministic"
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_deterministic_softmax():
  """Base setting with deterministic sv2p as world model with softmax."""
  hparams = rlmb_base_sv2p_softmax()
  hparams.generative_model_params = (
      "next_frame_sv2p_atari_softmax_deterministic")
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_flippy30():
  """Base setting with sv2p as world model, on the 30-epoch schedule."""
  hparams = rlmb_base()
  hparams.epochs = 30
  hparams.ppo_epochs_num = 1000
  hparams.model_train_steps = 15000
  hparams.learning_rate_bump = 1.0
  hparams.initial_epoch_train_steps_multiplier = 5
  hparams.generative_model = "next_frame_sv2p"
  hparams.generative_model_params = "next_frame_sv2p_atari"
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_softmax_flippy30():
  """Flippy30 sv2p setting with softmax."""
  hparams = rlmb_base_sv2p_flippy30()
  hparams.generative_model_params = "next_frame_sv2p_atari_softmax"
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_deterministic_flippy30():
  """Flippy30 setting with deterministic sv2p as world model."""
  hparams = rlmb_base_sv2p_flippy30()
  hparams.generative_model_params = "next_frame_sv2p_atari_deterministic"
  return hparams
@registry.register_hparams
def rlmb_base_sv2p_deterministic_softmax_flippy30():
  """Flippy30 setting with deterministic sv2p world model with softmax."""
  hparams = rlmb_base_sv2p_softmax_flippy30()
  hparams.generative_model_params = (
      "next_frame_sv2p_atari_softmax_deterministic")
  return hparams
@registry.register_hparams
def rlmb_base_sampling():
  """Base setting with a sampling next-frame model."""
  hparams = rlmb_base()
  hparams.generative_model_params = "next_frame_sampling"
  return hparams
@registry.register_hparams
def rlmb_base_sampling_noresize():
  """Sampling setting without frame resizing."""
  hparams = rlmb_base_sampling()
  hparams.resize_height_factor = 1
  hparams.resize_width_factor = 1
  return hparams
@registry.register_hparams
def rlmb_flippy60():
  """Schedule with a lot of epochs (slow): 60 epochs of shorter training."""
  hparams = rlmb_base_sampling()
  hparams.epochs = 60
  hparams.ppo_epochs_num = 500
  hparams.model_train_steps = 10000
  return hparams
@registry.register_hparams
def rlmb_flippy30():
  """Schedule with a lot of epochs (slow): 30 epochs."""
  hparams = rlmb_base_sampling()
  hparams.epochs = 30
  hparams.ppo_epochs_num = 1000
  hparams.model_train_steps = 15000
  return hparams
@registry.register_hparams
def rlmb_medium():
  """Medium set: half of the base real-environment frames."""
  hparams = rlmb_base()
  hparams.num_real_env_frames //= 2
  return hparams
@registry.register_hparams
def rlmb_25k():
  """~25k real env frames (half of rlmb_medium)."""
  hparams = rlmb_medium()
  hparams.num_real_env_frames //= 2
  return hparams
@registry.register_hparams
def rlmb_short():
  """Short run: a fifth of the frames, a tenth of the training steps."""
  hparams = rlmb_base()
  hparams.num_real_env_frames //= 5
  hparams.model_train_steps //= 10
  hparams.ppo_epochs_num //= 10
  return hparams
@registry.register_hparams
def rlmb_model_only():
  """Train only the world model: one epoch, no PPO agent training."""
  hp = rlmb_base()
  hp.epochs = 1
  hp.ppo_epochs_num = 0
  return hp
def _rlmb_tiny_overrides():
  """Parameters to override for tiny setting excluding agent-related hparams."""
  return dict(
      epochs=1,
      num_real_env_frames=128,
      model_train_steps=2,
      max_num_noops=1,
      eval_max_num_noops=1,
      generative_model_params="next_frame_tiny",
      stop_loop_early=True,
      resize_height_factor=2,
      resize_width_factor=2,
      wm_eval_rollout_ratios=[1],
      env_timesteps_limit=7,
      # NOTE(review): for DQN this must stay equal to dqn_time_limit (see
      # rlmb_dqn_base / rlmb_dqn_tiny) — confirm before changing.
      simulated_rollout_length=2,
  )
@registry.register_hparams
def rlmb_ppo_tiny():
  """Tiny set for testing."""
  hparams = rlmb_ppo_base()
  hparams = hparams.override_from_dict(_rlmb_tiny_overrides())
  update_hparams(hparams, dict(
      ppo_epochs_num=2,
      ppo_epoch_length=hparams.simulated_rollout_length,
      real_ppo_epoch_length=36,
      real_ppo_effective_num_agents=2,
      real_batch_size=1,
      eval_batch_size=1,
  ))
  return hparams
@registry.register_hparams
def rlmb_tiny():
  """Tiny set for testing; alias for the PPO tiny set."""
  return rlmb_ppo_tiny()
@registry.register_hparams
def rlmb_dqn_tiny():
  """Tiny set for testing."""
  hparams = rlmb_dqn_base()
  hparams = hparams.override_from_dict(_rlmb_tiny_overrides())
  update_hparams(hparams, dict(
      simulated_rollout_length=2,
      dqn_time_limit=2,
      dqn_num_frames=128,
      real_dqn_replay_buffer_replay_capacity=100,
      dqn_replay_buffer_replay_capacity=100,
      real_dqn_agent_min_replay_history=10,
      dqn_agent_min_replay_history=10,
  ))
  return hparams
@registry.register_hparams
def rlmb_tiny_stochastic():
  """Tiny setting with a stochastic next-frame model."""
  hparams = rlmb_ppo_tiny()
  hparams.epochs = 1  # Too slow with 2 for regular runs.
  hparams.generative_model = "next_frame_basic_stochastic"
  hparams.generative_model_params = "next_frame_basic_stochastic"
  return hparams
@registry.register_hparams
def rlmb_tiny_recurrent():
  """Tiny setting with a recurrent next-frame model."""
  hparams = rlmb_ppo_tiny()
  hparams.epochs = 1  # Too slow with 2 for regular runs.
  hparams.generative_model = "next_frame_basic_recurrent"
  hparams.generative_model_params = "next_frame_basic_recurrent"
  return hparams
@registry.register_hparams
def rlmb_tiny_sv2p():
  """Tiny setting with a tiny sv2p model."""
  hparams = rlmb_ppo_tiny()
  hparams.generative_model = "next_frame_sv2p"
  hparams.generative_model_params = "next_frame_sv2p_tiny"
  hparams.grayscale = False
  return hparams
@registry.register_hparams
def rlmb_ae_base():
  """Parameter set for autoencoders."""
  hp = rlmb_base()
  hp.ppo_params = "ppo_pong_ae_base"
  hp.generative_model_params = "next_frame_ae"
  hp.autoencoder_hparams_set = "autoencoder_discrete_pong"
  hp.autoencoder_train_steps = 5000
  # The autoencoder consumes full-resolution color frames, so turn off
  # the resize/grayscale preprocessing used by the pixel models.
  hp.resize_height_factor = 1
  hp.resize_width_factor = 1
  hp.grayscale = False
  return hp
@registry.register_hparams
def rlmb_ae_basetest():
  """Base AE setting but quicker with only 2 epochs."""
  hp = rlmb_ae_base()
  hp.game = "pong"
  # Cut every training budget down so the config finishes quickly.
  hp.epochs = 2
  hp.num_real_env_frames = 3200
  hp.model_train_steps = 100
  hp.autoencoder_train_steps = 10
  hp.ppo_epochs_num = 2
  return hp
@registry.register_hparams
def rlmb_ae_tiny():
  """Tiny set for testing autoencoders."""
  hp = rlmb_tiny()
  hp.ppo_params = "ppo_pong_ae_base"
  hp.generative_model_params = "next_frame_ae_tiny"
  hp.autoencoder_hparams_set = "autoencoder_discrete_tiny"
  # Autoencoder input is full-resolution color, so disable preprocessing.
  hp.resize_height_factor = 1
  hp.resize_width_factor = 1
  hp.grayscale = False
  # Train the autoencoder for a single step (and skip the initial
  # multiplier) to keep the test fast.
  hp.autoencoder_train_steps = 1
  hp.autoencoder_train_steps_initial_multiplier = 0
  return hp
@registry.register_hparams
def rlmb_tiny_simulation_deterministic_starts():
  """Tiny set with deterministic (non-random) simulation starts."""
  hp = rlmb_tiny()
  hp.simulation_random_starts = False
  return hp
# RangedHParams for tuning
# ==============================================================================
# Note that the items here must be scoped with one of
# HP_SCOPES={loop, model, ppo}, which set hyperparameters for the top-level
# hparams, hp.generative_model_params, and hp.ppo_params, respectively.
@registry.register_ranged_hparams
def rlmb_grid(rhp):
  """Grid over games and frames, and 5 runs each for variance."""
  rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
  base = 100000
  medium = base // 2
  small = medium // 2
  rhp.set_discrete("loop.num_real_env_frames", [base, medium, small])
  # Dummy parameter to get 5 runs for each configuration
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
@registry.register_ranged_hparams
def rlmb_variance(rhp):
  """Run-to-run variance: 5 runs for each of three games."""
  # Dummy parameter to get 5 runs for each configuration
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
@registry.register_ranged_hparams
def rlmb_variance_nogame(rhp):
  """Run-to-run variance on the default game: 20 runs."""
  # Dummy parameter to get 20 runs for current configuration.
  rhp.set_discrete("model.moe_loss_coef", list(range(20)))
@registry.register_ranged_hparams
def rlmb_three(rhp):
  """10 runs for each of three games."""
  # Dummy parameter (see rlmb_variance) to multiply the run count.
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_categorical("loop.game", ["breakout", "pong", "boxing"])
@registry.register_ranged_hparams
def rlmb_test1(rhp):
  """Grid over games, PPO learning rate, batch size and epoch count."""
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_categorical("loop.game", ["breakout", "pong", "boxing"])
  rhp.set_discrete("loop.ppo_learning_rate", [5e-5, 1e-4, 2e-4])
  rhp.set_discrete("ppo.optimization_batch_size", [20, 40])
  rhp.set_discrete("loop.epochs", [3, 6])
@registry.register_ranged_hparams
def rlmb_scheduled_sampling(rhp):
  """Sweep the world model's scheduled-sampling probability."""
  rhp.set_float("model.scheduled_sampling_prob", 0.0, 1.0)
@registry.register_ranged_hparams
def rlmb_all_games(rhp):
  """5 runs for every Atari game."""
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_categorical("loop.game", gym_env.ATARI_GAMES)
@registry.register_ranged_hparams
def rlmb_whitelisted_games(rhp):
  """10 runs for each whitelisted Atari game."""
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES)
@registry.register_ranged_hparams
def rlmb_human_score_games(rhp):
  """10 runs for each Atari game that has a recorded human score."""
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_categorical("loop.game",
                      gym_env.ATARI_GAMES_WITH_HUMAN_SCORE)
@registry.register_ranged_hparams
def rlmb_curious_games10(rhp):
  """10 runs for each game in the 'curious' set."""
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_categorical("loop.game", gym_env.ATARI_CURIOUS_GAMES)
@registry.register_ranged_hparams
def rlmb_curious_games5(rhp):
  """5 runs for each game in the 'curious' set."""
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_categorical("loop.game", gym_env.ATARI_CURIOUS_GAMES)
@registry.register_ranged_hparams
def rlmb_debug_games(rhp):
  """10 runs for each game in the debug set."""
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_categorical("loop.game", gym_env.ATARI_DEBUG_GAMES)
@registry.register_ranged_hparams
def rlmb_ae_variance(rhp):
  """Autoencoder-setup variance: 5 runs per game and frame budget."""
  # Dummy parameter to get 5 runs for each configuration
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
  base = 100000
  small = base // 4
  rhp.set_discrete("loop.num_real_env_frames", [base, small])
@registry.register_ranged_hparams
def rlmb_ppolr_game(rhp):
  """Sweep PPO learning rate around the base value, per game."""
  rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
  base_lr = 1e-4
  rhp.set_float("loop.ppo_learning_rate", base_lr / 2, base_lr * 2)
@registry.register_ranged_hparams
def rlmb_ppolr(rhp):
  """Sweep PPO learning rate around the base value."""
  base_lr = 1e-4
  rhp.set_float("loop.ppo_learning_rate", base_lr / 2, base_lr * 2)
@registry.register_ranged_hparams
def rlmb_ae_ppo_lr(rhp):
  """Sweep PPO learning rate around the base value, per game (AE setup)."""
  rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"])
  base_lr = 1e-4
  rhp.set_float("loop.ppo_learning_rate", base_lr / 2, base_lr * 2)
@registry.register_ranged_hparams
def rlmb_dropout_range(rhp):
  """Sweep the world model's dropout rate."""
  rhp.set_float("model.dropout", 0.2, 0.4)
@registry.register_ranged_hparams
def rlmb_intrinsic_reward_scale(rhp):
  """Sweep the intrinsic reward scale."""
  rhp.set_float("loop.intrinsic_reward_scale", 0.01, 10.)
@registry.register_ranged_hparams
def rlmb_l1l2cutoff_range(rhp):
  """Loss and loss-cutoff tuning grid."""
  rhp.set_float("model.video_modality_loss_cutoff", 1.4, 3.4)
@registry.register_ranged_hparams
def rlmb_xentcutoff_range(rhp):
  """Cross entropy cutoff tuning grid."""
  rhp.set_float("model.video_modality_loss_cutoff", 0.01, 0.05)
@registry.register_ranged_hparams
def rlmb_pixel_noise(rhp):
  """Input pixel noise tuning grid."""
  rhp.set_categorical("loop.generative_model_params",
                      ["next_frame_pixel_noise"])
  rhp.set_discrete("model.video_modality_input_noise",
                   [0.0025 * i for i in range(200)])
@registry.register_ranged_hparams
def rlmb_dummy_range(rhp):
  """Dummy tuning grid just to get the variance."""
  rhp.set_float("model.moe_loss_coef", 0.01, 0.02)
@registry.register_ranged_hparams
def rlmb_epochs_num(rhp):
  """Sweep the number of loop epochs over whitelisted games."""
  rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES)
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_discrete("loop.epochs", [3, 6, 12])
@registry.register_ranged_hparams
def rlmb_ppo_epochs_num(rhp):
  """Sweep the number of PPO epochs over whitelisted games."""
  rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES)
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_discrete("loop.ppo_epochs_num", [200, 1000, 2000, 4000])
@registry.register_ranged_hparams
def rlmb_ppo_epoch_len(rhp):
  """Sweep the PPO epoch length over whitelisted games."""
  rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES)
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_discrete("loop.ppo_epoch_length", [25, 50, 100])
@registry.register_ranged_hparams
def rlmb_num_frames(rhp):
  """Sweep the real-environment frame budget over whitelisted games."""
  rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES)
  rhp.set_discrete("model.moe_loss_coef", list(range(5)))
  rhp.set_discrete("loop.num_real_env_frames",
                   [1000*el for el in [30, 100, 500, 1000]])
@registry.register_ranged_hparams
def rlmb_ppo_optimization_batch_size(rhp):
  """Sweep the PPO optimization batch size."""
  rhp.set_categorical("loop.game", ["pong", "boxing", "seaquest"])
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_discrete("ppo.optimization_batch_size", [4, 10, 20])
@registry.register_ranged_hparams
def rlmb_logits_clip(rhp):
  """Compare PPO with and without logits clipping."""
  rhp.set_categorical("loop.game", ["pong", "boxing", "seaquest"])
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_discrete("ppo.logits_clip", [0., 5.])
@registry.register_ranged_hparams
def rlmb_games_problematic_for_ppo(rhp):
  """Logits-clip study on games where PPO has been problematic."""
  games = [
      "alien", "boxing", "breakout", "ms_pacman", "video_pinball",
  ]
  rhp.set_categorical("loop.game", games)
  rhp.set_categorical("loop.base_algo_params", ["ppo_original_params"])
  rhp.set_discrete("model.moe_loss_coef", list(range(10)))
  rhp.set_discrete("ppo.logits_clip", [0., 4.0])
@registry.register_ranged_hparams
def rlmf_proportional_epoch_length(rhp):
  """Sweep proportional epoch length over games with human scores."""
  # NOTE(review): unlike every other key in this section, this one carries
  # no HP_SCOPES prefix ("loop." / "model." / "ppo."), which the section
  # header says is required — confirm whether it should be
  # "loop.proportional_epoch_length".
  rhp.set_discrete("proportional_epoch_length", [10, 20, 50, 100, 200, 400])
  rhp.set_categorical("loop.game", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE)
def merge_unscoped_hparams(scopes_and_hparams):
  """Merge multiple HParams into one, prefixing each key with its scope."""
  # Flatten every (scope, hparams) pair into "scope.key" -> value entries.
  merged_values = {
      "%s.%s" % (scope, key): value
      for (scope, hparams) in scopes_and_hparams
      for key, value in six.iteritems(hparams.values())
  }
  return tf.contrib.training.HParams(**merged_values)
def split_scoped_hparams(scopes, merged_hparams):
  """Split single HParams with scoped keys into one HParams per scope."""
  split_values = {scope: {} for scope in scopes}
  for scoped_key, value in six.iteritems(merged_hparams.values()):
    # "scope.rest.of.key" -> scope="scope", key="rest.of.key".
    scope, _, key = scoped_key.partition(".")
    split_values[scope][key] = value
  return [
      tf.contrib.training.HParams(**split_values[scope]) for scope in scopes
  ]
def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id):
  """Create HParams suitable for training loop from scoped HParams.

  Args:
    scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These
      parameters are overrides for the base HParams created by
      create_loop_hparams.
    trial_id: str, trial identifier. This is used to register unique HParams
      names for the underlying model and ppo HParams.

  Returns:
    HParams suitable for passing to training_loop.
  """
  trial_hp_overrides = scoped_overrides.values()
  # Create loop, model, and ppo base HParams
  loop_hp = create_loop_hparams()
  # The overrides may themselves select different base model/algo hparams
  # sets, so resolve those names first before merging.
  model_hp_name = trial_hp_overrides.get(
      "loop.generative_model_params", loop_hp.generative_model_params)
  model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams)
  base_algo_params_name = trial_hp_overrides.get(
      "loop.base_algo_params", loop_hp.base_algo_params)
  algo_hp = registry.hparams(base_algo_params_name)
  # Merge them and then override with the scoped overrides
  combined_hp = merge_unscoped_hparams(
      zip(HP_SCOPES, [loop_hp, model_hp, algo_hp]))
  combined_hp.override_from_dict(trial_hp_overrides)
  # Split out the component hparams
  loop_hp, model_hp, algo_hp = (
      split_scoped_hparams(HP_SCOPES, combined_hp))
  # Dynamic register the model hp and set the new name in loop_hp
  # (the registry is global, so the name must be unique per trial).
  model_hp_name = "model_hp_%s" % str(trial_id)
  dynamic_register_hparams(model_hp_name, model_hp)
  loop_hp.generative_model_params = model_hp_name
  # Dynamic register the algo hp and set the new name in loop_hp
  algo_hp_name = "algo_hp_%s" % str(trial_id)
  dynamic_register_hparams(algo_hp_name, algo_hp)
  loop_hp.base_algo_params = algo_hp_name
  return loop_hp
def dynamic_register_hparams(name, hparams):
  """Register a copy of `hparams` under `name` in the global registry."""
  @registry.register_hparams(name)
  def new_hparams_set():
    # Return a fresh HParams copy on each call so callers cannot mutate
    # the captured `hparams` object through the registry.
    return tf.contrib.training.HParams(**hparams.values())
  return new_hparams_set
def create_loop_hparams():
  """Build the training-loop HParams from the --loop_hparams* flags."""
  hparams = registry.hparams(FLAGS.loop_hparams_set)
  hparams.parse(FLAGS.loop_hparams)
  return hparams
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Core windowing data structures.
This module is experimental. No backwards-compatibility guarantees.
"""
# This module is carefully crafted to have optimal performance when
# compiled while still being valid Python. Care needs to be taken when
# editing this file as WindowedValues are created for every element for
# every step in a Beam pipeline.
#cython: profile=True
from __future__ import absolute_import
from builtins import object
from typing import TYPE_CHECKING
from typing import Any
from typing import Optional
from typing import Tuple
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.timestamp import TimestampTypes # pylint: disable=unused-import
if TYPE_CHECKING:
from apache_beam.transforms.window import BoundedWindow
class PaneInfoTiming(object):
  """The timing of a PaneInfo.

  One of the integer constants below; ``to_string`` maps a constant back
  to its symbolic name.
  """

  EARLY = 0
  ON_TIME = 1
  LATE = 2
  UNKNOWN = 3

  @classmethod
  def to_string(cls, value):
    """Return the symbolic name of a timing value (KeyError if unknown)."""
    names = {
        cls.EARLY: 'EARLY',
        cls.ON_TIME: 'ON_TIME',
        cls.LATE: 'LATE',
        cls.UNKNOWN: 'UNKNOWN',
    }
    return names[value]
class PaneInfo(object):
  """Describes the trigger firing information for a given WindowedValue.

  "Panes" represent individual firings on a single window. ``PaneInfo``s are
  passed downstream after trigger firings. They contain information about
  whether it's an early/on time/late firing, if it's the last or first firing
  from a window, and the index of the firing.
  """

  def __init__(self, is_first, is_last, timing, index, nonspeculative_index):
    self._is_first = is_first
    self._is_last = is_last
    self._timing = timing
    self._index = index
    self._nonspeculative_index = nonspeculative_index
    # Precomputed so the encoded_byte property is a plain attribute read.
    self._encoded_byte = self._get_encoded_byte()

  def _get_encoded_byte(self):
    # Bit 0: first pane; bit 1: last pane; remaining bits: timing value.
    byte = 0
    if self._is_first:
      byte |= 1
    if self._is_last:
      byte |= 2
    byte |= self._timing << 2
    return byte

  @staticmethod
  def from_encoded_byte(encoded_byte):
    """Return the cached PaneInfo for the given encoded byte.

    Bug fix: the original asserted ``encoded_byte in _BYTE_TO_PANE_INFO``,
    but the cache is a *list*, so that was a membership test comparing the
    int against PaneInfo elements — PaneInfo.__eq__ then raises
    AttributeError on the int. Validate the index instead.
    """
    assert 0 <= encoded_byte < len(_BYTE_TO_PANE_INFO)
    assert _BYTE_TO_PANE_INFO[encoded_byte] is not None
    return _BYTE_TO_PANE_INFO[encoded_byte]

  # Because common PaneInfo objects are cached, it is important that the value
  # is immutable. We therefore explicitly enforce this here with read-only
  # properties.
  @property
  def is_first(self):
    return self._is_first

  @property
  def is_last(self):
    return self._is_last

  @property
  def timing(self):
    return self._timing

  @property
  def index(self):
    return self._index

  @property
  def nonspeculative_index(self):
    return self._nonspeculative_index

  @property
  def encoded_byte(self):
    return self._encoded_byte

  def __repr__(self):
    return ('PaneInfo(first: %r, last: %r, timing: %s, index: %d, '
            'nonspeculative_index: %d)') % (
                self.is_first, self.is_last,
                PaneInfoTiming.to_string(self.timing),
                self.index, self.nonspeculative_index)

  def __eq__(self, other):
    if self is other:
      return True
    return (self.is_first == other.is_first and
            self.is_last == other.is_last and
            self.timing == other.timing and
            self.index == other.index and
            self.nonspeculative_index == other.nonspeculative_index)

  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other

  def __hash__(self):
    # Hash over the same fields used by __eq__.
    return hash((self.is_first, self.is_last, self.timing, self.index,
                 self.nonspeculative_index))

  def __reduce__(self):
    # Pickle by re-invoking the constructor with the original arguments.
    return PaneInfo, (self._is_first, self._is_last, self._timing, self._index,
                      self._nonspeculative_index)
def _construct_well_known_pane_infos():
  """Build the list mapping encoded bytes to cached PaneInfo objects."""
  all_timings = (PaneInfoTiming.EARLY, PaneInfoTiming.ON_TIME,
                 PaneInfoTiming.LATE, PaneInfoTiming.UNKNOWN)
  pane_infos = []
  for timing in all_timings:
    # -1 (unset) for early panes, 0 otherwise — same table as before.
    nonspeculative_index = -1 if timing == PaneInfoTiming.EARLY else 0
    for is_first, is_last in ((True, True), (True, False),
                              (False, True), (False, False)):
      # First panes carry index 0; all others use -1.
      index = 0 if is_first else -1
      pane_infos.append(
          PaneInfo(is_first, is_last, timing, index, nonspeculative_index))
  lookup = [None] * (max(info.encoded_byte for info in pane_infos) + 1)
  for info in pane_infos:
    lookup[info.encoded_byte] = info
  return lookup
# Cache of well-known PaneInfo objects.
_BYTE_TO_PANE_INFO = _construct_well_known_pane_infos()
# Default PaneInfo descriptor for when a value is not the output of triggering.
# Byte 0xF encodes is_first=True, is_last=True, timing=UNKNOWN (3 << 2 | 3).
PANE_INFO_UNKNOWN = _BYTE_TO_PANE_INFO[0xF]
class WindowedValue(object):
  """A windowed value having a value, a timestamp and set of windows.
  Attributes:
    value: The underlying value of a windowed value.
    timestamp: Timestamp associated with the value as seconds since Unix epoch.
    windows: A set (iterable) of window objects for the value. The window
      object are descendants of the BoundedWindow class.
    pane_info: A PaneInfo descriptor describing the triggering information for
      the pane that contained this value. If None, will be set to
      PANE_INFO_UNKNOWN.
  """
  def __init__(self,
               value,
               timestamp, # type: TimestampTypes
               windows, # type: Tuple[BoundedWindow, ...]
               pane_info=PANE_INFO_UNKNOWN
              ):
    # type: (...) -> None
    # For performance reasons, only timestamp_micros is stored by default
    # (as a C int). The Timestamp object is created on demand below.
    self.value = value
    if isinstance(timestamp, int):
      # Fast path: a plain int is interpreted as whole seconds. The lazy
      # timestamp_object is left at its None default (installed on the
      # class at module bottom) until the `timestamp` property is read.
      self.timestamp_micros = timestamp * 1000000
      if TYPE_CHECKING:
        # Only seen by the type checker; never executed at runtime.
        self.timestamp_object = None # type: Optional[Timestamp]
    else:
      self.timestamp_object = (timestamp if isinstance(timestamp, Timestamp)
                               else Timestamp.of(timestamp))
      self.timestamp_micros = self.timestamp_object.micros
    self.windows = windows
    self.pane_info = pane_info
  @property
  def timestamp(self):
    # type: () -> Timestamp
    # Materializes (and caches) the Timestamp object on first access.
    if self.timestamp_object is None:
      self.timestamp_object = Timestamp(0, self.timestamp_micros)
    return self.timestamp_object
  def __repr__(self):
    return '(%s, %s, %s, %s)' % (
        repr(self.value),
        'MIN_TIMESTAMP' if self.timestamp == MIN_TIMESTAMP else
        'MAX_TIMESTAMP' if self.timestamp == MAX_TIMESTAMP else
        float(self.timestamp),
        self.windows,
        self.pane_info)
  def __eq__(self, other):
    # timestamp_micros is compared first as the cheapest (int) check.
    return (type(self) == type(other)
            and self.timestamp_micros == other.timestamp_micros
            and self.value == other.value
            and self.windows == other.windows
            and self.pane_info == other.pane_info)
  def __ne__(self, other):
    # TODO(BEAM-5949): Needed for Python 2 compatibility.
    return not self == other
  def __hash__(self):
    # Weighted sum of the same fields used by __eq__; each term is masked
    # to a fixed bit width so the result stays bounded.
    return ((hash(self.value) & 0xFFFFFFFFFFFFFFF) +
            3 * (self.timestamp_micros & 0xFFFFFFFFFFFFFF) +
            7 * (hash(self.windows) & 0xFFFFFFFFFFFFF) +
            11 * (hash(self.pane_info) & 0xFFFFFFFFFFFFF))
  def with_value(self, new_value):
    # type: (Any) -> WindowedValue
    """Creates a new WindowedValue with the same timestamps and windows as this.
    This is the fastest way to create a new WindowedValue.
    """
    # Delegates to the module-level create() fast path, sharing the same
    # windows and pane_info objects (no copies are made).
    return create(new_value, self.timestamp_micros, self.windows,
                  self.pane_info)
  def __reduce__(self):
    # Pickle via the public constructor (timestamp is re-normalized on load).
    return WindowedValue, (self.value, self.timestamp, self.windows,
                           self.pane_info)
# TODO(robertwb): Move this to a static method.
def create(value, timestamp_micros, windows, pane_info=PANE_INFO_UNKNOWN):
  """Construct a WindowedValue from already-normalized microseconds.

  Fast path used by WindowedValue.with_value: bypasses __init__ (and its
  isinstance checks / Timestamp normalization) via __new__ and assigns the
  precomputed timestamp_micros directly.
  """
  wv = WindowedValue.__new__(WindowedValue)
  wv.value = value
  wv.timestamp_micros = timestamp_micros
  wv.windows = windows
  wv.pane_info = pane_info
  return wv
# Install a class-level None default so the int fast path in __init__ can
# skip assigning timestamp_object on every instance.
try:
  WindowedValue.timestamp_object = None
except TypeError:
  # When we're compiled, we can't dynamically add attributes to
  # the cdef class, but in this case it's OK as it's already present
  # on each instance.
  pass
class _IntervalWindowBase(object):
  """Optimized form of IntervalWindow storing only microseconds for endpoints.
  """
  def __init__(self, start, end):
    # type: (TimestampTypes, TimestampTypes) -> None
    if start is not None:
      self._start_object = Timestamp.of(start) # type: Optional[Timestamp]
      try:
        self._start_micros = self._start_object.micros
      except OverflowError:
        # Clamp to the representable extremes. NOTE(review): the
        # OverflowError presumably comes from assigning a too-large Python
        # int to the (C-typed, when compiled) _start_micros field, which is
        # why re-reading .micros inside the handler is safe — confirm.
        self._start_micros = (
            MIN_TIMESTAMP.micros if self._start_object.micros < 0
            else MAX_TIMESTAMP.micros)
    else:
      # Micros must be populated elsewhere.
      self._start_object = None
    if end is not None:
      self._end_object = Timestamp.of(end) # type: Optional[Timestamp]
      try:
        self._end_micros = self._end_object.micros
      except OverflowError:
        # Same clamping as for the start endpoint above.
        self._end_micros = (
            MIN_TIMESTAMP.micros if self._end_object.micros < 0
            else MAX_TIMESTAMP.micros)
    else:
      # Micros must be populated elsewhere.
      self._end_object = None
  @property
  def start(self):
    # type: () -> Timestamp
    # Materializes (and caches) the start Timestamp on first access.
    if self._start_object is None:
      self._start_object = Timestamp(0, self._start_micros)
    return self._start_object
  @property
  def end(self):
    # type: () -> Timestamp
    # Materializes (and caches) the end Timestamp on first access.
    if self._end_object is None:
      self._end_object = Timestamp(0, self._end_micros)
    return self._end_object
  def __hash__(self):
    return hash((self._start_micros, self._end_micros))
  def __eq__(self, other):
    # Compares the raw micros, so clamped endpoints compare equal even if
    # the original Timestamp objects differed.
    return (type(self) == type(other)
            and self._start_micros == other._start_micros
            and self._end_micros == other._end_micros)
  def __ne__(self, other):
    return not self == other
  def __repr__(self):
    # Half-open interval notation: [start, end).
    return '[%s, %s)' % (float(self.start), float(self.end))
|
|
# OneShot Save Utility
# https://github.com/hunternet93/oneshot-save-utility
# ===========
# UI Settings
# ===========
# Colors and font shared by every widget created below.
bgcolor = '#13041B'
textcolor = '#FFFFAA'
highlightcolor = '#888844'
font = ('vgasys', 14)
# ===============
# Initial imports
# ===============
from rubymarshal.reader import load as rb_load
from rubymarshal.writer import write as rb_write
import platform
import base64
import shutil
import psutil
import os
from tkinter import *
import tkinter.messagebox as tkmessagebox
# =========================================
# Find and initialize the OneShot directory
# =========================================
# MacOS support has not been tested!
if platform.system() == 'Windows':
    dirpath = os.path.join(os.path.expandvars('%appdata%'), 'OneShot')
    oneshot_process = 'oneshot.exe'
elif platform.system() == 'Linux':
    dirpath = os.path.join(os.path.expanduser('~'), '.local', 'share', 'Oneshot')
    oneshot_process = 'oneshot'
elif platform.system() == 'Darwin': # MacOS
    dirpath = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support', 'Oneshot')
    oneshot_process = 'oneshot'
else:
    # Bug fix: on any other platform the original code left dirpath
    # undefined and crashed with a NameError below; fail with a clear
    # message instead.
    tkmessagebox.showerror('Unsupported platform', 'OneShot Save Utility does not support this operating system.')
    quit()
psettingspath = os.path.join(dirpath, 'p-settings.dat')
if not os.path.exists(psettingspath):
    tkmessagebox.showerror('OneShot not initialized', "OneShot's settings have not been initialized. Please start a new OneShot game, save and exit OneShot, then restart OneShot Save Utility.")
    quit()
archivepath = os.path.join(dirpath, 'OneShot Save Utility Archive')
if not os.path.exists(archivepath): os.makedirs(archivepath)
# =====================
# Functions
# =====================
# This section grew more than I expected, TODO put in its own file
def check_oneshot_running():
    """Return True (after warning the user) if a OneShot process is running."""
    # Random trivia: the psutil Process class has a oneshot() method. It
    # weirded me out for a second when I came across it in the docs. :D
    # Bug fix: the original built Process objects from psutil.pids(); a
    # process exiting between the two calls raises NoSuchProcess.
    # process_iter() handles that race internally.
    for process in psutil.process_iter(['name']):
        if process.info['name'] == oneshot_process:
            tkmessagebox.showwarning('OneShot running', 'OneShot is currently running. Please close OneShot before using Oneshot Save Utility.')
            return True
    return False
def update_loadnamelist():
    """Refresh the save-list widget from the archive directory's contents."""
    saves = []
    for filename in os.listdir(archivepath):
        try:
            # Filenames are base64-encoded save titles plus '.dat' (see save()).
            saves.append(base64.urlsafe_b64decode(filename[:-4].encode('utf-8')).decode())
        except (ValueError, UnicodeDecodeError):
            # Not a file this utility created — skip it. Bug fix: was a bare
            # except, which also swallowed KeyboardInterrupt/SystemExit.
            # (binascii.Error is a ValueError subclass, so malformed base64
            # lands here too.)
            continue
    loadnamelist.delete(0, END)
    # Renamed from `save` to avoid shadowing the save() function.
    for save_title in sorted(saves): loadnamelist.insert(END, save_title)
def save():
    """Archive the current save.dat under the title entered by the user."""
    if check_oneshot_running():
        return
    title = savenamebox.get().strip()
    if not title:
        tkmessagebox.showwarning('No title entered', 'Please enter a title for your save.')
        return
    # Save titles are base64-encoded to prevent issues with using illegal characters in filenames.
    encoded_title = base64.urlsafe_b64encode(title.encode('utf-8')).decode()
    path = os.path.join(archivepath, encoded_title + '.dat')
    if os.path.exists(path) and not tkmessagebox.askyesno(
            'Overwrite?',
            'A save with the name "{}" already exists, do you want to overwrite it?'.format(title)):
        return
    shutil.copy(os.path.join(dirpath, 'save.dat'), path)
    savenamebox.delete(0, END)
    tkmessagebox.showinfo('Save created', 'Save "{}" was successfully created.'.format(title))
    update_loadnamelist()
def load():
    """Copy the selected archived save over OneShot's live save.dat."""
    if check_oneshot_running(): return
    selection = loadnamelist.curselection()
    if not selection:
        # Bug fix: with nothing selected, curselection() is empty and the
        # original indexed it, crashing with an IndexError.
        tkmessagebox.showwarning('No save selected', 'Please select a save to load.')
        return
    title = loadnamelist.get(selection[0])
    path = os.path.join(archivepath, base64.urlsafe_b64encode(title.encode('utf-8')).decode() + '.dat')
    shutil.copy(path, os.path.join(dirpath, 'save.dat'))
    tkmessagebox.showinfo('Save loaded', 'Save "{}" was successfully loaded.'.format(title))
def delete():
    """Delete the selected archived save after confirmation."""
    selection = loadnamelist.curselection()
    if not selection:
        # Bug fix: with nothing selected, curselection() is empty and the
        # original indexed it, crashing with an IndexError.
        tkmessagebox.showwarning('No save selected', 'Please select a save to delete.')
        return
    title = loadnamelist.get(selection[0])
    if tkmessagebox.askyesno('Confirm delete', 'Delete save "{}"?'.format(title)):
        path = os.path.join(archivepath, base64.urlsafe_b64encode(title.encode('utf-8')).decode() + '.dat')
        os.remove(path)
        update_loadnamelist()
def reset_current():
    """Delete the current playthrough's save.dat after confirmation."""
    if check_oneshot_running():
        return
    if not tkmessagebox.askyesno('Confirm reset', 'Reset the current playthrough?'):
        return
    try:
        os.unlink(os.path.join(dirpath, 'save.dat'))
    except FileNotFoundError:
        # Already reset; nothing to delete.
        pass
    tkmessagebox.showinfo('Playthrough reset', 'The current playthrough has been reset.')
def reset_full():
    """Delete both save.dat and p-settings.dat after confirmation."""
    if check_oneshot_running():
        return
    if not tkmessagebox.askyesno('Confirm full reset', 'Completely reset the game?'):
        return
    try:
        # Same try block as before: if save.dat is already gone, the
        # p-settings unlink is skipped too.
        os.unlink(os.path.join(dirpath, 'save.dat'))
        os.unlink(os.path.join(dirpath, 'p-settings.dat'))
    except FileNotFoundError:
        pass
    tkmessagebox.showinfo('Game reset', 'The game has been reset.')
def get_psettings():
    """Read the three Ruby-marshal records from the p-settings file."""
    with open(psettingspath, 'rb') as psettings_file:
        return [rb_load(psettings_file) for _ in range(3)]
def set_psettings(data):
    """Serialize each record in `data` back into the p-settings file, in order."""
    with open(psettingspath, 'wb') as psettings_file:
        for record in data:
            rb_write(psettings_file, record)
def get_playername():
    """Populate the name entry box with the player name from p-settings."""
    # The player name is the third p-settings record.
    current_name = get_psettings()[2]
    namebox.delete(0, END)
    namebox.insert(0, current_name)
def set_playername():
    """Write the name from the entry box into p-settings."""
    if check_oneshot_running():
        return
    name = namebox.get().strip()
    if not name:
        tkmessagebox.showwarning('No name entered', 'Please enter a name.')
        return
    # The player name is the third p-settings record.
    data = get_psettings()
    data[2] = name
    set_psettings(data)
    tkmessagebox.showinfo('Name changed', 'Player name has been changed to {}.'.format(name))
    # Echo the stripped name back into the entry box.
    namebox.delete(0, END)
    namebox.insert(0, name)
def get_playthroughs():
    """Populate the playthroughs entry box with the count from p-settings."""
    # The count lives at index 1 of the second p-settings record.
    count = get_psettings()[1][1]
    playthroughbox.delete(0, END)
    playthroughbox.insert(0, str(count))
def set_playthroughs():
    """Write the playthrough count from the entry box into p-settings."""
    if check_oneshot_running():
        return
    raw_value = playthroughbox.get().strip()
    try:
        playthroughs = int(raw_value)
    except ValueError:
        tkmessagebox.showwarning('Invalid value', 'Number of playthroughs must be a number.')
        return
    # The count lives at index 1 of the second p-settings record.
    data = get_psettings()
    data[1][1] = playthroughs
    set_psettings(data)
    tkmessagebox.showinfo('Playthroughs changed', 'The number of playthroughs has been set to {}.'.format(str(playthroughs)))
    playthroughbox.delete(0, END)
    playthroughbox.insert(0, str(playthroughs))
def get_ptype():
    """Set the playthrough-type radio buttons from the p-settings flags."""
    # Flags live in the first p-settings record: index 9 marks a Memory
    # playthrough, index 1 a Solstice one; neither means First.
    flags = get_psettings()[0]
    if flags[9]:
        ptypevar.set(3) # Memory
    elif flags[1]:
        ptypevar.set(2) # Solstice
    else:
        ptypevar.set(1) # First
def set_ptype():
    """Write the selected playthrough type's flags into p-settings."""
    if check_oneshot_running(): return
    ptype = ptypevar.get()
    # Radio value -> (flag at data[0][1], flag at data[0][9], display name).
    type_table = {
        1: (False, False, 'First'),
        2: (True, False, 'Solstice'),
        3: (False, True, 'Memory'),
    }
    if ptype not in type_table:
        # Bug fix: with an unexpected value (e.g. the IntVar's default 0)
        # the original if/elif chain left `text` unbound and crashed with
        # an UnboundLocalError; do nothing instead.
        return
    data = get_psettings()
    data[0][1], data[0][9], text = type_table[ptype]
    set_psettings(data)
    tkmessagebox.showinfo('Playthrough Type changed', 'The playthrough type has been set to {}.\nThis will take effect on the next playthrough.'.format(text))
def get_ruetimes():
    """Populate the Rue entry box with the talk count from p-settings."""
    # The count lives at index 2 of the second p-settings record.
    talk_count = get_psettings()[1][2]
    ruebox.delete(0, END)
    ruebox.insert(0, str(talk_count))
def set_ruetimes():
    """Write the times-talked-to-Rue count from the entry box into p-settings."""
    if check_oneshot_running(): return
    try:
        ruetimes = int(ruebox.get().strip())
    except ValueError:
        tkmessagebox.showwarning('Invalid value', 'Number of times talked to Rue must be a number.')
        return
    # The count lives at index 2 of the second p-settings record.
    data = get_psettings()
    data[1][2] = ruetimes
    set_psettings(data)
    # Bug fix: the dialog title said 'Playthroughs changed' (copy-pasted
    # from set_playthroughs); this dialog is about the Rue counter.
    tkmessagebox.showinfo('Rue times changed', 'The number of times talked to Rue has been set to {}.'.format(str(ruetimes)))
    ruebox.delete(0, END)
    ruebox.insert(0, str(ruetimes))
# =================
# Initialize the UI
# =================
# Yeah, Tkinter's ugly, but also comes bundled with Python, so...
# Main application window.
root = Tk()
root.title('OneShot Save Utility')
root.config(bg = bgcolor)
root.minsize(600, 600)
# Save UI
saveframe = Frame(root, bg = bgcolor)
saveframe.pack(fill = BOTH, expand = 1, padx = 5, pady = 5)
Label(saveframe, text = 'Create Save:', bg = bgcolor, fg = textcolor, font = font).pack()
# Entry for the new save's title (read by save()).
savenamebox = Entry(saveframe, selectbackground = highlightcolor, bg = bgcolor, fg = textcolor, font = font)
savenamebox.pack(fill = X)
Button(saveframe, text = 'Save', command = save, bg = bgcolor, fg = textcolor, font = font).pack(anchor = S, expand = 1)
# Thin horizontal separator.
Frame(root, borderwidth = 1).pack(fill = X)
# Load UI
loadframe = Frame(root, bg = bgcolor)
loadframe.pack(fill = BOTH, expand = 1, padx = 20, pady = 20)
Label(loadframe, text = 'Load Save:', bg = bgcolor, fg = textcolor, font = font).pack()
# Listbox of archived saves (populated by update_loadnamelist()).
loadnamelist = Listbox(loadframe, selectmode = BROWSE, selectbackground = highlightcolor, height = 5, bg = bgcolor, fg = textcolor, font = font)
update_loadnamelist()
loadnamelist.pack(fill = BOTH, expand = 1, padx = 5)
buttonframe = Frame(loadframe, bg = bgcolor)
buttonframe.pack(fill = X)
# Bug fix: the Load/Delete buttons were parented to loadframe, leaving the
# buttonframe created just above packed but empty; parent them to
# buttonframe as evidently intended.
Button(buttonframe, text = 'Load', command = load, bg = bgcolor, fg = textcolor, font = font).pack(side = LEFT, expand = 1)
Button(buttonframe, text = 'Delete', command = delete, bg = bgcolor, fg = 'red', font = font).pack(side = RIGHT, expand = 1)
Frame(root, borderwidth = 1).pack(fill = X)
# Reset UI
resetframe = Frame(root, bg = bgcolor)
resetframe.pack(fill = BOTH, expand = 1, padx = 20, pady = 20)
Button(resetframe, text = 'Reset Current Playthrough', command = reset_current, bg = bgcolor, fg = textcolor, font = font).pack(side = LEFT, expand = 1)
# Full reset is destructive, so it gets the red "danger" foreground.
Button(resetframe, text = 'Full Reset', command = reset_full, bg = bgcolor, fg = 'red', font = font).pack(side = RIGHT, expand = 1)
# Name UI
nameframe = Frame(root, bg = bgcolor)
nameframe.pack(fill = BOTH, expand = 1, padx = 5, pady = 5)
Label(nameframe, text = 'Set Player Name:', bg = bgcolor, fg = textcolor, font = font).pack()
namebox = Entry(nameframe, selectbackground = highlightcolor, bg = bgcolor, fg = textcolor, font = font)
# Pre-fill the entry with the current player name from p-settings.
get_playername()
namebox.pack(fill = X)
Button(nameframe, text = 'Set', command = set_playername, bg = bgcolor, fg = textcolor, font = font).pack(side = LEFT, expand = 1)
# Thin horizontal separator.
Frame(root, borderwidth = 1).pack(fill = X)
# Variable UIs
# Two rows of editors for individual p-settings values.
ptframe = Frame(root, bg = bgcolor)
ptframe.pack(fill = BOTH, expand = 1, padx = 5, pady = 5)
mframe = Frame(root, bg = bgcolor)
mframe.pack(fill = BOTH, expand = 1, padx = 5, pady = 5)
# Playthroughs Var UI
playthroughframe = Frame(ptframe, bg = bgcolor)
playthroughframe.pack(side = LEFT, fill = BOTH, expand = 1, padx = 5, pady = 5)
Label(playthroughframe, text = 'Playthroughs', bg = bgcolor, fg = textcolor, font = font).pack()
playthroughbox = Entry(playthroughframe, selectbackground = highlightcolor, bg = bgcolor, fg = textcolor, font = font)
# Pre-fill with the current count from p-settings.
get_playthroughs()
playthroughbox.pack(fill = X)
Button(playthroughframe, text = 'Set', command = set_playthroughs, bg = bgcolor, fg = textcolor, font = font).pack(expand = 1)
# Playthrough Type Var UI
ptypeframe = Frame(ptframe, bg = bgcolor)
ptypeframe.pack(side = RIGHT, fill = BOTH, expand = 1, padx = 5, pady = 5)
Label(ptypeframe, text = 'Playthrough Type', bg = bgcolor, fg = textcolor, font = font).pack()
# Radio values: 1 = First, 2 = Solstice, 3 = Memory (see set_ptype()).
ptypevar = IntVar(master = root)
Radiobutton(ptypeframe, text = 'First', variable = ptypevar, value = 1, command = set_ptype,
    activebackground = bgcolor, activeforeground = textcolor, selectcolor = highlightcolor, bg = bgcolor, fg = textcolor, font = font).pack()
Radiobutton(ptypeframe, text = 'Solstice', variable = ptypevar, value = 2, command = set_ptype,
    activebackground = bgcolor, activeforeground = textcolor, selectcolor = highlightcolor, bg = bgcolor, fg = textcolor, font = font).pack()
Radiobutton(ptypeframe, text = 'Memory', variable = ptypevar, value = 3, command = set_ptype,
    activebackground = bgcolor, activeforeground = textcolor, selectcolor = highlightcolor, bg = bgcolor, fg = textcolor, font = font).pack()
# Select the radio button matching the current p-settings flags.
get_ptype()
# Times-talked-to-Rue Var UI
rueframe = Frame(mframe, bg = bgcolor)
rueframe.pack(side = LEFT, fill = BOTH, expand = 1, padx = 5, pady = 5)
Label(rueframe, text = 'Times Spoken to Rue', bg = bgcolor, fg = textcolor, font = font).pack()
ruebox = Entry(rueframe, selectbackground = highlightcolor, bg = bgcolor, fg = textcolor, font = font)
# Pre-fill with the current count from p-settings.
get_ruetimes()
ruebox.pack(fill = X)
Button(rueframe, text = 'Set', command = set_ruetimes, bg = bgcolor, fg = textcolor, font = font).pack(expand = 1)
# =========================
# Run the Tkinter main loop
# =========================
root.mainloop()
|
|
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
from functools import reduce
from collections import OrderedDict
import json
import multiprocessing
import optparse
import os
import shlex
import sys
import traceback
# Add testrunner to the path.
sys.path.insert(
0,
os.path.dirname(
os.path.dirname(os.path.abspath(__file__))))
from testrunner.local import command
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.test_config import TestConfig
from testrunner.testproc import progress
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.shard import ShardProc
from testrunner.testproc.sigproc import SignalProc
from testrunner.testproc.timeout import TimeoutProc
# Absolute path of the checkout root: three os.path.dirname() hops up from
# this file's location.
BASE_DIR = (
    os.path.dirname(
      os.path.dirname(
        os.path.dirname(
          os.path.abspath(__file__)))))
# Default GN output directory, scanned by the --gn option (see
# BaseTestRunner._get_gn_outdir).
DEFAULT_OUT_GN = 'out.gn'
# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
# The mapping from names used here to GN targets (which must stay in sync)
# is defined in infra/mb/gn_isolate_map.pyl.
# Expanded in BaseTestRunner._parse_test_args; names not present here are
# passed through unchanged as single-suite names.
TEST_MAP = {
  # This needs to stay in sync with group("v8_bot_default") in test/BUILD.gn.
  "bot_default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "webkit",
    "mkgrokdump",
    "wasm-js",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
    "wasm-api-tests",
  ],
  # This needs to stay in sync with group("v8_default") in test/BUILD.gn.
  "default": [
    "debugger",
    "mjsunit",
    "cctest",
    "wasm-spec-tests",
    "inspector",
    "mkgrokdump",
    "wasm-js",
    "fuzzer",
    "message",
    "preparser",
    "intl",
    "unittests",
    "wasm-api-tests",
  ],
  # This needs to stay in sync with group("v8_d8_default") in test/BUILD.gn.
  "d8_default": [
    "debugger",
    "mjsunit",
    "webkit",
    "message",
    "preparser",
    "intl",
  ],
  # This needs to stay in sync with "v8_optimize_for_size" in test/BUILD.gn.
  "optimize_for_size": [
    "debugger",
    "mjsunit",
    "cctest",
    "inspector",
    "webkit",
    "intl",
  ],
  "unittests": [
    "unittests",
  ],
}
# Architectures considered slow (simulators/boards):
# BaseTestRunner._timeout_scalefactor multiplies the base timeout by 4 for
# these (despite the historical "double the timeout" wording).
SLOW_ARCHS = ["arm",
              "mips",
              "mipsel",
              "mips64",
              "mips64el",
              "s390",
              "s390x",
              "arm64"]
class ModeConfig(object):
  """Per-mode settings bundle.

  flags: extra d8 flags always passed in this mode.
  timeout_scalefactor: base multiplier applied to test timeouts.
  status_mode: which status-file section ("debug"/"release") applies.
  execution_mode: the build flavor this mode must run against.
  """

  def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
    # Plain data holder: consumers read these attributes directly.
    self.__dict__.update(
        flags=flags,
        timeout_scalefactor=timeout_scalefactor,
        status_mode=status_mode,
        execution_mode=execution_mode,
    )
# d8 flags always added in debug-like modes.
DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap",
               "--testing-d8-test-runner"]
# d8 flags always added in release modes.
RELEASE_FLAGS = ["--nohard-abort", "--testing-d8-test-runner"]
# Mode name -> ModeConfig. 'status_mode' selects the status-file section;
# 'execution_mode' must agree with the build config's is_debug bit (checked
# in BaseTestRunner._process_default_options).
MODES = {
  "debug": ModeConfig(
    flags=DEBUG_FLAGS,
    timeout_scalefactor=4,
    status_mode="debug",
    execution_mode="debug",
  ),
  "optdebug": ModeConfig(
    flags=DEBUG_FLAGS,
    timeout_scalefactor=4,
    status_mode="debug",
    execution_mode="debug",
  ),
  "release": ModeConfig(
    flags=RELEASE_FLAGS,
    timeout_scalefactor=1,
    status_mode="release",
    execution_mode="release",
  ),
  # Normal trybot release configuration. There, dchecks are always on which
  # implies debug is set. Hence, the status file needs to assume debug-like
  # behavior/timeouts.
  "tryrelease": ModeConfig(
    flags=RELEASE_FLAGS,
    timeout_scalefactor=1,
    status_mode="debug",
    execution_mode="release",
  ),
  # This mode requires v8 to be compiled with dchecks and slow dchecks.
  "slowrelease": ModeConfig(
    flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
    timeout_scalefactor=2,
    status_mode="debug",
    execution_mode="release",
  ),
}
# Name -> progress-indicator class, selected via the -p/--progress option.
PROGRESS_INDICATORS = {
  'verbose': progress.VerboseProgressIndicator,
  'ci': progress.CIProgressIndicator,
  'dots': progress.DotsProgressIndicator,
  'color': progress.ColorProgressIndicator,
  'mono': progress.MonochromeProgressIndicator,
}
class TestRunnerError(Exception):
  """Internal test-runner failure (bad options, missing build config, ...).

  BaseTestRunner.execute() converts this into EXIT_CODE_INTERNAL_ERROR.
  """
  pass
class BuildConfig(object):
  """Typed snapshot of the fields we use from v8_build_config.json.

  Raises KeyError if an expected field is missing from the json dict.
  """

  # (attribute name, json key) for the plain pass-through flags.
  _SIMPLE_FIELDS = (
      ('asan', 'is_asan'),
      ('cfi_vptr', 'is_cfi'),
      ('dcheck_always_on', 'dcheck_always_on'),
      ('gcov_coverage', 'is_gcov_coverage'),
      ('is_android', 'is_android'),
      ('is_clang', 'is_clang'),
      ('is_debug', 'is_debug'),
      ('is_full_debug', 'is_full_debug'),
      ('msan', 'is_msan'),
      ('predictable', 'v8_enable_verify_predictable'),
      # TODO(machenbach): We only have ubsan not ubsan_vptr.
      ('tsan', 'is_tsan'),
      ('ubsan_vptr', 'is_ubsan_vptr'),
      ('embedded_builtins', 'v8_enable_embedded_builtins'),
      ('verify_csa', 'v8_enable_verify_csa'),
      ('lite_mode', 'v8_enable_lite_mode'),
      ('pointer_compression', 'v8_enable_pointer_compression'),
  )

  def __init__(self, build_config):
    # In V8 land, GN's x86 is called ia32.
    cpu = build_config['v8_target_cpu']
    self.arch = 'ia32' if cpu == 'x86' else cpu
    for attr, key in self._SIMPLE_FIELDS:
      setattr(self, attr, build_config[key])
    self.no_i18n = not build_config['v8_enable_i18n_support']
    # TODO(https://crbug.com/v8/8531)
    # 'v8_use_snapshot' was removed, 'no_snap' can be removed as well.
    self.no_snap = False
    # Export only for MIPS target
    if self.arch in ('mips', 'mipsel', 'mips64', 'mips64el'):
      self.mips_arch_variant = build_config['mips_arch_variant']
      self.mips_use_msa = build_config['mips_use_msa']

  @property
  def use_sanitizer(self):
    # Keep the or-chain (not any()) so the returned value matches the
    # original expression exactly.
    return (self.asan or self.cfi_vptr or self.msan or self.tsan or
            self.ubsan_vptr)

  def __str__(self):
    # Same attribute order as the historical if-chain.
    flags = (
        'asan', 'cfi_vptr', 'dcheck_always_on', 'gcov_coverage', 'msan',
        'no_i18n', 'no_snap', 'predictable', 'tsan', 'ubsan_vptr',
        'embedded_builtins', 'verify_csa', 'lite_mode', 'pointer_compression',
    )
    return '\n'.join(name for name in flags if getattr(self, name))
class BaseTestRunner(object):
  """Common driver shared by the V8 test runners.

  Handles option parsing, build-config autodetection, environment setup
  (sanitizer options, cwd, locale) and construction of the shared testproc
  pipeline pieces. Subclasses provide `framework_name`, `_do_execute` and
  override the remaining hooks as needed.
  """

  def __init__(self, basedir=None):
    # Root of the checkout; suite and build paths are resolved against it.
    self.basedir = basedir or BASE_DIR
    self.outdir = None
    self.build_config = None
    self.mode_name = None
    self.mode_options = None
    self.target_os = None

  @property
  def framework_name(self):
    """String name of the base-runner subclass, used in test results."""
    raise NotImplementedError()

  def execute(self, sys_args=None):
    """Top-level entry point. Returns a utils.EXIT_CODE_* value."""
    if sys_args is None:  # pragma: no cover
      sys_args = sys.argv[1:]
    try:
      parser = self._create_parser()
      options, args = self._parse_args(parser, sys_args)
      if options.swarming:
        # Swarming doesn't print how isolated commands are called. Lets make
        # this less cryptic by printing it ourselves.
        print(' '.join(sys.argv))

      self._load_build_config(options)
      command.setup(self.target_os, options.device)

      try:
        self._process_default_options(options)
        self._process_options(options)
      except TestRunnerError:
        parser.print_help()
        raise

      args = self._parse_test_args(args)
      tests = self._load_testsuite_generators(args, options)
      self._setup_env()
      print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                             self.mode_name))
      exit_code = self._do_execute(tests, args, options)
      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
        print("Force exit code 0 after failures. Json test results file "
              "generated with failure information.")
        exit_code = utils.EXIT_CODE_PASS
      return exit_code
    except TestRunnerError:
      traceback.print_exc()
      return utils.EXIT_CODE_INTERNAL_ERROR
    except KeyboardInterrupt:
      return utils.EXIT_CODE_INTERRUPTED
    except Exception:
      traceback.print_exc()
      return utils.EXIT_CODE_INTERNAL_ERROR
    finally:
      command.tear_down()

  def _create_parser(self):
    """Build the option parser (default options + subclass options)."""
    parser = optparse.OptionParser()
    parser.usage = '%prog [options] [tests]'
    parser.description = """TESTS: %s""" % (TEST_MAP["default"])
    self._add_parser_default_options(parser)
    self._add_parser_options(parser)
    return parser

  def _add_parser_default_options(self, parser):
    """Register the options understood by every runner subclass."""
    parser.add_option("--gn", help="Scan out.gn for the last built"
                      " configuration",
                      default=False, action="store_true")
    parser.add_option("--outdir", help="Base directory with compile output",
                      default="out")
    parser.add_option("--arch",
                      help="The architecture to run tests for")
    parser.add_option("-m", "--mode",
                      help="The test mode in which to run (uppercase for builds"
                      " in CI): %s" % MODES.keys())
    parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                      "directory will be used")
    parser.add_option("--test-root", help="Root directory of the test suites",
                      default=os.path.join(self.basedir, 'test'))
    parser.add_option("--total-timeout-sec", default=0, type="int",
                      help="How long should fuzzer run")
    parser.add_option("--swarming", default=False, action="store_true",
                      help="Indicates running test driver on swarming.")
    parser.add_option("-j", help="The number of parallel tasks to run",
                      default=0, type=int)
    parser.add_option("-d", "--device",
                      help="The device ID to run Android tests on. If not "
                           "given it will be autodetected.")

    # Shard
    parser.add_option("--shard-count", default=1, type=int,
                      help="Split tests into this number of shards")
    parser.add_option("--shard-run", default=1, type=int,
                      help="Run this shard from the split up tests.")

    # Progress
    # NOTE: optparse only accepts a tuple/list for 'choices'; passing the
    # dict_keys view directly raises OptionError on Python 3.
    parser.add_option("-p", "--progress",
                      choices=list(PROGRESS_INDICATORS.keys()), default="mono",
                      help="The style of progress indicator (verbose, dots, "
                           "color, mono)")
    parser.add_option("--json-test-results",
                      help="Path to a file for storing json results.")
    parser.add_option("--junitout", help="File name of the JUnit output")
    parser.add_option("--junittestsuite", default="v8tests",
                      help="The testsuite name in the JUnit output file")
    parser.add_option("--exit-after-n-failures", type="int", default=100,
                      help="Exit after the first N failures instead of "
                           "running all tests. Pass 0 to disable this feature.")
    parser.add_option("--ci-test-completion",
                      help="Path to a file for logging test completion in the "
                           "context of CI progress indicator. Ignored if "
                           "progress indicator is other than 'ci'.")

    # Rerun
    parser.add_option("--rerun-failures-count", default=0, type=int,
                      help="Number of times to rerun each failing test case. "
                           "Very slow tests will be rerun only once.")
    parser.add_option("--rerun-failures-max", default=100, type=int,
                      help="Maximum number of failing test cases to rerun")

    # Test config
    parser.add_option("--command-prefix", default="",
                      help="Prepended to each shell command used to run a test")
    parser.add_option("--extra-flags", action="append", default=[],
                      help="Additional flags to pass to each test command")
    parser.add_option("--isolates", action="store_true", default=False,
                      help="Whether to test isolates")
    parser.add_option("--no-harness", "--noharness",
                      default=False, action="store_true",
                      help="Run without test harness of a given suite")
    parser.add_option("--random-seed", default=0, type=int,
                      help="Default seed for initializing random generator")
    parser.add_option("--run-skipped", help="Also run skipped tests.",
                      default=False, action="store_true")
    parser.add_option("-t", "--timeout", default=60, type=int,
                      help="Timeout for single test in seconds")
    parser.add_option("-v", "--verbose", default=False, action="store_true",
                      help="Verbose output")

    # TODO(machenbach): Temporary options for rolling out new test runner
    # features.
    parser.add_option("--mastername", default='',
                      help="Mastername property from infrastructure. Not "
                           "setting this option indicates manual usage.")
    parser.add_option("--buildername", default='',
                      help="Buildername property from infrastructure. Not "
                           "setting this option indicates manual usage.")

  def _add_parser_options(self, parser):
    """Hook for subclasses to register additional options."""
    pass

  def _parse_args(self, parser, sys_args):
    """Parse sys_args; rejects the deprecated comma-separated arch/mode."""
    options, args = parser.parse_args(sys_args)

    if any(map(lambda v: v and ',' in v,
               [options.arch, options.mode])):  # pragma: no cover
      print('Multiple arch/mode are deprecated')
      raise TestRunnerError()

    return options, args

  def _load_build_config(self, options):
    """Autodetect the build config from the candidate output directories.

    Tries every candidate; the last directory containing a valid
    v8_build_config.json wins and also defines self.outdir.
    """
    for outdir in self._possible_outdirs(options):
      try:
        self.build_config = self._do_load_build_config(outdir, options.verbose)
      except TestRunnerError:
        pass

    if not self.build_config:  # pragma: no cover
      print('Failed to load build config')
      raise TestRunnerError

    print('Build found: %s' % self.outdir)
    if str(self.build_config):
      print('>>> Autodetected:')
      print(self.build_config)

    # Represents the OS where tests are run on. Same as host OS except for
    # Android, which is determined by build output.
    if self.build_config.is_android:
      self.target_os = 'android'
    else:
      self.target_os = utils.GuessOS()

  # Returns possible build paths in order:
  # gn
  # outdir
  # outdir/arch.mode
  # Each path is provided in two versions: <path> and <path>/mode for bots.
  def _possible_outdirs(self, options):
    def outdirs():
      if options.gn:
        yield self._get_gn_outdir()
        return

      yield options.outdir
      if options.arch and options.mode:
        yield os.path.join(options.outdir,
                           '%s.%s' % (options.arch, options.mode))

    for outdir in outdirs():
      yield os.path.join(self.basedir, outdir)

      # Bot option: must be emitted for *every* candidate (the block was
      # previously dedented out of the loop, which only produced the bot
      # path for the last outdir via the leaked loop variable).
      if options.mode:
        yield os.path.join(self.basedir, outdir, options.mode)

  def _get_gn_outdir(self):
    """Return the most recently modified config dir below out.gn (or None)."""
    gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
    latest_timestamp = -1
    latest_config = None
    for gn_config in os.listdir(gn_out_dir):
      gn_config_dir = os.path.join(gn_out_dir, gn_config)
      if not os.path.isdir(gn_config_dir):
        continue
      if os.path.getmtime(gn_config_dir) > latest_timestamp:
        latest_timestamp = os.path.getmtime(gn_config_dir)
        latest_config = gn_config
    if latest_config:
      print(">>> Latest GN build found: %s" % latest_config)
      return os.path.join(DEFAULT_OUT_GN, latest_config)

  def _do_load_build_config(self, outdir, verbose=False):
    """Load v8_build_config.json from outdir; raises TestRunnerError if
    missing or unparsable."""
    build_config_path = os.path.join(outdir, "v8_build_config.json")
    if not os.path.exists(build_config_path):
      if verbose:
        print("Didn't find build config: %s" % build_config_path)
      raise TestRunnerError()

    with open(build_config_path) as f:
      try:
        build_config_json = json.load(f)
      except Exception:  # pragma: no cover
        print("%s exists but contains invalid json. Is your build up-to-date?"
              % build_config_path)
        raise TestRunnerError()

    # In auto-detect mode the outdir is always where we found the build config.
    # This ensures that we'll also take the build products from there.
    self.outdir = os.path.dirname(build_config_path)

    return BuildConfig(build_config_json)

  def _process_default_options(self, options):
    """Resolve mode/arch options against the detected build config."""
    # We don't use the mode for more path-magic.
    # Therefore transform the bot mode here to fix build_config value.
    if options.mode:
      options.mode = self._bot_to_v8_mode(options.mode)

    build_config_mode = 'debug' if self.build_config.is_debug else 'release'
    if options.mode:
      if options.mode not in MODES:  # pragma: no cover
        print('%s mode is invalid' % options.mode)
        raise TestRunnerError()
      if MODES[options.mode].execution_mode != build_config_mode:
        print('execution mode (%s) for %s is inconsistent with build config '
              '(%s)' % (
                  MODES[options.mode].execution_mode,
                  options.mode,
                  build_config_mode))
        raise TestRunnerError()

      self.mode_name = options.mode
    else:
      self.mode_name = build_config_mode

    self.mode_options = MODES[self.mode_name]

    if options.arch and options.arch != self.build_config.arch:
      print('--arch value (%s) inconsistent with build config (%s).' % (
          options.arch, self.build_config.arch))
      raise TestRunnerError()

    if options.shell_dir:  # pragma: no cover
      print('Warning: --shell-dir is deprecated. Searching for executables in '
            'build directory (%s) instead.' % self.outdir)

    if options.j == 0:
      if self.build_config.is_android:
        # Adb isn't happy about multi-processed file pushing.
        options.j = 1
      else:
        options.j = multiprocessing.cpu_count()

    options.command_prefix = shlex.split(options.command_prefix)
    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])

  def _bot_to_v8_mode(self, config):
    """Convert build configs from bots to configs understood by the v8 runner.

    V8 configs are always lower case and without the additional _x64 suffix
    for 64 bit builds on windows with ninja.
    """
    mode = config[:-4] if config.endswith('_x64') else config
    return mode.lower()

  def _process_options(self, options):
    """Hook for subclasses to validate/derive additional options."""
    pass

  def _setup_env(self):
    """Set cwd, locale and sanitizer environment variables for child d8s."""
    # Use the v8 root as cwd as some test cases use "load" with relative paths.
    os.chdir(self.basedir)

    # Many tests assume an English interface.
    os.environ['LANG'] = 'en_US.UTF-8'

    symbolizer_option = self._get_external_symbolizer_option()

    if self.build_config.asan:
      asan_options = [
          symbolizer_option,
          'allow_user_segv_handler=1',
          'allocator_may_return_null=1',
      ]
      if not utils.GuessOS() in ['macos', 'windows']:
        # LSAN is not available on mac and windows.
        asan_options.append('detect_leaks=1')
      else:
        asan_options.append('detect_leaks=0')
      if utils.GuessOS() == 'windows':
        # https://crbug.com/967663
        asan_options.append('detect_stack_use_after_return=0')
      os.environ['ASAN_OPTIONS'] = ":".join(asan_options)

    if self.build_config.cfi_vptr:
      os.environ['UBSAN_OPTIONS'] = ":".join([
          'print_stacktrace=1',
          'print_summary=1',
          'symbolize=1',
          symbolizer_option,
      ])

    if self.build_config.ubsan_vptr:
      os.environ['UBSAN_OPTIONS'] = ":".join([
          'print_stacktrace=1',
          symbolizer_option,
      ])

    if self.build_config.msan:
      os.environ['MSAN_OPTIONS'] = symbolizer_option

    if self.build_config.tsan:
      suppressions_file = os.path.join(
          self.basedir,
          'tools',
          'sanitizers',
          'tsan_suppressions.txt')
      os.environ['TSAN_OPTIONS'] = " ".join([
          symbolizer_option,
          'suppressions=%s' % suppressions_file,
          'exit_code=0',
          'report_thread_leaks=0',
          'history_size=7',
          'report_destroy_locked=0',
      ])

  def _get_external_symbolizer_option(self):
    """Build the external_symbolizer_path=... sanitizer option string."""
    external_symbolizer_path = os.path.join(
        self.basedir,
        'third_party',
        'llvm-build',
        'Release+Asserts',
        'bin',
        'llvm-symbolizer',
    )

    if utils.IsWindows():
      # Quote, because sanitizers might confuse colon as option separator.
      external_symbolizer_path = '"%s.exe"' % external_symbolizer_path

    return 'external_symbolizer_path=%s' % external_symbolizer_path

  def _parse_test_args(self, args):
    """Expand group names from TEST_MAP into concrete suite names."""
    if not args:
      args = self._get_default_suite_names()

    # Expand arguments with grouped tests. The args should reflect the list
    # of suites as otherwise filters would break.
    def expand_test_group(name):
      return TEST_MAP.get(name, [name])

    return reduce(list.__add__, map(expand_test_group, args), [])

  def _args_to_suite_names(self, args, test_root):
    """Keep only args whose leading path component is an existing suite."""
    # Use default tests if no test configuration was provided at the cmd line.
    all_names = set(utils.GetSuitePaths(test_root))
    args_names = OrderedDict([(arg.split('/')[0], None) for arg in args])  # set
    return [name for name in args_names if name in all_names]

  def _get_default_suite_names(self):
    """Hook: suites to run when no args are given."""
    return []

  def _load_testsuite_generators(self, args, options):
    """Load each requested suite from disk and chain its test generators."""
    names = self._args_to_suite_names(args, options.test_root)
    test_config = self._create_test_config(options)
    variables = self._get_statusfile_variables(options)

    # Head generator with no elements
    test_chain = testsuite.TestGenerator(0, [], [])
    for name in names:
      if options.verbose:
        print('>>> Loading test suite: %s' % name)
      suite = testsuite.TestSuite.Load(
          os.path.join(options.test_root, name), test_config,
          self.framework_name)

      if self._is_testsuite_supported(suite, options):
        tests = suite.load_tests_from_disk(variables)
        test_chain.merge(tests)

    return test_chain

  def _is_testsuite_supported(self, suite, options):
    """A predicate that can be overridden to filter out unsupported TestSuite
    instances (see NumFuzzer for usage)."""
    return True

  def _get_statusfile_variables(self, options):
    """Variables made visible to status-file expressions."""
    simd_mips = (
        self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
        self.build_config.mips_arch_variant == "r6" and
        self.build_config.mips_use_msa)

    mips_arch_variant = (
        self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
        self.build_config.mips_arch_variant)

    # TODO(machenbach): In GN we can derive simulator run from
    # target_arch != v8_target_arch in the dumped build config.
    return {
        "arch": self.build_config.arch,
        "asan": self.build_config.asan,
        "byteorder": sys.byteorder,
        "dcheck_always_on": self.build_config.dcheck_always_on,
        "deopt_fuzzer": False,
        "endurance_fuzzer": False,
        "gc_fuzzer": False,
        "gc_stress": False,
        "gcov_coverage": self.build_config.gcov_coverage,
        "isolates": options.isolates,
        "is_clang": self.build_config.is_clang,
        "is_full_debug": self.build_config.is_full_debug,
        "mips_arch_variant": mips_arch_variant,
        "mode": self.mode_options.status_mode
                if not self.build_config.dcheck_always_on
                else "debug",
        "msan": self.build_config.msan,
        "no_harness": options.no_harness,
        "no_i18n": self.build_config.no_i18n,
        "no_snap": self.build_config.no_snap,
        "novfp3": False,
        "optimize_for_size": "--optimize-for-size" in options.extra_flags,
        "predictable": self.build_config.predictable,
        "simd_mips": simd_mips,
        "simulator_run": False,
        "system": self.target_os,
        "tsan": self.build_config.tsan,
        "ubsan_vptr": self.build_config.ubsan_vptr,
        "embedded_builtins": self.build_config.embedded_builtins,
        "verify_csa": self.build_config.verify_csa,
        "lite_mode": self.build_config.lite_mode,
        "pointer_compression": self.build_config.pointer_compression,
    }

  def _create_test_config(self, options):
    """Bundle per-test execution settings into a TestConfig."""
    timeout = options.timeout * self._timeout_scalefactor(options)
    return TestConfig(
        command_prefix=options.command_prefix,
        extra_flags=options.extra_flags,
        isolates=options.isolates,
        mode_flags=self.mode_options.flags,
        no_harness=options.no_harness,
        noi18n=self.build_config.no_i18n,
        random_seed=options.random_seed,
        run_skipped=options.run_skipped,
        shell_dir=self.outdir,
        timeout=timeout,
        verbose=options.verbose,
    )

  def _timeout_scalefactor(self, options):
    """Increases timeout for slow build configurations."""
    factor = self.mode_options.timeout_scalefactor

    # Simulators are slow, therefore allow a longer timeout.
    if self.build_config.arch in SLOW_ARCHS:
      factor *= 4

    if self.build_config.lite_mode:
      factor *= 2
    if self.build_config.predictable:
      factor *= 4
    if self.build_config.use_sanitizer:
      factor *= 1.5
    if self.build_config.is_full_debug:
      factor *= 4

    return factor

  # TODO(majeski): remove options & args parameters
  def _do_execute(self, suites, args, options):
    """Subclass hook: actually run the tests; returns an exit code."""
    raise NotImplementedError()

  def _prepare_procs(self, procs):
    """Connect a linear chain of testprocs and set up its head."""
    # filter() is a lazy iterator on Python 3; materialize it so len() and
    # indexing below work.
    procs = [proc for proc in procs if proc]
    for i in range(0, len(procs) - 1):
      procs[i].connect_to(procs[i + 1])
    procs[0].setup()

  def _create_shard_proc(self, options):
    """Return a ShardProc, or None when running unsharded."""
    myid, count = self._get_shard_info(options)
    if count == 1:
      return None
    return ShardProc(myid - 1, count)

  def _get_shard_info(self, options):
    """
    Returns pair:
      (id of the current shard [1; number of shards], number of shards)
    """
    # Read gtest shard configuration from environment (e.g. set by swarming).
    # If none is present, use values passed on the command line.
    shard_count = int(
        os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
    shard_run = os.environ.get('GTEST_SHARD_INDEX')
    if shard_run is not None:
      # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
      shard_run = int(shard_run) + 1
    else:
      shard_run = options.shard_run

    if options.shard_count > 1:
      # Log if a value was passed on the cmd line and it differs from the
      # environment variables.
      if options.shard_count != shard_count:  # pragma: no cover
        print("shard_count from cmd line differs from environment variable "
              "GTEST_TOTAL_SHARDS")
      if (options.shard_run > 1 and
          options.shard_run != shard_run):  # pragma: no cover
        print("shard_run from cmd line differs from environment variable "
              "GTEST_SHARD_INDEX")

    if shard_run < 1 or shard_run > shard_count:
      # TODO(machenbach): Turn this into an assert. If that's wrong on the
      # bots, printing will be quite useless. Or refactor this code to make
      # sure we get a return code != 0 after testing if we got here.
      print("shard-run not a valid number, should be in [1:shard-count]")
      print("defaulting back to running all tests")
      return 1, 1

    return shard_run, shard_count

  def _create_progress_indicators(self, test_count, options):
    """Instantiate and configure the chosen progress indicators."""
    procs = [PROGRESS_INDICATORS[options.progress]()]
    if options.junitout:
      procs.append(progress.JUnitTestProgressIndicator(options.junitout,
                                                       options.junittestsuite))
    if options.json_test_results:
      procs.append(progress.JsonTestProgressIndicator(
          self.framework_name,
          options.json_test_results,
          self.build_config.arch,
          self.mode_options.execution_mode))

    for proc in procs:
      proc.configure(options)

    for proc in procs:
      try:
        proc.set_test_count(test_count)
      except AttributeError:
        # Not every indicator tracks the total test count.
        pass

    return procs

  def _create_result_tracker(self, options):
    return progress.ResultsTracker(options.exit_after_n_failures)

  def _create_timeout_proc(self, options):
    """Return a TimeoutProc, or None when no overall timeout is set."""
    if not options.total_timeout_sec:
      return None
    return TimeoutProc(options.total_timeout_sec)

  def _create_signal_proc(self):
    return SignalProc()

  def _create_rerun_proc(self, options):
    """Return a RerunProc, or None when rerunning failures is disabled."""
    if not options.rerun_failures_count:
      return None
    return RerunProc(options.rerun_failures_count,
                     options.rerun_failures_max)
|
|
# -*- coding: utf-8 -*-
"""Validation classes for various types of data."""
from __future__ import unicode_literals
import re
from operator import attrgetter
from marshmallow.compat import basestring, text_type, zip_longest
from marshmallow.exceptions import ValidationError
class Validator(object):
    """Abstract base class for validators.

    .. note::
        This class does not provide any behavior. It is only used to
        add a useful `__repr__` implementation for validators.
    """

    def __repr__(self):
        prefix = self._repr_args()
        if prefix:
            prefix = '{0}, '.format(prefix)
        return '<{0}({1}error={2!r})>'.format(
            self.__class__.__name__, prefix, self.error,
        )

    def _repr_args(self):
        """A string representation of the args passed to this validator.

        Used by `__repr__`; subclasses override it to show their settings.
        """
        return ''
class URL(Validator):
    """Validate a URL.

    :param bool relative: Whether to allow relative URLs.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}`.
    """
    URL_REGEX = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:[^:@]+?:[^:@]*?@|)'  # basic auth
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    RELATIVE_URL_REGEX = re.compile(
        r'^((?:http|ftp)s?://'  # http:// or https://
        r'(?:[^:@]+?:[^:@]*?@|)'  # basic auth
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|'  # ...or ipv4
        r'\[?[A-F0-9]*:[A-F0-9:]+\]?)'  # ...or ipv6
        r'(?::\d+)?)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)  # host is optional, allow for relative URLs

    default_message = 'Invalid URL.'

    def __init__(self, relative=False, error=None):
        self.relative = relative
        self.error = error or self.default_message

    def _repr_args(self):
        return 'relative={0!r}'.format(self.relative)

    def _format_error(self, value):
        return self.error.format(input=value)

    def __call__(self, value):
        failure = self._format_error(value)
        # An empty/None value can never be a URL.
        if not value:
            raise ValidationError(failure)
        pattern = self.RELATIVE_URL_REGEX if self.relative else self.URL_REGEX
        if pattern.search(value) is None:
            raise ValidationError(failure)
        return value
class Email(Validator):
    """Validate an email address.

    :param str error: Error message to raise in case of a validation error. Can be
        interpolated with `{input}`.
    """
    # Local part: either a dot-atom or a quoted-string (RFC-style -- TODO
    # confirm exact RFC coverage).
    USER_REGEX = re.compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9\w]+(\.[-!#$%&'*+/=?^_`{}|~0-9\w]+)*$"  # dot-atom
        # quoted-string
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]'
        r'|\\[\001-\011\013\014\016-\177])*"$)', re.IGNORECASE | re.UNICODE)
    DOMAIN_REGEX = re.compile(
        # domain
        r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}|[A-Z0-9-]{2,})$'
        # literal form, ipv4 address (SMTP 4.1.3)
        r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
        r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE | re.UNICODE)
    # Domains accepted without matching DOMAIN_REGEX (bare hostnames).
    DOMAIN_WHITELIST = ('localhost',)
    default_message = 'Invalid email address.'
    def __init__(self, error=None):
        self.error = error or self.default_message
    def _format_error(self, value):
        return self.error.format(input=value)
    def __call__(self, value):
        message = self._format_error(value)
        if not value or '@' not in value:
            raise ValidationError(message)
        # Split at the LAST '@' so quoted local parts containing '@' survive.
        user_part, domain_part = value.rsplit('@', 1)
        if not self.USER_REGEX.match(user_part):
            raise ValidationError(message)
        if domain_part not in self.DOMAIN_WHITELIST:
            if not self.DOMAIN_REGEX.match(domain_part):
                # Non-ASCII domain: retry with its IDNA (punycode) encoding
                # before rejecting; a failed encode falls through to raise.
                try:
                    domain_part = domain_part.encode('idna').decode('ascii')
                except UnicodeError:
                    pass
                else:
                    if self.DOMAIN_REGEX.match(domain_part):
                        return value
                raise ValidationError(message)
        return value
class Range(Validator):
    """Validator which succeeds if the value it is passed is greater
    or equal to ``min`` and less than or equal to ``max``. If ``min``
    is not specified, or is specified as `None`, no lower bound
    exists. If ``max`` is not specified, or is specified as `None`,
    no upper bound exists.

    :param min: The minimum value (lower bound). If not provided, minimum
        value will not be checked.
    :param max: The maximum value (upper bound). If not provided, maximum
        value will not be checked.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}`, `{min}` and `{max}`.
    """
    message_min = 'Must be at least {min}.'
    message_max = 'Must be at most {max}.'
    message_all = 'Must be between {min} and {max}.'

    def __init__(self, min=None, max=None, error=None):
        self.min = min
        self.max = max
        self.error = error

    def _repr_args(self):
        return 'min={0!r}, max={1!r}'.format(self.min, self.max)

    def _format_error(self, value, message):
        # A custom error template overrides the per-bound default message.
        template = self.error or message
        return template.format(input=value, min=self.min, max=self.max)

    def __call__(self, value):
        if self.min is not None and value < self.min:
            # Mention both bounds when both are configured.
            template = self.message_all if self.max is not None else self.message_min
            raise ValidationError(self._format_error(value, template))
        if self.max is not None and value > self.max:
            template = self.message_all if self.min is not None else self.message_max
            raise ValidationError(self._format_error(value, template))
        return value
class Length(Range):
    """Validator which succeeds if the value passed to it has a
    length between a minimum and maximum. Uses len(), so it
    can work for strings, lists, or anything with length.

    :param int min: The minimum length. If not provided, minimum length
        will not be checked.
    :param int max: The maximum length. If not provided, maximum length
        will not be checked.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}`, `{min}` and `{max}`.
    """
    message_min = 'Shorter than minimum length {min}.'
    message_max = 'Longer than maximum length {max}.'
    message_all = 'Length must be between {min} and {max}.'

    def __call__(self, value):
        count = len(value)
        if self.min is not None and count < self.min:
            template = self.message_all if self.max is not None else self.message_min
            raise ValidationError(self._format_error(value, template))
        if self.max is not None and count > self.max:
            template = self.message_all if self.min is not None else self.message_max
            raise ValidationError(self._format_error(value, template))
        return value
class Equal(Validator):
    """Validator which succeeds if the ``value`` passed to it is
    equal to ``comparable``.

    :param comparable: The object to compare to.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}` and `{other}`.
    """
    default_message = 'Must be equal to {other}.'

    def __init__(self, comparable, error=None):
        self.comparable = comparable
        self.error = error or self.default_message

    def _repr_args(self):
        return 'comparable=%r' % (self.comparable,)

    def _format_error(self, value):
        return self.error.format(input=value, other=self.comparable)

    def __call__(self, value):
        # Use != (not `not ==`) to honor the comparable's own __ne__.
        if value != self.comparable:
            raise ValidationError(self._format_error(value))
        return value
class Regexp(Validator):
    """Validate ``value`` against the provided regex.

    :param regex: The regular expression string to use. Can also be a compiled
        regular expression pattern.
    :param flags: The regexp flags to use, for example re.IGNORECASE. Ignored
        if ``regex`` is not a string.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}` and `{regex}`.
    """
    default_message = 'String does not match expected pattern.'

    def __init__(self, regex, flags=0, error=None):
        # Compile lazily-supplied pattern strings; pre-compiled patterns are
        # stored as-is (and `flags` is ignored for them).
        if isinstance(regex, basestring):
            regex = re.compile(regex, flags)
        self.regex = regex
        self.error = error or self.default_message

    def _repr_args(self):
        return 'regex={0!r}'.format(self.regex)

    def _format_error(self, value):
        return self.error.format(input=value, regex=self.regex.pattern)

    def __call__(self, value):
        # A Match object is always truthy; None means no match at the start.
        if not self.regex.match(value):
            raise ValidationError(self._format_error(value))
        return value
class Predicate(Validator):
    """Invoke the named ``method`` on the ``value`` object and succeed when
    the result is truthy. Extra keyword arguments are forwarded to the
    method call.
    :param str method: The name of the method to invoke.
    :param str error: Error message to raise in case of a validation error.
        Can be interpolated with `{input}` and `{method}`.
    :param kwargs: Additional keyword arguments to pass to the method.
    """
    default_message = 'Invalid input.'
    def __init__(self, method, error=None, **kwargs):
        self.method = method
        self.kwargs = kwargs
        self.error = error if error else self.default_message
    def _repr_args(self):
        return 'method={0!r}, kwargs={1!r}'.format(self.method, self.kwargs)
    def _format_error(self, value):
        return self.error.format(input=value, method=self.method)
    def __call__(self, value):
        bound = getattr(value, self.method)
        outcome = bound(**self.kwargs)
        if not outcome:
            raise ValidationError(self._format_error(value))
        return value
class NoneOf(Validator):
    """Validator that fails when ``value`` appears in ``iterable``.
    :param iterable iterable: A sequence of invalid values.
    :param str error: Error message to raise in case of a validation error. Can be
        interpolated using `{input}` and `{values}`.
    """
    default_message = 'Invalid input.'
    def __init__(self, iterable, error=None):
        self.iterable = iterable
        # Pre-render the display form once so error formatting is cheap.
        self.values_text = ', '.join(map(text_type, iterable))
        self.error = error if error else self.default_message
    def _repr_args(self):
        return 'iterable=%r' % (self.iterable,)
    def _format_error(self, value):
        return self.error.format(input=value, values=self.values_text)
    def __call__(self, value):
        # Unhashable / incomparable values raise TypeError on the membership
        # test; treat that as "not a member" and accept the value.
        try:
            is_member = value in self.iterable
        except TypeError:
            is_member = False
        if is_member:
            raise ValidationError(self._format_error(value))
        return value
class OneOf(Validator):
    """Validator that succeeds only when ``value`` appears in ``choices``.
    :param iterable choices: A sequence of valid values.
    :param iterable labels: Optional sequence of labels to pair with the choices.
    :param str error: Error message to raise in case of a validation error. Can be
        interpolated using `{input}`, `{choices}` and `{labels}`.
    """
    default_message = 'Not a valid choice.'
    def __init__(self, choices, labels=None, error=None):
        self.choices = choices
        self.choices_text = ', '.join(map(text_type, choices))
        self.labels = [] if labels is None else labels
        self.labels_text = ', '.join(map(text_type, self.labels))
        self.error = error if error else self.default_message
    def _repr_args(self):
        return 'choices={0!r}, labels={1!r}'.format(self.choices, self.labels)
    def _format_error(self, value):
        return self.error.format(
            input=value,
            choices=self.choices_text,
            labels=self.labels_text,
        )
    def __call__(self, value):
        # A TypeError from the membership test (e.g. unhashable value against
        # a set of choices) is treated the same as "not a member".
        try:
            is_member = value in self.choices
        except TypeError:
            is_member = False
        if not is_member:
            raise ValidationError(self._format_error(value))
        return value
    def options(self, valuegetter=text_type):
        """Return a generator over the (value, label) pairs, where value
        is a string associated with each choice. This convenience method
        is useful to populate, for instance, a form select field.
        :param valuegetter: Can be a callable or a string. In the former case, it must
            be a one-argument callable which returns the value of a
            choice. In the latter case, the string specifies the name
            of an attribute of the choice objects. Defaults to `str()`
            or `unicode()`.
        """
        getter = valuegetter if callable(valuegetter) else attrgetter(valuegetter)
        paired = zip_longest(self.choices, self.labels, fillvalue='')
        return ((getter(choice), label) for choice, label in paired)
class ContainsOnly(OneOf):
    """Validator that succeeds when ``value`` is a sequence and every element
    of it also occurs in ``choices`` (respecting multiplicity).
    :param iterable choices: Same as :class:`OneOf`.
    :param iterable labels: Same as :class:`OneOf`.
    :param str error: Same as :class:`OneOf`.
    """
    default_message = 'One or more of the choices you made was not acceptable.'
    def _format_error(self, value):
        joined = ', '.join(text_type(each) for each in value)
        return super(ContainsOnly, self)._format_error(joined)
    def __call__(self, value):
        remaining = list(self.choices)
        # An empty value is only acceptable when there are no choices at all.
        if remaining and not value:
            raise ValidationError(self._format_error(value))
        # list.remove (equality-based) handles unhashable elements, unlike a
        # set-based subset check; each match consumes one occurrence so
        # duplicates in ``value`` must also be duplicated in ``choices``.
        for element in value:
            try:
                remaining.remove(element)
            except ValueError:
                raise ValidationError(self._format_error(value))
        return value
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
# proto-plus module descriptor: registers every message/enum defined below
# under the ``google.analytics.admin.v1alpha`` proto package. The manifest
# must list exactly the names defined in this module.
__protobuf__ = proto.module(
    package="google.analytics.admin.v1alpha",
    manifest={
        "IndustryCategory",
        "ServiceLevel",
        "ActorType",
        "ActionType",
        "ChangeHistoryResourceType",
        "GoogleSignalsState",
        "GoogleSignalsConsent",
        "LinkProposalInitiatingProduct",
        "LinkProposalState",
        "Account",
        "Property",
        "DataStream",
        "UserLink",
        "AuditUserLink",
        "FirebaseLink",
        "GlobalSiteTag",
        "GoogleAdsLink",
        "DataSharingSettings",
        "AccountSummary",
        "PropertySummary",
        "MeasurementProtocolSecret",
        "ChangeHistoryEvent",
        "ChangeHistoryChange",
        "DisplayVideo360AdvertiserLink",
        "DisplayVideo360AdvertiserLinkProposal",
        "LinkProposalStatusDetails",
        "ConversionEvent",
        "GoogleSignalsSettings",
        "CustomDimension",
        "CustomMetric",
        "DataRetentionSettings",
    },
)
class IndustryCategory(proto.Enum):
    r"""The category selected for this property, used for industry
    benchmarking.
    """
    # Generated enum: numeric values are the wire-format values and must not
    # be renumbered.
    INDUSTRY_CATEGORY_UNSPECIFIED = 0
    AUTOMOTIVE = 1
    BUSINESS_AND_INDUSTRIAL_MARKETS = 2
    FINANCE = 3
    HEALTHCARE = 4
    TECHNOLOGY = 5
    TRAVEL = 6
    OTHER = 7
    ARTS_AND_ENTERTAINMENT = 8
    BEAUTY_AND_FITNESS = 9
    BOOKS_AND_LITERATURE = 10
    FOOD_AND_DRINK = 11
    GAMES = 12
    HOBBIES_AND_LEISURE = 13
    HOME_AND_GARDEN = 14
    INTERNET_AND_TELECOM = 15
    LAW_AND_GOVERNMENT = 16
    NEWS = 17
    ONLINE_COMMUNITIES = 18
    PEOPLE_AND_SOCIETY = 19
    PETS_AND_ANIMALS = 20
    REAL_ESTATE = 21
    REFERENCE = 22
    SCIENCE = 23
    SPORTS = 24
    JOBS_AND_EDUCATION = 25
    SHOPPING = 26
class ServiceLevel(proto.Enum):
    r"""Various levels of service for Google Analytics."""
    # Wire-format values; do not renumber.
    SERVICE_LEVEL_UNSPECIFIED = 0
    GOOGLE_ANALYTICS_STANDARD = 1
    GOOGLE_ANALYTICS_360 = 2
class ActorType(proto.Enum):
    r"""Different kinds of actors that can make changes to Google
    Analytics resources.
    """
    # Wire-format values; do not renumber.
    ACTOR_TYPE_UNSPECIFIED = 0
    USER = 1
    SYSTEM = 2
    SUPPORT = 3
class ActionType(proto.Enum):
    r"""Types of actions that may change a resource."""
    # Wire-format values; do not renumber.
    ACTION_TYPE_UNSPECIFIED = 0
    CREATED = 1
    UPDATED = 2
    DELETED = 3
class ChangeHistoryResourceType(proto.Enum):
    r"""Types of resources whose changes may be returned from change
    history.
    """
    # Wire-format values; the numbering gaps (3-5, 16-17) presumably
    # correspond to values reserved or defined in other API versions —
    # TODO confirm against the .proto source.
    CHANGE_HISTORY_RESOURCE_TYPE_UNSPECIFIED = 0
    ACCOUNT = 1
    PROPERTY = 2
    FIREBASE_LINK = 6
    GOOGLE_ADS_LINK = 7
    GOOGLE_SIGNALS_SETTINGS = 8
    CONVERSION_EVENT = 9
    MEASUREMENT_PROTOCOL_SECRET = 10
    CUSTOM_DIMENSION = 11
    CUSTOM_METRIC = 12
    DATA_RETENTION_SETTINGS = 13
    DISPLAY_VIDEO_360_ADVERTISER_LINK = 14
    DISPLAY_VIDEO_360_ADVERTISER_LINK_PROPOSAL = 15
    DATA_STREAM = 18
class GoogleSignalsState(proto.Enum):
    r"""Status of the Google Signals settings (i.e., whether this
    feature has been enabled for the property).
    """
    # Wire-format values; do not renumber.
    GOOGLE_SIGNALS_STATE_UNSPECIFIED = 0
    GOOGLE_SIGNALS_ENABLED = 1
    GOOGLE_SIGNALS_DISABLED = 2
class GoogleSignalsConsent(proto.Enum):
    r"""Consent field of the Google Signals settings (i.e., whether
    the user has consented to the Google Signals terms of service.)
    """
    # NOTE: declaration order does not follow numeric order here
    # (CONSENTED=2, NOT_CONSENTED=1); this matches the generated proto and
    # must not be "fixed" by renumbering.
    GOOGLE_SIGNALS_CONSENT_UNSPECIFIED = 0
    GOOGLE_SIGNALS_CONSENT_CONSENTED = 2
    GOOGLE_SIGNALS_CONSENT_NOT_CONSENTED = 1
class LinkProposalInitiatingProduct(proto.Enum):
    r"""An indication of which product the user initiated a link
    proposal from.
    """
    # Wire-format values; do not renumber.
    LINK_PROPOSAL_INITIATING_PRODUCT_UNSPECIFIED = 0
    GOOGLE_ANALYTICS = 1
    LINKED_PRODUCT = 2
class LinkProposalState(proto.Enum):
    r"""The state of a link proposal resource."""
    # Wire-format values; do not renumber.
    LINK_PROPOSAL_STATE_UNSPECIFIED = 0
    AWAITING_REVIEW_FROM_GOOGLE_ANALYTICS = 1
    AWAITING_REVIEW_FROM_LINKED_PRODUCT = 2
    WITHDRAWN = 3
    DECLINED = 4
    EXPIRED = 5
    OBSOLETE = 6
class Account(proto.Message):
    r"""A resource message representing a Google Analytics account.
    Attributes:
        name (str):
            Output only. Resource name of this account.
            Format: accounts/{account}
            Example: "accounts/100".
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when this account was
            originally created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when account payload fields
            were last updated.
        display_name (str):
            Required. Human-readable display name for
            this account.
        region_code (str):
            Country of business. Must be a Unicode CLDR
            region code.
        deleted (bool):
            Output only. Indicates whether this Account
            is soft-deleted or not. Deleted accounts are
            excluded from List results unless specifically
            requested.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    display_name = proto.Field(proto.STRING, number=4,)
    region_code = proto.Field(proto.STRING, number=5,)
    deleted = proto.Field(proto.BOOL, number=6,)
class Property(proto.Message):
    r"""A resource message representing a Google Analytics GA4
    property.
    Attributes:
        name (str):
            Output only. Resource name of this property. Format:
            properties/{property_id} Example: "properties/1000".
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when the entity was
            originally created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when entity payload fields
            were last updated.
        parent (str):
            Immutable. Resource name of this property's
            logical parent.
            Note: The Property-Moving UI can be used to
            change the parent. Format: accounts/{account}
            Example: "accounts/100".
        display_name (str):
            Required. Human-readable display name for
            this property.
            The max allowed display name length is 100
            UTF-16 code units.
        industry_category (google.analytics.admin_v1alpha.types.IndustryCategory):
            Industry associated with this property Example: AUTOMOTIVE,
            FOOD_AND_DRINK
        time_zone (str):
            Required. Reporting Time Zone, used as the day boundary for
            reports, regardless of where the data originates. If the
            time zone honors DST, Analytics will automatically adjust
            for the changes.
            NOTE: Changing the time zone only affects data going
            forward, and is not applied retroactively.
            Format: https://www.iana.org/time-zones Example:
            "America/Los_Angeles".
        currency_code (str):
            The currency type used in reports involving monetary values.
            Format: https://en.wikipedia.org/wiki/ISO_4217 Examples:
            "USD", "EUR", "JPY".
        service_level (google.analytics.admin_v1alpha.types.ServiceLevel):
            Output only. The Google Analytics service
            level that applies to this property.
        delete_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. If set, the time at which this
            property was trashed. If not set, then this
            property is not currently in the trash can.
        expire_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. If set, the time at which this
            trashed property will be permanently deleted. If
            not set, then this property is not currently in
            the trash can and is not slated to be deleted.
        account (str):
            Immutable. The resource name of the parent account Format:
            accounts/{account_id} Example: "accounts/123".
    """
    # NOTE: field numbers do not follow declaration order (parent is 2,
    # create_time is 3); number 9 is unused in this view — presumably
    # reserved. Do not renumber.
    name = proto.Field(proto.STRING, number=1,)
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
    parent = proto.Field(proto.STRING, number=2,)
    display_name = proto.Field(proto.STRING, number=5,)
    industry_category = proto.Field(proto.ENUM, number=6, enum="IndustryCategory",)
    time_zone = proto.Field(proto.STRING, number=7,)
    currency_code = proto.Field(proto.STRING, number=8,)
    service_level = proto.Field(proto.ENUM, number=10, enum="ServiceLevel",)
    delete_time = proto.Field(
        proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,
    )
    expire_time = proto.Field(
        proto.MESSAGE, number=12, message=timestamp_pb2.Timestamp,
    )
    account = proto.Field(proto.STRING, number=13,)
class DataStream(proto.Message):
    r"""A resource message representing a data stream.
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        web_stream_data (google.analytics.admin_v1alpha.types.DataStream.WebStreamData):
            Data specific to web streams. Must be populated if type is
            WEB_DATA_STREAM.
            This field is a member of `oneof`_ ``stream_data``.
        android_app_stream_data (google.analytics.admin_v1alpha.types.DataStream.AndroidAppStreamData):
            Data specific to Android app streams. Must be populated if
            type is ANDROID_APP_DATA_STREAM.
            This field is a member of `oneof`_ ``stream_data``.
        ios_app_stream_data (google.analytics.admin_v1alpha.types.DataStream.IosAppStreamData):
            Data specific to iOS app streams. Must be populated if type
            is IOS_APP_DATA_STREAM.
            This field is a member of `oneof`_ ``stream_data``.
        name (str):
            Output only. Resource name of this Data Stream. Format:
            properties/{property_id}/dataStreams/{stream_id} Example:
            "properties/1000/dataStreams/2000".
        type_ (google.analytics.admin_v1alpha.types.DataStream.DataStreamType):
            Required. Immutable. The type of this
            DataStream resource.
        display_name (str):
            Human-readable display name for the Data
            Stream.
            Required for web data streams.
            The max allowed display name length is 255
            UTF-16 code units.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when this stream was
            originally created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when stream payload fields
            were last updated.
    """
    class DataStreamType(proto.Enum):
        r"""The type of the data stream."""
        # Each value selects which member of the ``stream_data`` oneof
        # below is expected to be populated.
        DATA_STREAM_TYPE_UNSPECIFIED = 0
        WEB_DATA_STREAM = 1
        ANDROID_APP_DATA_STREAM = 2
        IOS_APP_DATA_STREAM = 3
    class WebStreamData(proto.Message):
        r"""Data specific to web streams.
        Attributes:
            measurement_id (str):
                Output only. Analytics "Measurement ID",
                without the "G-" prefix. Example: "G-1A2BCD345E"
                would just be "1A2BCD345E".
            firebase_app_id (str):
                Output only. ID of the corresponding web app
                in Firebase, if any. This ID can change if the
                web app is deleted and recreated.
            default_uri (str):
                Immutable. Domain name of the web app being
                measured, or empty. Example:
                "http://www.google.com",
                "https://www.google.com".
        """
        measurement_id = proto.Field(proto.STRING, number=1,)
        firebase_app_id = proto.Field(proto.STRING, number=2,)
        default_uri = proto.Field(proto.STRING, number=3,)
    class AndroidAppStreamData(proto.Message):
        r"""Data specific to Android app streams.
        Attributes:
            firebase_app_id (str):
                Output only. ID of the corresponding Android
                app in Firebase, if any. This ID can change if
                the Android app is deleted and recreated.
            package_name (str):
                Immutable. The package name for the app being
                measured. Example: "com.example.myandroidapp".
        """
        firebase_app_id = proto.Field(proto.STRING, number=1,)
        package_name = proto.Field(proto.STRING, number=2,)
    class IosAppStreamData(proto.Message):
        r"""Data specific to iOS app streams.
        Attributes:
            firebase_app_id (str):
                Output only. ID of the corresponding iOS app
                in Firebase, if any. This ID can change if the
                iOS app is deleted and recreated.
            bundle_id (str):
                Required. Immutable. The Apple App Store
                Bundle ID for the app Example:
                "com.example.myiosapp".
        """
        firebase_app_id = proto.Field(proto.STRING, number=1,)
        bundle_id = proto.Field(proto.STRING, number=2,)
    # The three stream payloads share the ``stream_data`` oneof: setting one
    # clears the others.
    web_stream_data = proto.Field(
        proto.MESSAGE, number=6, oneof="stream_data", message=WebStreamData,
    )
    android_app_stream_data = proto.Field(
        proto.MESSAGE, number=7, oneof="stream_data", message=AndroidAppStreamData,
    )
    ios_app_stream_data = proto.Field(
        proto.MESSAGE, number=8, oneof="stream_data", message=IosAppStreamData,
    )
    name = proto.Field(proto.STRING, number=1,)
    type_ = proto.Field(proto.ENUM, number=2, enum=DataStreamType,)
    display_name = proto.Field(proto.STRING, number=3,)
    create_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp,)
class UserLink(proto.Message):
    r"""A resource message representing a user's permissions on an
    Account or Property resource.
    Attributes:
        name (str):
            Output only. Example format:
            properties/1234/userLinks/5678
        email_address (str):
            Immutable. Email address of the user to link
        direct_roles (Sequence[str]):
            Roles directly assigned to this user for this account or
            property.
            Valid values: predefinedRoles/viewer predefinedRoles/analyst
            predefinedRoles/editor predefinedRoles/admin
            predefinedRoles/no-cost-data predefinedRoles/no-revenue-data
            Excludes roles that are inherited from a higher-level
            entity, group, or organization admin role.
            A UserLink that is updated to have an empty list of
            direct_roles will be deleted.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    email_address = proto.Field(proto.STRING, number=2,)
    direct_roles = proto.RepeatedField(proto.STRING, number=3,)
class AuditUserLink(proto.Message):
    r"""Read-only resource used to summarize a principal's effective
    roles.
    Attributes:
        name (str):
            Example format:
            properties/1234/userLinks/5678
        email_address (str):
            Email address of the linked user
        direct_roles (Sequence[str]):
            Roles directly assigned to this user for this
            entity.
            Format: predefinedRoles/viewer
            Excludes roles that are inherited from an
            account (if this is for a property), group, or
            organization admin role.
        effective_roles (Sequence[str]):
            Union of all permissions a user has at this
            account or property (includes direct
            permissions, group-inherited permissions, etc.).
            Format: predefinedRoles/viewer
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    email_address = proto.Field(proto.STRING, number=2,)
    direct_roles = proto.RepeatedField(proto.STRING, number=3,)
    effective_roles = proto.RepeatedField(proto.STRING, number=4,)
class FirebaseLink(proto.Message):
    r"""A link between a GA4 property and a Firebase project.
    Attributes:
        name (str):
            Output only. Example format:
            properties/1234/firebaseLinks/5678
        project (str):
            Immutable. Firebase project resource name. When creating a
            FirebaseLink, you may provide this resource name using
            either a project number or project ID. Once this resource
            has been created, returned FirebaseLinks will always have a
            project_name that contains a project number.
            Format: 'projects/{project number}' Example: 'projects/1234'
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when this FirebaseLink was
            originally created.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    project = proto.Field(proto.STRING, number=2,)
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
class GlobalSiteTag(proto.Message):
    r"""Read-only resource with the tag for sending data from a
    website to a DataStream. Only present for web DataStream
    resources.
    Attributes:
        name (str):
            Output only. Resource name for this GlobalSiteTag resource.
            Format:
            properties/{property_id}/dataStreams/{stream_id}/globalSiteTag
            Example: "properties/123/dataStreams/456/globalSiteTag".
        snippet (str):
            Immutable. JavaScript code snippet to be
            pasted as the first item into the head tag of
            every webpage to measure.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    snippet = proto.Field(proto.STRING, number=2,)
class GoogleAdsLink(proto.Message):
    r"""A link between a GA4 property and a Google Ads account.
    Attributes:
        name (str):
            Output only. Format:
            properties/{propertyId}/googleAdsLinks/{googleAdsLinkId}
            Note: googleAdsLinkId is not the Google Ads
            customer ID.
        customer_id (str):
            Immutable. Google Ads customer ID.
        can_manage_clients (bool):
            Output only. If true, this link is for a
            Google Ads manager account.
        ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Enable personalized advertising features with
            this integration. Automatically publish my
            Google Analytics audience lists and Google
            Analytics remarketing events/parameters to the
            linked Google Ads account. If this field is not
            set on create/update, it will be defaulted to
            true.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when this link was
            originally created.
        update_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when this link was last
            updated.
        creator_email_address (str):
            Output only. Email address of the user that
            created the link. An empty string will be
            returned if the email address can't be
            retrieved.
    """
    # NOTE: field numbers 2 and 6 are unused in this view — presumably
    # reserved; do not reuse or renumber. BoolValue wrapper distinguishes
    # "unset" from an explicit false.
    name = proto.Field(proto.STRING, number=1,)
    customer_id = proto.Field(proto.STRING, number=3,)
    can_manage_clients = proto.Field(proto.BOOL, number=4,)
    ads_personalization_enabled = proto.Field(
        proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue,
    )
    create_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
    update_time = proto.Field(proto.MESSAGE, number=8, message=timestamp_pb2.Timestamp,)
    creator_email_address = proto.Field(proto.STRING, number=9,)
class DataSharingSettings(proto.Message):
    r"""A resource message representing data sharing settings of a
    Google Analytics account.
    Attributes:
        name (str):
            Output only. Resource name.
            Format: accounts/{account}/dataSharingSettings
            Example: "accounts/1000/dataSharingSettings".
        sharing_with_google_support_enabled (bool):
            Allows Google support to access the data in
            order to help troubleshoot issues.
        sharing_with_google_assigned_sales_enabled (bool):
            Allows Google sales teams that are assigned
            to the customer to access the data in order to
            suggest configuration changes to improve
            results. Sales team restrictions still apply
            when enabled.
        sharing_with_google_any_sales_enabled (bool):
            Allows any of Google sales to access the data
            in order to suggest configuration changes to
            improve results.
        sharing_with_google_products_enabled (bool):
            Allows Google to use the data to improve
            other Google products or services.
        sharing_with_others_enabled (bool):
            Allows Google to share the data anonymously
            in aggregate form with others.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    sharing_with_google_support_enabled = proto.Field(proto.BOOL, number=2,)
    sharing_with_google_assigned_sales_enabled = proto.Field(proto.BOOL, number=3,)
    sharing_with_google_any_sales_enabled = proto.Field(proto.BOOL, number=4,)
    sharing_with_google_products_enabled = proto.Field(proto.BOOL, number=5,)
    sharing_with_others_enabled = proto.Field(proto.BOOL, number=6,)
class AccountSummary(proto.Message):
    r"""A virtual resource representing an overview of an account and
    all its child GA4 properties.
    Attributes:
        name (str):
            Resource name for this account summary. Format:
            accountSummaries/{account_id} Example:
            "accountSummaries/1000".
        account (str):
            Resource name of account referred to by this account summary
            Format: accounts/{account_id} Example: "accounts/1000".
        display_name (str):
            Display name for the account referred to in
            this account summary.
        property_summaries (Sequence[google.analytics.admin_v1alpha.types.PropertySummary]):
            List of summaries for child accounts of this
            account.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    account = proto.Field(proto.STRING, number=2,)
    display_name = proto.Field(proto.STRING, number=3,)
    property_summaries = proto.RepeatedField(
        proto.MESSAGE, number=4, message="PropertySummary",
    )
class PropertySummary(proto.Message):
    r"""A virtual resource representing metadata for a GA4 property.
    Attributes:
        property (str):
            Resource name of property referred to by this property
            summary Format: properties/{property_id} Example:
            "properties/1000".
        display_name (str):
            Display name for the property referred to in
            this property summary.
    """
    # Field numbers identify fields on the wire and must stay stable.
    property = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
class MeasurementProtocolSecret(proto.Message):
    r"""A secret value used for sending hits to Measurement Protocol.
    Attributes:
        name (str):
            Output only. Resource name of this secret.
            This secret may be a child of any type of
            stream. Format:
            properties/{property}/webDataStreams/{webDataStream}/measurementProtocolSecrets/{measurementProtocolSecret}
        display_name (str):
            Required. Human-readable display name for
            this secret.
        secret_value (str):
            Output only. The measurement protocol secret value. Pass
            this value to the api_secret field of the Measurement
            Protocol API when sending hits to this secret's parent
            property.
    """
    # Field numbers identify fields on the wire and must stay stable.
    name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
    secret_value = proto.Field(proto.STRING, number=3,)
class ChangeHistoryEvent(proto.Message):
    r"""A set of changes within a Google Analytics account or its
    child properties that resulted from the same cause. Common
    causes would be updates made in the Google Analytics UI, changes
    from customer support, or automatic Google Analytics system
    changes.
    Attributes:
        id (str):
            ID of this change history event. This ID is
            unique across Google Analytics.
        change_time (google.protobuf.timestamp_pb2.Timestamp):
            Time when change was made.
        actor_type (google.analytics.admin_v1alpha.types.ActorType):
            The type of actor that made this change.
        user_actor_email (str):
            Email address of the Google account that made
            the change. This will be a valid email address
            if the actor field is set to USER, and empty
            otherwise. Google accounts that have been
            deleted will cause an error.
        changes_filtered (bool):
            If true, then the list of changes returned
            was filtered, and does not represent all changes
            that occurred in this event.
        changes (Sequence[google.analytics.admin_v1alpha.types.ChangeHistoryChange]):
            A list of changes made in this change history
            event that fit the filters specified in
            SearchChangeHistoryEventsRequest.
    """
    # Field numbers identify fields on the wire and must stay stable.
    id = proto.Field(proto.STRING, number=1,)
    change_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    actor_type = proto.Field(proto.ENUM, number=3, enum="ActorType",)
    user_actor_email = proto.Field(proto.STRING, number=4,)
    changes_filtered = proto.Field(proto.BOOL, number=5,)
    changes = proto.RepeatedField(
        proto.MESSAGE, number=6, message="ChangeHistoryChange",
    )
class ChangeHistoryChange(proto.Message):
    r"""A description of a change to a single Google Analytics
    resource.
    Attributes:
        resource (str):
            Resource name of the resource whose changes
            are described by this entry.
        action (google.analytics.admin_v1alpha.types.ActionType):
            The type of action that changed this
            resource.
        resource_before_change (google.analytics.admin_v1alpha.types.ChangeHistoryChange.ChangeHistoryResource):
            Resource contents from before the change was
            made. If this resource was created in this
            change, this field will be missing.
        resource_after_change (google.analytics.admin_v1alpha.types.ChangeHistoryChange.ChangeHistoryResource):
            Resource contents from after the change was
            made. If this resource was deleted in this
            change, this field will be missing.
    """
    class ChangeHistoryResource(proto.Message):
        r"""A snapshot of a resource as before or after the result of a
        change in change history.
        This message has `oneof`_ fields (mutually exclusive fields).
        For each oneof, at most one member field can be set at the same time.
        Setting any member of the oneof automatically clears all other
        members.
        .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
        Attributes:
            account (google.analytics.admin_v1alpha.types.Account):
                A snapshot of an Account resource in change
                history.
                This field is a member of `oneof`_ ``resource``.
            property (google.analytics.admin_v1alpha.types.Property):
                A snapshot of a Property resource in change
                history.
                This field is a member of `oneof`_ ``resource``.
            firebase_link (google.analytics.admin_v1alpha.types.FirebaseLink):
                A snapshot of a FirebaseLink resource in
                change history.
                This field is a member of `oneof`_ ``resource``.
            google_ads_link (google.analytics.admin_v1alpha.types.GoogleAdsLink):
                A snapshot of a GoogleAdsLink resource in
                change history.
                This field is a member of `oneof`_ ``resource``.
            google_signals_settings (google.analytics.admin_v1alpha.types.GoogleSignalsSettings):
                A snapshot of a GoogleSignalsSettings
                resource in change history.
                This field is a member of `oneof`_ ``resource``.
            display_video_360_advertiser_link (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink):
                A snapshot of a DisplayVideo360AdvertiserLink
                resource in change history.
                This field is a member of `oneof`_ ``resource``.
            display_video_360_advertiser_link_proposal (google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal):
                A snapshot of a
                DisplayVideo360AdvertiserLinkProposal resource
                in change history.
                This field is a member of `oneof`_ ``resource``.
            conversion_event (google.analytics.admin_v1alpha.types.ConversionEvent):
                A snapshot of a ConversionEvent resource in
                change history.
                This field is a member of `oneof`_ ``resource``.
            measurement_protocol_secret (google.analytics.admin_v1alpha.types.MeasurementProtocolSecret):
                A snapshot of a MeasurementProtocolSecret
                resource in change history.
                This field is a member of `oneof`_ ``resource``.
            custom_dimension (google.analytics.admin_v1alpha.types.CustomDimension):
                A snapshot of a CustomDimension resource in
                change history.
                This field is a member of `oneof`_ ``resource``.
            custom_metric (google.analytics.admin_v1alpha.types.CustomMetric):
                A snapshot of a CustomMetric resource in
                change history.
                This field is a member of `oneof`_ ``resource``.
            data_retention_settings (google.analytics.admin_v1alpha.types.DataRetentionSettings):
                A snapshot of a data retention settings
                resource in change history.
                This field is a member of `oneof`_ ``resource``.
            data_stream (google.analytics.admin_v1alpha.types.DataStream):
                A snapshot of a DataStream resource in change
                history.
                This field is a member of `oneof`_ ``resource``.
        """
        # All members share the ``resource`` oneof: at most one snapshot is
        # set. Field-number gaps (3-5, 16-17) are presumably reserved or used
        # by other API versions — TODO confirm against the .proto source.
        account = proto.Field(
            proto.MESSAGE, number=1, oneof="resource", message="Account",
        )
        property = proto.Field(
            proto.MESSAGE, number=2, oneof="resource", message="Property",
        )
        firebase_link = proto.Field(
            proto.MESSAGE, number=6, oneof="resource", message="FirebaseLink",
        )
        google_ads_link = proto.Field(
            proto.MESSAGE, number=7, oneof="resource", message="GoogleAdsLink",
        )
        google_signals_settings = proto.Field(
            proto.MESSAGE, number=8, oneof="resource", message="GoogleSignalsSettings",
        )
        display_video_360_advertiser_link = proto.Field(
            proto.MESSAGE,
            number=9,
            oneof="resource",
            message="DisplayVideo360AdvertiserLink",
        )
        display_video_360_advertiser_link_proposal = proto.Field(
            proto.MESSAGE,
            number=10,
            oneof="resource",
            message="DisplayVideo360AdvertiserLinkProposal",
        )
        conversion_event = proto.Field(
            proto.MESSAGE, number=11, oneof="resource", message="ConversionEvent",
        )
        measurement_protocol_secret = proto.Field(
            proto.MESSAGE,
            number=12,
            oneof="resource",
            message="MeasurementProtocolSecret",
        )
        custom_dimension = proto.Field(
            proto.MESSAGE, number=13, oneof="resource", message="CustomDimension",
        )
        custom_metric = proto.Field(
            proto.MESSAGE, number=14, oneof="resource", message="CustomMetric",
        )
        data_retention_settings = proto.Field(
            proto.MESSAGE, number=15, oneof="resource", message="DataRetentionSettings",
        )
        data_stream = proto.Field(
            proto.MESSAGE, number=18, oneof="resource", message="DataStream",
        )
    resource = proto.Field(proto.STRING, number=1,)
    action = proto.Field(proto.ENUM, number=2, enum="ActionType",)
    resource_before_change = proto.Field(
        proto.MESSAGE, number=3, message=ChangeHistoryResource,
    )
    resource_after_change = proto.Field(
        proto.MESSAGE, number=4, message=ChangeHistoryResource,
    )
class DisplayVideo360AdvertiserLink(proto.Message):
    r"""A link between a GA4 property and a Display & Video 360
    advertiser.
    Attributes:
        name (str):
            Output only. The resource name for this
            DisplayVideo360AdvertiserLink resource. Format:
            properties/{propertyId}/displayVideo360AdvertiserLinks/{linkId}
            Note: linkId is not the Display & Video 360
            Advertiser ID
        advertiser_id (str):
            Immutable. The Display & Video 360
            Advertiser's advertiser ID.
        advertiser_display_name (str):
            Output only. The display name of the Display
            & Video 360 Advertiser.
        ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Enables personalized advertising features
            with this integration. If this field is not set
            on create/update, it will be defaulted to true.
        campaign_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Immutable. Enables the import of campaign
            data from Display & Video 360 into the GA4
            property. After link creation, this can only be
            updated from the Display & Video 360 product.
            If this field is not set on create, it will be
            defaulted to true.
        cost_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Immutable. Enables the import of cost data from Display &
            Video 360 into the GA4 property. This can only be enabled if
            campaign_data_sharing_enabled is enabled. After link
            creation, this can only be updated from the Display & Video
            360 product. If this field is not set on create, it will be
            defaulted to true.
    """
    # proto-plus field declarations. The `number=` values mirror the
    # .proto schema for this message and must never be renumbered.
    # BoolValue wrappers are used so that "unset" is distinguishable
    # from an explicit false.
    name = proto.Field(proto.STRING, number=1,)
    advertiser_id = proto.Field(proto.STRING, number=2,)
    advertiser_display_name = proto.Field(proto.STRING, number=3,)
    ads_personalization_enabled = proto.Field(
        proto.MESSAGE, number=4, message=wrappers_pb2.BoolValue,
    )
    campaign_data_sharing_enabled = proto.Field(
        proto.MESSAGE, number=5, message=wrappers_pb2.BoolValue,
    )
    cost_data_sharing_enabled = proto.Field(
        proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue,
    )
class DisplayVideo360AdvertiserLinkProposal(proto.Message):
    r"""A proposal for a link between a GA4 property and a Display &
    Video 360 advertiser.
    A proposal is converted to a DisplayVideo360AdvertiserLink once
    approved. Google Analytics admins approve inbound proposals
    while Display & Video 360 admins approve outbound proposals.
    Attributes:
        name (str):
            Output only. The resource name for this
            DisplayVideo360AdvertiserLinkProposal resource.
            Format:
            properties/{propertyId}/displayVideo360AdvertiserLinkProposals/{proposalId}
            Note: proposalId is not the Display & Video 360
            Advertiser ID
        advertiser_id (str):
            Immutable. The Display & Video 360
            Advertiser's advertiser ID.
        link_proposal_status_details (google.analytics.admin_v1alpha.types.LinkProposalStatusDetails):
            Output only. The status information for this
            link proposal.
        advertiser_display_name (str):
            Output only. The display name of the Display
            & Video Advertiser. Only populated for proposals
            that originated from Display & Video 360.
        validation_email (str):
            Input only. On a proposal being sent to
            Display & Video 360, this field must be set to
            the email address of an admin on the target
            advertiser. This is used to verify that the
            Google Analytics admin is aware of at least one
            admin on the Display & Video 360 Advertiser.
            This does not restrict approval of the proposal
            to a single user. Any admin on the Display &
            Video 360 Advertiser may approve the proposal.
        ads_personalization_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Immutable. Enables personalized advertising
            features with this integration. If this field is
            not set on create, it will be defaulted to true.
        campaign_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Immutable. Enables the import of campaign
            data from Display & Video 360. If this field is
            not set on create, it will be defaulted to true.
        cost_data_sharing_enabled (google.protobuf.wrappers_pb2.BoolValue):
            Immutable. Enables the import of cost data from Display &
            Video 360. This can only be enabled if
            campaign_data_sharing_enabled is enabled. If this field is
            not set on create, it will be defaulted to true.
    """
    # Field numbers mirror the .proto schema; "LinkProposalStatusDetails"
    # is referenced by name because the class is declared later in this
    # module (proto-plus resolves the string lazily).
    name = proto.Field(proto.STRING, number=1,)
    advertiser_id = proto.Field(proto.STRING, number=2,)
    link_proposal_status_details = proto.Field(
        proto.MESSAGE, number=3, message="LinkProposalStatusDetails",
    )
    advertiser_display_name = proto.Field(proto.STRING, number=4,)
    validation_email = proto.Field(proto.STRING, number=5,)
    ads_personalization_enabled = proto.Field(
        proto.MESSAGE, number=6, message=wrappers_pb2.BoolValue,
    )
    campaign_data_sharing_enabled = proto.Field(
        proto.MESSAGE, number=7, message=wrappers_pb2.BoolValue,
    )
    cost_data_sharing_enabled = proto.Field(
        proto.MESSAGE, number=8, message=wrappers_pb2.BoolValue,
    )
class LinkProposalStatusDetails(proto.Message):
    r"""Status information for a link proposal.
    Attributes:
        link_proposal_initiating_product (google.analytics.admin_v1alpha.types.LinkProposalInitiatingProduct):
            Output only. The source of this proposal.
        requestor_email (str):
            Output only. The email address of the user
            that proposed this linkage.
        link_proposal_state (google.analytics.admin_v1alpha.types.LinkProposalState):
            Output only. The state of this proposal.
    """
    # Enums are referenced by name; they are declared elsewhere in this
    # module and resolved lazily by proto-plus.
    link_proposal_initiating_product = proto.Field(
        proto.ENUM, number=1, enum="LinkProposalInitiatingProduct",
    )
    requestor_email = proto.Field(proto.STRING, number=2,)
    link_proposal_state = proto.Field(proto.ENUM, number=3, enum="LinkProposalState",)
class ConversionEvent(proto.Message):
    r"""A conversion event in a Google Analytics property.
    Attributes:
        name (str):
            Output only. Resource name of this conversion event. Format:
            properties/{property}/conversionEvents/{conversion_event}
        event_name (str):
            Immutable. The event name for this conversion
            event. Examples: 'click', 'purchase'
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. Time when this conversion event
            was created in the property.
        deletable (bool):
            Output only. If set, this event can currently
            be deleted via DeleteConversionEvent.
        custom (bool):
            Output only. If set to true, this conversion
            event refers to a custom event. If set to
            false, this conversion event refers to a default
            event in GA. Default events typically have
            special meaning in GA. Default events are
            usually created for you by the GA system, but in
            some cases can be created by property admins.
            Custom events count towards the maximum number
            of custom conversion events that may be created
            per property.
    """
    # Field numbers mirror the ConversionEvent message in the .proto schema.
    name = proto.Field(proto.STRING, number=1,)
    event_name = proto.Field(proto.STRING, number=2,)
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    deletable = proto.Field(proto.BOOL, number=4,)
    custom = proto.Field(proto.BOOL, number=5,)
class GoogleSignalsSettings(proto.Message):
    r"""Settings values for Google Signals. This is a singleton
    resource.
    Attributes:
        name (str):
            Output only. Resource name of this setting. Format:
            properties/{property_id}/googleSignalsSettings Example:
            "properties/1000/googleSignalsSettings".
        state (google.analytics.admin_v1alpha.types.GoogleSignalsState):
            Status of this setting.
        consent (google.analytics.admin_v1alpha.types.GoogleSignalsConsent):
            Output only. Terms of Service acceptance.
    """
    # NOTE: field number 2 is intentionally absent here; numbers must
    # match the .proto schema (presumably a reserved/removed field —
    # confirm against the upstream proto before reusing it).
    name = proto.Field(proto.STRING, number=1,)
    state = proto.Field(proto.ENUM, number=3, enum="GoogleSignalsState",)
    consent = proto.Field(proto.ENUM, number=4, enum="GoogleSignalsConsent",)
class CustomDimension(proto.Message):
    r"""A definition for a CustomDimension.
    Attributes:
        name (str):
            Output only. Resource name for this
            CustomDimension resource. Format:
            properties/{property}/customDimensions/{customDimension}
        parameter_name (str):
            Required. Immutable. Tagging parameter name
            for this custom dimension.
            If this is a user-scoped dimension, then this is
            the user property name. If this is an
            event-scoped dimension, then this is the event
            parameter name.
            May only contain alphanumeric and underscore
            characters, starting with a letter. Max length
            of 24 characters for user-scoped dimensions, 40
            characters for event-scoped dimensions.
        display_name (str):
            Required. Display name for this custom
            dimension as shown in the Analytics UI. Max
            length of 82 characters, alphanumeric plus space
            and underscore starting with a letter. Legacy
            system-generated display names may contain
            square brackets, but updates to this field will
            never permit square brackets.
        description (str):
            Optional. Description for this custom
            dimension. Max length of 150 characters.
        scope (google.analytics.admin_v1alpha.types.CustomDimension.DimensionScope):
            Required. Immutable. The scope of this
            dimension.
        disallow_ads_personalization (bool):
            Optional. If set to true, sets this dimension
            as NPA and excludes it from ads personalization.
            This is currently only supported by user-scoped
            custom dimensions.
    """

    class DimensionScope(proto.Enum):
        r"""Valid values for the scope of this dimension."""
        DIMENSION_SCOPE_UNSPECIFIED = 0
        EVENT = 1
        USER = 2

    # The nested DimensionScope enum is referenced directly (not by
    # string name) because it is already defined above.
    name = proto.Field(proto.STRING, number=1,)
    parameter_name = proto.Field(proto.STRING, number=2,)
    display_name = proto.Field(proto.STRING, number=3,)
    description = proto.Field(proto.STRING, number=4,)
    scope = proto.Field(proto.ENUM, number=5, enum=DimensionScope,)
    disallow_ads_personalization = proto.Field(proto.BOOL, number=6,)
class CustomMetric(proto.Message):
    r"""A definition for a custom metric.
    Attributes:
        name (str):
            Output only. Resource name for this
            CustomMetric resource. Format:
            properties/{property}/customMetrics/{customMetric}
        parameter_name (str):
            Required. Immutable. Tagging name for this
            custom metric.
            If this is an event-scoped metric, then this is
            the event parameter name.
            May only contain alphanumeric and underscore
            characters, starting with a letter. Max length of
            40 characters for event-scoped metrics.
        display_name (str):
            Required. Display name for this custom metric
            as shown in the Analytics UI. Max length of 82
            characters, alphanumeric plus space and
            underscore starting with a letter. Legacy
            system-generated display names may contain
            square brackets, but updates to this field will
            never permit square brackets.
        description (str):
            Optional. Description for this custom
            metric. Max length of 150 characters.
        measurement_unit (google.analytics.admin_v1alpha.types.CustomMetric.MeasurementUnit):
            Required. The type for the custom metric's
            value.
        scope (google.analytics.admin_v1alpha.types.CustomMetric.MetricScope):
            Required. Immutable. The scope of this custom
            metric.
        restricted_metric_type (Sequence[google.analytics.admin_v1alpha.types.CustomMetric.RestrictedMetricType]):
            Optional. Types of restricted data that this
            metric may contain. Required for metrics with
            CURRENCY measurement unit. Must be empty for
            metrics with a non-CURRENCY measurement unit.
    """

    class MeasurementUnit(proto.Enum):
        r"""Possible types of representing the custom metric's value.
        Currency representation may change in the future, requiring a
        breaking API change.
        """
        MEASUREMENT_UNIT_UNSPECIFIED = 0
        STANDARD = 1
        CURRENCY = 2
        FEET = 3
        METERS = 4
        KILOMETERS = 5
        MILES = 6
        MILLISECONDS = 7
        SECONDS = 8
        MINUTES = 9
        HOURS = 10

    class MetricScope(proto.Enum):
        r"""The scope of this metric."""
        METRIC_SCOPE_UNSPECIFIED = 0
        EVENT = 1

    class RestrictedMetricType(proto.Enum):
        r"""Labels that mark the data in this custom metric as data that
        should be restricted to specific users.
        """
        RESTRICTED_METRIC_TYPE_UNSPECIFIED = 0
        COST_DATA = 1
        REVENUE_DATA = 2

    # NOTE: field number 7 is skipped (restricted_metric_type is 8);
    # numbers must match the .proto schema — presumably a reserved
    # field, confirm against the upstream proto before reusing it.
    name = proto.Field(proto.STRING, number=1,)
    parameter_name = proto.Field(proto.STRING, number=2,)
    display_name = proto.Field(proto.STRING, number=3,)
    description = proto.Field(proto.STRING, number=4,)
    measurement_unit = proto.Field(proto.ENUM, number=5, enum=MeasurementUnit,)
    scope = proto.Field(proto.ENUM, number=6, enum=MetricScope,)
    restricted_metric_type = proto.RepeatedField(
        proto.ENUM, number=8, enum=RestrictedMetricType,
    )
class DataRetentionSettings(proto.Message):
    r"""Settings values for data retention. This is a singleton
    resource.
    Attributes:
        name (str):
            Output only. Resource name for this
            DataRetentionSetting resource. Format:
            properties/{property}/dataRetentionSettings
        event_data_retention (google.analytics.admin_v1alpha.types.DataRetentionSettings.RetentionDuration):
            The length of time that event-level data is
            retained.
        reset_user_data_on_new_activity (bool):
            If true, reset the retention period for the
            user identifier with every event from that user.
    """

    class RetentionDuration(proto.Enum):
        r"""Valid values for the data retention duration."""
        RETENTION_DURATION_UNSPECIFIED = 0
        TWO_MONTHS = 1
        # NOTE: value 2 is absent in the schema; these numbers come from
        # the upstream .proto and must not be renumbered.
        FOURTEEN_MONTHS = 3
        TWENTY_SIX_MONTHS = 4
        THIRTY_EIGHT_MONTHS = 5
        FIFTY_MONTHS = 6

    name = proto.Field(proto.STRING, number=1,)
    event_data_retention = proto.Field(proto.ENUM, number=2, enum=RetentionDuration,)
    reset_user_data_on_new_activity = proto.Field(proto.BOOL, number=3,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
|
"""Test the tractive config flow."""
from unittest.mock import patch
import aiotractive
from homeassistant import config_entries, setup
from homeassistant.components.tractive.const import DOMAIN
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
USER_INPUT = {
"email": "test-email@example.com",
"password": "test-password",
}
async def test_form(hass: HomeAssistant) -> None:
    """Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # The first step must show an empty user form.
    assert init_result["type"] == "form"
    assert init_result["errors"] is None
    # Stub out the Tractive client and the integration setup so no
    # network or platform setup happens.
    with patch(
        "aiotractive.api.API.user_id", return_value={"user_id": "user_id"}
    ):
        with patch(
            "homeassistant.components.tractive.async_setup_entry",
            return_value=True,
        ) as mock_setup_entry:
            entry_result = await hass.config_entries.flow.async_configure(
                init_result["flow_id"],
                USER_INPUT,
            )
            await hass.async_block_till_done()
    # Submitting valid credentials creates an entry titled with the email.
    assert entry_result["type"] == "create_entry"
    assert entry_result["title"] == "test-email@example.com"
    assert entry_result["data"] == USER_INPUT
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass: HomeAssistant) -> None:
    """Test we handle invalid auth."""
    init_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Make the Tractive client reject the submitted credentials.
    reject_credentials = patch(
        "aiotractive.api.API.user_id",
        side_effect=aiotractive.exceptions.UnauthorizedError,
    )
    with reject_credentials:
        form_result = await hass.config_entries.flow.async_configure(
            init_result["flow_id"],
            USER_INPUT,
        )
    # The form is shown again with an auth error instead of creating an entry.
    assert form_result["type"] == "form"
    assert form_result["errors"] == {"base": "invalid_auth"}
async def test_form_unknown_error(hass: HomeAssistant) -> None:
    """Test we handle an unknown error."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # A bare Exception simulates an unexpected (non-auth) client failure.
    with patch(
        "aiotractive.api.API.user_id",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            USER_INPUT,
        )
    # The flow falls back to re-showing the form with a generic error.
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_flow_entry_already_exists(hass: HomeAssistant) -> None:
    """Test user input for config_entry that already exists."""
    # Pre-register a config entry for the same Tractive account.
    existing_entry = MockConfigEntry(
        domain="tractive",
        data=USER_INPUT,
        unique_id="USERID",
    )
    existing_entry.add_to_hass(hass)
    with patch("aiotractive.api.API.user_id", return_value="USERID"):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}, data=USER_INPUT
        )
    # A duplicate account must abort instead of creating a second entry.
    assert result["type"] == "abort"
    assert result["reason"] == "already_configured"
async def test_reauthentication(hass):
    """Test Tractive reauthentication."""
    # An entry for the account already exists; reauth should update it.
    old_entry = MockConfigEntry(
        domain="tractive",
        data=USER_INPUT,
        unique_id="USERID",
    )
    old_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": old_entry.unique_id,
            "entry_id": old_entry.entry_id,
        },
        data=old_entry.data,
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    assert result["step_id"] == "reauth_confirm"
    # The client returns the same user id as the existing entry, so the
    # reauth succeeds and aborts the flow (the entry is refreshed in place).
    with patch("aiotractive.api.API.user_id", return_value="USERID"):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            USER_INPUT,
        )
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "reauth_successful"
async def test_reauthentication_failure(hass):
    """Test Tractive reauthentication failure."""
    # An entry for the account already exists; reauth will be attempted on it.
    old_entry = MockConfigEntry(
        domain="tractive",
        data=USER_INPUT,
        unique_id="USERID",
    )
    old_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": old_entry.unique_id,
            "entry_id": old_entry.entry_id,
        },
        data=old_entry.data,
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    assert result["step_id"] == "reauth_confirm"
    # The Tractive client rejects the new credentials.
    with patch(
        "aiotractive.api.API.user_id",
        side_effect=aiotractive.exceptions.UnauthorizedError,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            USER_INPUT,
        )
        await hass.async_block_till_done()
    # BUG FIX: the original asserted `result["type"]` (the first form,
    # trivially true) — the re-shown reauth form is `result2`.
    assert result2["type"] == "form"
    assert result2["step_id"] == "reauth_confirm"
    assert result2["errors"]["base"] == "invalid_auth"
async def test_reauthentication_unknown_failure(hass):
    """Test Tractive reauthentication failing with an unknown error."""
    # An entry for the account already exists; reauth will be attempted on it.
    old_entry = MockConfigEntry(
        domain="tractive",
        data=USER_INPUT,
        unique_id="USERID",
    )
    old_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": old_entry.unique_id,
            "entry_id": old_entry.entry_id,
        },
        data=old_entry.data,
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    assert result["step_id"] == "reauth_confirm"
    # A bare Exception simulates an unexpected (non-auth) client failure.
    with patch(
        "aiotractive.api.API.user_id",
        side_effect=Exception,
    ):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            USER_INPUT,
        )
        await hass.async_block_till_done()
    # BUG FIX: the original asserted `result["type"]` (the first form,
    # trivially true) — the re-shown reauth form is `result2`.
    assert result2["type"] == "form"
    assert result2["step_id"] == "reauth_confirm"
    assert result2["errors"]["base"] == "unknown"
async def test_reauthentication_failure_no_existing_entry(hass):
    """Test Tractive reauthentication when no entry matches the account."""
    old_entry = MockConfigEntry(
        domain="tractive",
        data=USER_INPUT,
        unique_id="USERID",
    )
    old_entry.add_to_hass(hass)
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": config_entries.SOURCE_REAUTH,
            "unique_id": old_entry.unique_id,
            "entry_id": old_entry.entry_id,
        },
        data=old_entry.data,
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    assert result["step_id"] == "reauth_confirm"
    # The credentials belong to a different account than the entry being
    # reauthenticated, so no existing entry matches the returned user id.
    with patch("aiotractive.api.API.user_id", return_value="USERID_DIFFERENT"):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            USER_INPUT,
        )
        await hass.async_block_till_done()
    assert result2["type"] == "abort"
    assert result2["reason"] == "reauth_failed_existing"
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import benchmark
from telemetry import story
from telemetry.internal.results import base_test_results_unittest
from telemetry.internal.results import chart_json_output_formatter
from telemetry.internal.results import json_output_formatter
from telemetry.internal.results import page_test_results
from telemetry import page as page_module
from telemetry.testing import stream
from telemetry.value import failure
from telemetry.value import histogram
from telemetry.value import improvement_direction
from telemetry.value import scalar
from telemetry.value import skip
from telemetry.value import trace
from tracing.trace_data import trace_data
from tracing.value import histogram as histogram_module
from tracing.value import histogram_set
from tracing.value.diagnostics import diagnostic
from tracing.value.diagnostics import reserved_infos
class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
def setUp(self):
story_set = story.StorySet(base_dir=os.path.dirname(__file__))
story_set.AddStory(page_module.Page("http://www.bar.com/", story_set,
story_set.base_dir,
name='http://www.bar.com/'))
story_set.AddStory(page_module.Page("http://www.baz.com/", story_set,
story_set.base_dir,
name='http://www.baz.com/'))
story_set.AddStory(page_module.Page("http://www.foo.com/", story_set,
story_set.base_dir,
name='http://www.foo.com/'))
self.story_set = story_set
  @property
  def pages(self):
    """Convenience accessor for the stories built in setUp."""
    return self.story_set.stories
  def testFailures(self):
    """A run that records a FailureValue is failed; others still succeed."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(
        failure.FailureValue(self.pages[0], self.CreateException()))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.DidRunPage(self.pages[1])
    self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
    self.assertEqual(set([self.pages[1]]), results.pages_that_succeeded)
    self.assertEqual(2, len(results.all_page_runs))
    self.assertTrue(results.all_page_runs[0].failed)
    self.assertTrue(results.all_page_runs[1].ok)
def testSkips(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(skip.SkipValue(self.pages[0], 'testing reason'))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
results.DidRunPage(self.pages[1])
self.assertTrue(results.all_page_runs[0].skipped)
self.assertEqual(self.pages[0], results.all_page_runs[0].story)
self.assertEqual(set([self.pages[0], self.pages[1]]),
results.pages_that_succeeded)
self.assertEqual(2, len(results.all_page_runs))
self.assertTrue(results.all_page_runs[0].skipped)
self.assertTrue(results.all_page_runs[1].ok)
  def testPassesNoSkips(self):
    """pages_that_succeeded_and_not_skipped excludes both failed and skipped."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(
        failure.FailureValue(self.pages[0], self.CreateException()))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.DidRunPage(self.pages[1])
    results.WillRunPage(self.pages[2])
    results.AddValue(skip.SkipValue(self.pages[2], 'testing reason'))
    results.DidRunPage(self.pages[2])
    self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
    self.assertEqual(set([self.pages[1], self.pages[2]]),
                     results.pages_that_succeeded)
    # Only the clean pass remains once skips are filtered out too.
    self.assertEqual(set([self.pages[1]]),
                     results.pages_that_succeeded_and_not_skipped)
    self.assertEqual(3, len(results.all_page_runs))
    self.assertTrue(results.all_page_runs[0].failed)
    self.assertTrue(results.all_page_runs[1].ok)
    self.assertTrue(results.all_page_runs[2].skipped)
  def testBasic(self):
    """Values added per page are retrievable via the Find* query helpers."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    values = results.FindPageSpecificValuesForPage(self.pages[0], 'a')
    self.assertEquals(1, len(values))
    v = values[0]
    self.assertEquals(v.name, 'a')
    self.assertEquals(v.page, self.pages[0])
    # Both pages recorded an 'a' value, so the name-based lookup finds two.
    values = results.FindAllPageSpecificValuesNamed('a')
    assert len(values) == 2
  def testAddValueWithStoryGroupingKeys(self):
    """A page's grouping keys propagate onto added values and the tir_label."""
    results = page_test_results.PageTestResults()
    self.pages[0].grouping_keys['foo'] = 'bar'
    self.pages[0].grouping_keys['answer'] = '42'
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.PrintSummary()
    values = results.FindPageSpecificValuesForPage(self.pages[0], 'a')
    v = values[0]
    self.assertEquals(v.grouping_keys['foo'], 'bar')
    self.assertEquals(v.grouping_keys['answer'], '42')
    # tir_label is derived from the grouping-key values ('42' + 'bar').
    self.assertEquals(v.tir_label, '42_bar')
  def testAddValueWithStoryGroupingKeysAndMatchingTirLabel(self):
    """An explicit tir_label that matches the derived label is accepted."""
    results = page_test_results.PageTestResults()
    self.pages[0].grouping_keys['foo'] = 'bar'
    self.pages[0].grouping_keys['answer'] = '42'
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP,
        tir_label='42_bar'))
    results.DidRunPage(self.pages[0])
    results.PrintSummary()
    values = results.FindPageSpecificValuesForPage(self.pages[0], 'a')
    v = values[0]
    self.assertEquals(v.grouping_keys['foo'], 'bar')
    self.assertEquals(v.grouping_keys['answer'], '42')
    self.assertEquals(v.tir_label, '42_bar')
  def testAddValueWithStoryGroupingKeysAndMismatchingTirLabel(self):
    """An explicit tir_label conflicting with the derived label is rejected."""
    results = page_test_results.PageTestResults()
    self.pages[0].grouping_keys['foo'] = 'bar'
    self.pages[0].grouping_keys['answer'] = '42'
    results.WillRunPage(self.pages[0])
    with self.assertRaises(AssertionError):
      results.AddValue(scalar.ScalarValue(
          self.pages[0], 'a', 'seconds', 3,
          improvement_direction=improvement_direction.UP,
          tir_label='another_label'))
  def testAddValueWithDuplicateStoryGroupingKeyFails(self):
    """A value may not redefine a grouping key the story already carries."""
    results = page_test_results.PageTestResults()
    self.pages[0].grouping_keys['foo'] = 'bar'
    results.WillRunPage(self.pages[0])
    with self.assertRaises(AssertionError):
      results.AddValue(scalar.ScalarValue(
          self.pages[0], 'a', 'seconds', 3,
          improvement_direction=improvement_direction.UP,
          grouping_keys={'foo': 'bar'}))
def testUrlIsInvalidValue(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
self.assertRaises(
AssertionError,
lambda: results.AddValue(scalar.ScalarValue(
self.pages[0], 'url', 'string', 'foo',
improvement_direction=improvement_direction.UP)))
def testAddSummaryValueWithPageSpecified(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
self.assertRaises(
AssertionError,
lambda: results.AddSummaryValue(scalar.ScalarValue(
self.pages[0], 'a', 'units', 3,
improvement_direction=improvement_direction.UP)))
def testUnitChange(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(
self.pages[0], 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
self.assertRaises(
AssertionError,
lambda: results.AddValue(scalar.ScalarValue(
self.pages[1], 'a', 'foobgrobbers', 3,
improvement_direction=improvement_direction.UP)))
def testTypeChange(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
results.AddValue(scalar.ScalarValue(
self.pages[0], 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP))
results.DidRunPage(self.pages[0])
results.WillRunPage(self.pages[1])
self.assertRaises(
AssertionError,
lambda: results.AddValue(histogram.HistogramValue(
self.pages[1], 'a', 'seconds',
raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
improvement_direction=improvement_direction.UP)))
  def testGetPagesThatSucceededAllPagesFail(self):
    """pages_that_succeeded is empty when every run also records a failure."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'message'))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'a', 'seconds', 7,
        improvement_direction=improvement_direction.UP))
    results.AddValue(failure.FailureValue.FromMessage(self.pages[1], 'message'))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    self.assertEquals(0, len(results.pages_that_succeeded))
  def testGetSuccessfulPageValuesMergedNoFailures(self):
    """A value added during a run is immediately visible in the results."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    # The value is counted even before DidRunPage closes the run.
    self.assertEquals(1, len(results.all_page_specific_values))
    results.DidRunPage(self.pages[0])
  def testGetAllValuesForSuccessfulPages(self):
    """all_page_specific_values preserves insertion order across pages."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    value1 = scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP)
    results.AddValue(value1)
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    value2 = scalar.ScalarValue(
        self.pages[1], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP)
    results.AddValue(value2)
    results.DidRunPage(self.pages[1])
    results.WillRunPage(self.pages[2])
    value3 = scalar.ScalarValue(
        self.pages[2], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP)
    results.AddValue(value3)
    results.DidRunPage(self.pages[2])
    self.assertEquals(
        [value1, value2, value3], results.all_page_specific_values)
  def testGetAllValuesForSuccessfulPagesOnePageFails(self):
    """Failure values appear in all_page_specific_values alongside scalars."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    value1 = scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP)
    results.AddValue(value1)
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    # The middle page records a failure instead of a measurement.
    value2 = failure.FailureValue.FromMessage(self.pages[1], 'Failure')
    results.AddValue(value2)
    results.DidRunPage(self.pages[1])
    results.WillRunPage(self.pages[2])
    value3 = scalar.ScalarValue(
        self.pages[2], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP)
    results.AddValue(value3)
    results.DidRunPage(self.pages[2])
    self.assertEquals(
        [value1, value2, value3], results.all_page_specific_values)
def testFindValues(self):
results = page_test_results.PageTestResults()
results.WillRunPage(self.pages[0])
v0 = scalar.ScalarValue(
self.pages[0], 'a', 'seconds', 3,
improvement_direction=improvement_direction.UP)
results.AddValue(v0)
v1 = scalar.ScalarValue(
self.pages[0], 'a', 'seconds', 4,
improvement_direction=improvement_direction.UP)
results.AddValue(v1)
results.DidRunPage(self.pages[1])
values = results.FindValues(lambda v: v.value == 3)
self.assertEquals([v0], values)
  def testValueWithTIRLabel(self):
    """Lookup by interaction-record (tir) label returns only matching values."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    v0 = scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3, tir_label='foo',
        improvement_direction=improvement_direction.UP)
    results.AddValue(v0)
    v1 = scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3, tir_label='bar',
        improvement_direction=improvement_direction.UP)
    results.AddValue(v1)
    results.DidRunPage(self.pages[0])
    # Only the value tagged 'foo' matches; v1 ('bar') is filtered out.
    values = results.FindAllPageSpecificValuesFromIRNamed('foo', 'a')
    self.assertEquals([v0], values)
  def testTraceValue(self):
    """Trace values added across runs are found by FindAllTraceValues."""
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(trace.TraceValue(
        None, trace_data.CreateTraceDataFromRawData([[{'test': 1}]])))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(trace.TraceValue(
        None, trace_data.CreateTraceDataFromRawData([[{'test': 2}]])))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    values = results.FindAllTraceValues()
    self.assertEquals(2, len(values))
  def testCleanUpCleansUpTraceValues(self):
    """CleanUp() marks every recorded trace value as cleaned up."""
    results = page_test_results.PageTestResults()
    v0 = trace.TraceValue(
        None, trace_data.CreateTraceDataFromRawData([{'test': 1}]))
    v1 = trace.TraceValue(
        None, trace_data.CreateTraceDataFromRawData([{'test': 2}]))
    results.WillRunPage(self.pages[0])
    results.AddValue(v0)
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(v1)
    results.DidRunPage(self.pages[1])
    results.CleanUp()
    self.assertTrue(v0.cleaned_up)
    self.assertTrue(v1.cleaned_up)
  def testNoTracesLeftAfterCleanUp(self):
    """After CleanUp(), FindAllTraceValues returns nothing."""
    results = page_test_results.PageTestResults()
    v0 = trace.TraceValue(None,
                          trace_data.CreateTraceDataFromRawData([{'test': 1}]))
    v1 = trace.TraceValue(None,
                          trace_data.CreateTraceDataFromRawData([{'test': 2}]))
    results.WillRunPage(self.pages[0])
    results.AddValue(v0)
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(v1)
    results.DidRunPage(self.pages[1])
    results.CleanUp()
    self.assertFalse(results.FindAllTraceValues())
def testPrintSummaryDisabledResults(self):
  """PrintSummary emits only the disabled stub when the benchmark is off.

  Fix: `assertEquals` is a deprecated unittest alias; use `assertEqual`.
  """
  output_stream = stream.TestOutputStream()
  output_formatters = []
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  output_formatters.append(
      chart_json_output_formatter.ChartJsonOutputFormatter(
          output_stream, benchmark_metadata))
  output_formatters.append(json_output_formatter.JsonOutputFormatter(
      output_stream, benchmark_metadata))
  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, benchmark_enabled=False)
  results.PrintSummary()
  self.assertEqual(
      output_stream.output_data,
      '{\n \"enabled\": false,\n ' +
      '\"benchmark_name\": \"benchmark_name\"\n}\n')
def testAddSharedDiagnostic(self):
  """A shared diagnostic survives into the exported histogram dicts.

  Fix: `assertEquals` is a deprecated unittest alias; use `assertEqual`.
  """
  results = page_test_results.PageTestResults()
  results.telemetry_info.benchmark_start_epoch = 1501773200
  results.WillRunPage(self.pages[0])
  results.DidRunPage(self.pages[0])
  results.CleanUp()
  results.histograms.AddSharedDiagnostic(
      reserved_infos.TELEMETRY.name,
      histogram_module.TelemetryInfo())
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  results.PopulateHistogramSet(benchmark_metadata)
  histogram_dicts = results.AsHistogramDicts()
  self.assertEqual(1, len(histogram_dicts))
  diag = diagnostic.Diagnostic.FromDict(histogram_dicts[0])
  self.assertIsInstance(diag, histogram_module.TelemetryInfo)
def testPopulateHistogramSet_UsesScalarValueData(self):
  """Scalar values are converted to histograms by PopulateHistogramSet.

  Fix: `assertEquals` is a deprecated unittest alias; use `assertEqual`.
  """
  results = page_test_results.PageTestResults()
  results.telemetry_info.benchmark_start_epoch = 1501773200
  results.WillRunPage(self.pages[0])
  results.AddValue(scalar.ScalarValue(
      self.pages[0], 'a', 'seconds', 3,
      improvement_direction=improvement_direction.UP))
  results.DidRunPage(self.pages[0])
  results.CleanUp()
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  results.PopulateHistogramSet(benchmark_metadata)
  histogram_dicts = results.AsHistogramDicts()
  self.assertEqual(1, len(histogram_dicts))
  h = histogram_module.Histogram.FromDict(histogram_dicts[0])
  self.assertEqual('a', h.name)
def testPopulateHistogramSet_UsesHistogramSetData(self):
  """Directly-added histograms are exported with their shared diagnostics.

  Fix: `assertEquals` is a deprecated unittest alias; use `assertEqual`.
  """
  original_diagnostic = histogram_module.TelemetryInfo()
  results = page_test_results.PageTestResults()
  results.telemetry_info.benchmark_start_epoch = 1501773200
  results.WillRunPage(self.pages[0])
  results.histograms.AddHistogram(histogram_module.Histogram('foo', 'count'))
  results.histograms.AddSharedDiagnostic(
      reserved_infos.TELEMETRY.name, original_diagnostic)
  results.DidRunPage(self.pages[0])
  results.CleanUp()
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  results.PopulateHistogramSet(benchmark_metadata)
  histogram_dicts = results.AsHistogramDicts()
  # One histogram plus one shared diagnostic dict.
  self.assertEqual(2, len(histogram_dicts))
  hs = histogram_set.HistogramSet()
  hs.ImportDicts(histogram_dicts)
  diag = hs.LookupDiagnostic(original_diagnostic.guid)
  self.assertIsInstance(diag, histogram_module.TelemetryInfo)
class PageTestResultsFilterTest(unittest.TestCase):
  """Tests for the value_can_be_added_predicate filtering behavior.

  Fix: `assertEquals` is a deprecated unittest alias; use `assertEqual`.
  """

  def setUp(self):
    story_set = story.StorySet(base_dir=os.path.dirname(__file__))
    story_set.AddStory(
        page_module.Page('http://www.foo.com/', story_set, story_set.base_dir,
                         name='http://www.foo.com'))
    story_set.AddStory(
        page_module.Page('http://www.bar.com/', story_set, story_set.base_dir,
                         name='http://www.bar.com/'))
    self.story_set = story_set

  @property
  def pages(self):
    return self.story_set.stories

  def testFilterValue(self):
    """Only values accepted by the predicate are recorded."""
    def AcceptValueNamed_a(value, _):
      return value.name == 'a'
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptValueNamed_a)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'b', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'd', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    self.assertEqual(
        [('a', 'http://www.foo.com/'), ('a', 'http://www.bar.com/')],
        [(v.name, v.page.url) for v in results.all_page_specific_values])

  def testFilterIsFirstResult(self):
    """The predicate can filter on whether a value is a first result."""
    def AcceptSecondValues(_, is_first_result):
      return not is_first_result
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptSecondValues)
    # First results (filtered out)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 7,
        improvement_direction=improvement_direction.UP))
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'b', 'seconds', 8,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'a', 'seconds', 5,
        improvement_direction=improvement_direction.UP))
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'd', 'seconds', 6,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[1])
    # Second results
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'b', 'seconds', 4,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'a', 'seconds', 1,
        improvement_direction=improvement_direction.UP))
    results.AddValue(scalar.ScalarValue(
        self.pages[1], 'd', 'seconds', 2,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    expected_values = [
        ('a', 'http://www.foo.com/', 3),
        ('b', 'http://www.foo.com/', 4),
        ('a', 'http://www.bar.com/', 1),
        ('d', 'http://www.bar.com/', 2)]
    actual_values = [(v.name, v.page.url, v.value)
                     for v in results.all_page_specific_values]
    self.assertEqual(expected_values, actual_values)

  def testFailureValueCannotBeFiltered(self):
    """Failure values bypass the value filter."""
    def AcceptValueNamed_a(value, _):
      return value.name == 'a'
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptValueNamed_a)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'b', 'seconds', 8,
        improvement_direction=improvement_direction.UP))
    failure_value = failure.FailureValue.FromMessage(self.pages[0], 'failure')
    results.AddValue(failure_value)
    results.DidRunPage(self.pages[0])
    results.PrintSummary()
    # Although predicate says only accept values named 'a', the failure value
    # is added anyway.
    self.assertEqual(len(results.all_page_specific_values), 1)
    self.assertIn(failure_value, results.all_page_specific_values)

  def testSkipValueCannotBeFiltered(self):
    """Skip values bypass the value filter."""
    def AcceptValueNamed_a(value, _):
      return value.name == 'a'
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptValueNamed_a)
    results.WillRunPage(self.pages[0])
    skip_value = skip.SkipValue(self.pages[0], 'skip for testing')
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'b', 'seconds', 8,
        improvement_direction=improvement_direction.UP))
    results.AddValue(skip_value)
    results.DidRunPage(self.pages[0])
    results.PrintSummary()
    # Although predicate says only accept value with named 'a', skip value is
    # added anyway.
    self.assertEqual(len(results.all_page_specific_values), 1)
    self.assertIn(skip_value, results.all_page_specific_values)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to read data in the graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.input_pipeline.python.ops import input_pipeline_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as input_ops
from tensorflow.python.training import queue_runner
# Default name for key in the feature dict.
KEY_FEATURE_NAME = '__key__'
def read_batch_examples(file_pattern,
                        batch_size,
                        reader,
                        randomize_input=True,
                        num_epochs=None,
                        queue_capacity=10000,
                        num_threads=1,
                        read_batch_size=1,
                        parse_fn=None,
                        name=None,
                        seed=None):
  """Reads, queues and batches `Example` protos, discarding the keys.

  Thin wrapper around `read_keyed_batch_examples`: sets up a file-name
  queue for `file_pattern`, reads records via `reader`, and batches them
  to size `batch_size`. All queue runners are added to the queue runners
  collection and may be started via `start_queue_runners`; all ops go into
  the default graph. Use `parse_fn` for per-record processing.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so
      call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  keyed_batch = read_keyed_batch_examples(
      file_pattern=file_pattern,
      batch_size=batch_size,
      reader=reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      name=name,
      seed=seed)
  # Position 0 holds the keys; this API exposes only the examples.
  return keyed_batch[1]
def read_keyed_batch_examples(file_pattern,
                              batch_size,
                              reader,
                              randomize_input=True,
                              num_epochs=None,
                              queue_capacity=10000,
                              num_threads=1,
                              read_batch_size=1,
                              parse_fn=None,
                              name=None,
                              seed=None):
  """Reads, queues and batches `Example` protos, returning keys too.

  Sets up a (per-worker, non-shared) file-name queue for `file_pattern`,
  reads records via `reader`, and batches them to size `batch_size`. All
  queue runners are added to the queue runners collection and may be
  started via `start_queue_runners`; all ops go into the default graph.
  Use `parse_fn` for per-record processing.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so
      call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  batching_kwargs = dict(
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      name=name,
      seed=seed)
  # This public entry point never shares the file-name queue across workers.
  return _read_keyed_batch_examples_helper(
      file_pattern, batch_size, reader, setup_shared_queue=False,
      **batching_kwargs)
def _read_keyed_batch_examples_shared_queue(file_pattern,
                                            batch_size,
                                            reader,
                                            randomize_input=True,
                                            num_epochs=None,
                                            queue_capacity=10000,
                                            num_threads=1,
                                            read_batch_size=1,
                                            parse_fn=None,
                                            name=None,
                                            seed=None):
  """Reads, queues and batches `Example` protos using a shared file queue.

  Like `read_keyed_batch_examples`, but the file-name queue is shared
  across workers: each worker pulls file names from the shared queue, so
  each file is visited at most once. NOTE: this guarantee does not hold if
  parameter servers are pre-empted/restarted or the session is restored
  from a checkpoint, since queue state is not checkpointed and reading
  restarts from the full file list.

  All queue runners are added to the queue runners collection and may be
  started via `start_queue_runners`; all ops go into the default graph.
  Use `parse_fn` for per-record processing.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so
      call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  batching_kwargs = dict(
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      num_threads=num_threads,
      read_batch_size=read_batch_size,
      parse_fn=parse_fn,
      name=name,
      seed=seed)
  return _read_keyed_batch_examples_helper(
      file_pattern, batch_size, reader, setup_shared_queue=True,
      **batching_kwargs)
def _get_file_names(file_pattern, randomize_input):
  """Resolves a glob pattern (or list of patterns) into file names.

  Args:
    file_pattern: File glob pattern, or list of glob patterns.
    randomize_input: Whether the order of file names will later be shuffled.

  Returns:
    List of file names matching `file_pattern`.

  Raises:
    ValueError: If `file_pattern` is empty, or pattern matches no files.
  """
  if isinstance(file_pattern, list):
    if not file_pattern:
      raise ValueError('No files given to dequeue_examples.')
    file_names = [matched
                  for pattern in file_pattern
                  for matched in gfile.Glob(pattern)]
  else:
    file_names = list(gfile.Glob(file_pattern))
  if not file_names:
    raise ValueError('No files match %s.' % file_pattern)
  # Deterministic ordering keeps unit tests stable; when `randomize_input`
  # is enabled, `string_input_producer` shuffles the names downstream.
  if not randomize_input:
    file_names = sorted(file_names)
  return file_names
def _get_examples(file_name_queue, reader, num_threads, read_batch_size,
                  filter_fn, parse_fn):
  """Builds one reader pipeline per thread and returns them as a list.

  Each entry is either a `(keys, examples)` tuple or, when `parse_fn`
  returns a dict, the dict itself with the keys stored under
  `KEY_FEATURE_NAME` (batch_join does not support a Tensor/dict pair).

  Args:
    file_name_queue: Queue of file names for the readers to dequeue from.
    reader: Function or class returning an object with a `read` method.
    num_threads: Number of independent reader pipelines to build.
    read_batch_size: Records requested per `read_up_to` call; a value of 1
      falls back to a plain `read`.
    filter_fn: Optional function producing a boolean keep-mask from
      (keys, examples). `None` disables filtering.
    parse_fn: Optional function mapping serialized examples to a parsed
      representation. `None` leaves them serialized.

  Returns:
    A list with `num_threads` entries suitable for `batch_join` /
    `shuffle_batch_join`.
  """
  with ops.name_scope('read'):
    example_list = []
    for _ in range(num_threads):
      reader_instance = reader()
      if read_batch_size > 1:
        keys, protos = reader_instance.read_up_to(file_name_queue,
                                                  read_batch_size)
      else:
        keys, protos = reader_instance.read(file_name_queue)
      if filter_fn:
        keep_mask = filter_fn(keys, protos)
        keys = array_ops.boolean_mask(keys, keep_mask)
        protos = array_ops.boolean_mask(protos, keep_mask)
      if not parse_fn:
        example_list.append((keys, protos))
        continue
      parsed = parse_fn(protos)
      if isinstance(parsed, dict):
        # Smuggle the keys inside the feature dict because batch_join
        # doesn't support a tuple of Tensor + dict.
        parsed[KEY_FEATURE_NAME] = keys
        example_list.append(parsed)
      else:
        example_list.append((keys, parsed))
    return example_list
def _read_keyed_batch_examples_helper(file_pattern,
                                      batch_size,
                                      reader,
                                      randomize_input=True,
                                      num_epochs=None,
                                      queue_capacity=10000,
                                      num_threads=1,
                                      read_batch_size=1,
                                      filter_fn=None,
                                      parse_fn=None,
                                      setup_shared_queue=False,
                                      name=None,
                                      seed=None):
  """Adds operations to read, queue, batch `Example` protos.

  Common implementation behind `read_keyed_batch_examples` and
  `_read_keyed_batch_examples_shared_queue`.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so
      call `tf.global_variables_initializer()` and run the op in a session.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once.
    filter_fn: Filtering function, takes both keys as well `Example` Tensors
      and returns a boolean mask of the same shape as the input Tensors to
      be applied for filtering. If `None`, no filtering is done.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    setup_shared_queue: Whether to set up a shared queue for file names.
    name: Name of resulting op.
    seed: An integer (optional). Seed used if randomize_input == True.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  # Retrieve files to read.
  file_names = _get_file_names(file_pattern, randomize_input)

  # Check input parameters are given and reasonable. A `Tensor`-valued
  # batch_size/read_batch_size cannot be range-checked at graph-construction
  # time, so those range checks only apply to Python ints.
  if (not queue_capacity) or (queue_capacity <= 0):
    raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
  if (batch_size is None) or ((not isinstance(batch_size, ops.Tensor)) and
                              (batch_size <= 0 or batch_size > queue_capacity)):
    raise ValueError('Invalid batch_size %s, with queue_capacity %s.' %
                     (batch_size, queue_capacity))
  if (read_batch_size is None) or (
      (not isinstance(read_batch_size, ops.Tensor)) and (read_batch_size <= 0)):
    raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
  if (not num_threads) or (num_threads <= 0):
    raise ValueError('Invalid num_threads %s.' % num_threads)
  if (num_epochs is not None) and (num_epochs <= 0):
    raise ValueError('Invalid num_epochs %s.' % num_epochs)

  with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
    with ops.name_scope('file_name_queue') as file_name_queue_scope:
      if setup_shared_queue:
        # Shared-queue mode: `seek_next` hands out each file name at most
        # once across workers, fed through a capacity-1 local FIFO queue.
        # NOTE(review): the at-most-once behavior is presumably lost when the
        # session is restored from a checkpoint (queue state is not saved).
        file_name_queue = data_flow_ops.FIFOQueue(
            capacity=1, dtypes=[dtypes.string], shapes=[[]])
        enqueue_op = file_name_queue.enqueue(
            input_pipeline_ops.seek_next(
                file_names, shuffle=randomize_input, num_epochs=num_epochs,
                seed=seed))
        queue_runner.add_queue_runner(
            queue_runner.QueueRunner(file_name_queue, [enqueue_op]))
      else:
        # Per-worker file-name queue; shuffles names when randomize_input.
        file_name_queue = input_ops.string_input_producer(
            constant_op.constant(
                file_names, name='input'),
            shuffle=randomize_input,
            num_epochs=num_epochs,
            name=file_name_queue_scope,
            seed=seed)

    # One reader pipeline per thread; each feeds the batching queue below.
    example_list = _get_examples(file_name_queue, reader, num_threads,
                                 read_batch_size, filter_fn, parse_fn)

    # Reading several records per call yields already-batched entries, so
    # the batching queue must treat each enqueue as many elements.
    enqueue_many = read_batch_size > 1

    # With a finite number of epochs, the final batch may come up short.
    if num_epochs is None:
      allow_smaller_final_batch = False
    else:
      allow_smaller_final_batch = True

    # Setup batching queue given list of read example tensors.
    if randomize_input:
      if isinstance(batch_size, ops.Tensor):
        # batch_size unknown at graph time: use a fixed 40%-full threshold.
        min_after_dequeue = int(queue_capacity * 0.4)
      else:
        min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
      queued_examples_with_keys = input_ops.shuffle_batch_join(
          example_list,
          batch_size,
          capacity=queue_capacity,
          min_after_dequeue=min_after_dequeue,
          enqueue_many=enqueue_many,
          name=scope,
          allow_smaller_final_batch=allow_smaller_final_batch,
          seed=seed)
    else:
      queued_examples_with_keys = input_ops.batch_join(
          example_list,
          batch_size,
          capacity=queue_capacity,
          enqueue_many=enqueue_many,
          name=scope,
          allow_smaller_final_batch=allow_smaller_final_batch)

    # When parse_fn produced a dict, _get_examples stashed the keys under
    # KEY_FEATURE_NAME; pop them back out so callers get (keys, features).
    if parse_fn and isinstance(queued_examples_with_keys, dict):
      queued_keys = queued_examples_with_keys.pop(KEY_FEATURE_NAME)
      return queued_keys, queued_examples_with_keys
    return queued_examples_with_keys
def read_keyed_batch_features(file_pattern,
                              batch_size,
                              features,
                              reader,
                              randomize_input=True,
                              num_epochs=None,
                              queue_capacity=10000,
                              reader_num_threads=1,
                              feature_queue_capacity=100,
                              num_enqueue_threads=2,
                              parse_fn=None,
                              name=None):
  """Reads, queues, batches and parses `Example` protos.

  Sets up a file-name queue for `file_pattern`, reads records via `reader`,
  batches them to size `batch_size`, parses each batch according to
  `features`, and pushes the parsed tensors through an asynchronous feature
  queue. All queue runners are added to the queue runners collection and
  may be started via `start_queue_runners`; all ops go into the default
  graph.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If
      specified, creates a variable that must be initialized, so call
      tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_enqueue_threads: Number of threads to enqueue the parsed example
      queue. Using multiple threads helps keep the queue full when the
      subsequent computations overall are cheaper than parsing.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
    keys, examples = read_keyed_batch_examples(
        file_pattern,
        batch_size,
        reader,
        randomize_input=randomize_input,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=reader_num_threads,
        read_batch_size=batch_size,
        parse_fn=parse_fn,
        name=scope)
    # Parse the batch into a dict of tensors, then hand everything off to
    # the asynchronous feature queue.
    parsed = parsing_ops.parse_example(examples, features)
    return queue_parsed_features(
        parsed,
        keys=keys,
        feature_queue_capacity=feature_queue_capacity,
        num_enqueue_threads=num_enqueue_threads,
        name=scope)
def _read_keyed_batch_features_shared_queue(file_pattern,
                                            batch_size,
                                            features,
                                            reader,
                                            randomize_input=True,
                                            num_epochs=None,
                                            queue_capacity=10000,
                                            reader_num_threads=1,
                                            feature_queue_capacity=100,
                                            num_queue_runners=2,
                                            parse_fn=None,
                                            name=None):
  """Reads, queues, batches and parses `Example` protos (shared file queue).

  Like `read_keyed_batch_features`, but file names come from a queue shared
  across workers (see `_read_keyed_batch_examples_shared_queue`). Records
  are read via `reader`, batched to `batch_size`, parsed per `features`,
  and pushed through an asynchronous feature queue. All queue runners are
  added to the queue runners collection and may be started via
  `start_queue_runners`; all ops go into the default graph.

  Args:
    file_pattern: List of files or patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If
      specified, creates a variable that must be initialized, so call
      tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_queue_runners: Number of threads to enqueue the parsed example
      queue. Using multiple threads helps keep the queue full when the
      subsequent computations overall are cheaper than parsing.
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - A dict of `Tensor` or `SparseTensor` objects for each in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  with ops.name_scope(name, 'read_batch_features', [file_pattern]) as scope:
    keys, examples = _read_keyed_batch_examples_shared_queue(
        file_pattern,
        batch_size,
        reader,
        randomize_input=randomize_input,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=reader_num_threads,
        read_batch_size=batch_size,
        parse_fn=parse_fn,
        name=scope)
    # Parse the batch into a dict of tensors, then hand everything off to
    # the asynchronous feature queue.
    parsed = parsing_ops.parse_example(examples, features)
    return queue_parsed_features(
        parsed,
        keys=keys,
        feature_queue_capacity=feature_queue_capacity,
        num_enqueue_threads=num_queue_runners,
        name=scope)
def queue_parsed_features(parsed_features,
                          keys=None,
                          feature_queue_capacity=100,
                          num_enqueue_threads=2,
                          name=None):
  """Speeds up parsing by using queues to do it asynchronously.

  This function adds the tensors in `parsed_features` to a queue, which allows
  the parsing (or any other expensive op before this) to be asynchronous wrt
  the rest of the training graph. This greatly improves read latency and
  speeds up training since the data will already be parsed and ready when each
  step of training needs it.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.

  All ops are added to the default graph.

  Args:
    parsed_features: A dict of string key to `Tensor` or `SparseTensor`
      objects.
    keys: `Tensor` of string keys.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_enqueue_threads: Number of threads to enqueue the parsed example
      queue. Using multiple threads to enqueue the parsed example queue helps
      maintain a full queue when the subsequent computations overall are
      cheaper than parsing.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` corresponding to `keys` if provided, otherwise `None`.
    - A dict of string key to `Tensor` or `SparseTensor` objects corresponding
      to `parsed_features`.

  Raises:
    ValueError: for invalid inputs.
  """
  args = list(parsed_features.values())
  if keys is not None:
    args += [keys]
  with ops.name_scope(name, 'queue_parsed_features', args):
    # Lets also add preprocessed tensors into the queue types for each item of
    # the queue.
    tensors_to_enqueue = []
    # Each entry contains the key, and a boolean which indicates whether the
    # tensor was a sparse tensor.
    tensors_mapping = []
    # TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
    # tensors into a queue. This could be taken care in somewhere else so others
    # can reuse it. Also, QueueBase maybe extended to handle sparse tensors
    # directly.
    # Iterate in sorted key order so the flattened enqueue layout is
    # deterministic and matches the reconstruction order below.
    for key in sorted(parsed_features.keys()):
      tensor = parsed_features[key]
      if isinstance(tensor, sparse_tensor.SparseTensor):
        # A SparseTensor is flattened into its three component tensors.
        tensors_mapping.append((key, True))
        tensors_to_enqueue.extend(
            [tensor.indices, tensor.values, tensor.dense_shape])
      else:
        tensors_mapping.append((key, False))
        tensors_to_enqueue.append(tensor)
    # Keys, when present, always ride last; see dequeued_tensors[-1] below.
    if keys is not None:
      tensors_to_enqueue.append(keys)
    queue_dtypes = [x.dtype for x in tensors_to_enqueue]
    input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)
    # Add a summary op to debug if our feature queue is full or not.
    summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
                   (input_queue.name, feature_queue_capacity),
                   math_ops.cast(input_queue.size(), dtypes.float32) *
                   (1. / feature_queue_capacity))
    # Use a single QueueRunner with multiple threads to enqueue so the queue is
    # always full. The threads are coordinated so the last batch will not be
    # lost.
    enqueue_ops = [
        input_queue.enqueue(tensors_to_enqueue)
        for _ in range(num_enqueue_threads)
    ]
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(
            input_queue,
            enqueue_ops,
            queue_closed_exception_types=(errors.OutOfRangeError,
                                          errors.CancelledError)))
    dequeued_tensors = input_queue.dequeue()
    if not isinstance(dequeued_tensors, list):
      # input_queue.dequeue() returns a single tensor instead of a list of
      # tensors if there is only one tensor to dequeue, which breaks the
      # assumption of a list below.
      dequeued_tensors = [dequeued_tensors]
    # Reset shapes on dequeued tensors: the queue does not preserve static
    # shape information, so restore it from the enqueue-side tensors.
    for i in range(len(tensors_to_enqueue)):
      dequeued_tensors[i].set_shape(tensors_to_enqueue[i].get_shape())
    # Recreate feature mapping according to the original dictionary.
    dequeued_parsed_features = {}
    index = 0
    for key, is_sparse_tensor in tensors_mapping:
      if is_sparse_tensor:
        # Three tensors are (indices, values, shape).
        dequeued_parsed_features[key] = sparse_tensor.SparseTensor(
            dequeued_tensors[index], dequeued_tensors[index + 1],
            dequeued_tensors[index + 2])
        index += 3
      else:
        dequeued_parsed_features[key] = dequeued_tensors[index]
        index += 1
    dequeued_keys = None
    if keys is not None:
      dequeued_keys = dequeued_tensors[-1]
    return dequeued_keys, dequeued_parsed_features
def read_batch_features(file_pattern,
                        batch_size,
                        features,
                        reader,
                        randomize_input=True,
                        num_epochs=None,
                        queue_capacity=10000,
                        feature_queue_capacity=100,
                        reader_num_threads=1,
                        parse_fn=None,
                        name=None):
  """Read, queue, batch and parse `Example` protos into a feature dict.

  Sets up a file-name queue for `file_pattern`, reads records with the
  supplied `reader`, batches them to `batch_size`, and parses each batch
  according to the `features` specification.  Every queue runner created
  here is added to the queue-runners collection and may be started via
  `start_queue_runners`.  All ops go into the default graph.

  Args:
    file_pattern: List of files or glob patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    reader: A function or class that returns an object with a `read`
      method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Number of passes over the dataset; `None` cycles forever.
      NOTE - if specified, a local variable is created that must be
      initialized (call tf.local_variables_initializer() and run the op).
    queue_capacity: Capacity of the input queue.
    feature_queue_capacity: Capacity of the parsed-features queue. Use a
      small number (e.g. 5) when parsed features are large.
    reader_num_threads: Number of threads used to read examples.
    parse_fn: Parsing function, takes an `Example` Tensor and returns a
      parsed representation. If `None`, no parsing is done.
    name: Name of the resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects, one per key in
    `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  # read_keyed_batch_features yields a (keys, features) pair; this
  # convenience wrapper deliberately discards the keys.
  keyed_batch = read_keyed_batch_features(
      file_pattern,
      batch_size,
      features,
      reader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      feature_queue_capacity=feature_queue_capacity,
      reader_num_threads=reader_num_threads,
      parse_fn=parse_fn,
      name=name)
  return keyed_batch[1]
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord files, then queues, batches and parses `Example` protos.

  Thin wrapper around `read_batch_features`, hard-wired to
  `io_ops.TFRecordReader`.  See `read_examples` for a fuller description.

  Args:
    file_pattern: List of files or glob patterns of file paths containing
      `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Number of passes over the dataset; `None` cycles forever.
      NOTE - if specified, a local variable is created that must be
      initialized (call tf.local_variables_initializer() and run the op).
    queue_capacity: Capacity of the input queue.
    reader_num_threads: Number of threads used to read examples.
    name: Name of the resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects, one per key in
    `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  passthrough = dict(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name)
  return read_batch_features(reader=io_ops.TFRecordReader, **passthrough)
|
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from PyQt4 import QtCore, QtGui
import os
import re
import uuid
from vistrails.core.modules.basic_modules import Path
from vistrails.gui.common_widgets import QSearchBox
from vistrails.gui.modules.constant_configuration import ConstantWidgetMixin
from vistrails.gui.modules.module_configure import StandardModuleConfigurationWidget
from db_utils import DatabaseAccessSingleton
from identifiers import identifier as persistence_pkg
import repo
class IntegerWrapper(object):
    """Plain object that carries a parent-row index.

    PyQt keeps QModelIndex.internalPointer() targets as weak references,
    so the model needs a long-lived Python object to hold the integer.
    """

    def __init__(self, idx):
        self.idx = idx
class PersistentRefModelSingleton(object):
    """Factory returning the single shared PersistentRefModel.

    Instantiating this class always yields the process-wide
    PersistentRefModel, creating it lazily on first use.  The shared
    instance is cached on PersistentRefModel._instance.
    """

    # Fix: the first argument of __new__ is the class, so it is named
    # `cls` (PEP 8); the original misleadingly called it `self`.
    def __new__(cls, *args, **kw):
        if PersistentRefModel._instance is None:
            obj = PersistentRefModel(*args, **kw)
            PersistentRefModel._instance = obj
        return PersistentRefModel._instance
class PersistentRefModel(QtCore.QAbstractItemModel):
    """Two-level Qt item model over the persistence database.

    Top-level rows are distinct reference ids; each top-level row's
    children are the individual versions recorded for that id.  Data is
    loaded once from DatabaseAccessSingleton and kept in memory.
    """

    # Shared instance, managed by PersistentRefModelSingleton.
    _instance = None

    # Matches timestamps like "2013-09-03 18:57:53.133000".
    _DATE_FORMAT = re.compile(r'^(?P<y>[0-9]{4})-'
                              r'(?P<m>[0-9]{2})-'
                              r'(?P<d>[0-9]{2}) '
                              r'(?P<H>[0-9]{2}):'
                              r'(?P<M>[0-9]{2}):'
                              r'(?P<S>[0-9]{2}).'
                              r'(?P<ms>[0-9]+)$')

    # Column position -> database column name.
    cols = {0: "name",
            1: "type",
            2: "tags",
            3: "user",
            4: "date_created",
            5: "date_modified",
            6: "id",
            7: "version",
            8: "content_hash",
            9: "signature"}
    # Inverse mapping: database column name -> column position.
    idxs = dict((v,k) for (k,v) in cols.iteritems())
    # Database column name -> human-readable header label.
    headers = {"id": "ID",
               "name": "Name",
               "tags": "Tags",
               "user": "User",
               "date_created": "Date Created",
               "date_modified": "Date Modified",
               "content_hash": "Content Hash",
               "version": "Version",
               "signature": "Signature",
               "type": "Type"}

    def __init__(self, parent=None):
        """Load all rows from the database and group them by reference id."""
        QtCore.QAbstractItemModel.__init__(self, parent)
        self.db_access = DatabaseAccessSingleton()
        self.db_access.set_model(self)
        # Read columns in the order defined by self.cols.
        rows = self.db_access.read_database(
            [c[1] for c in sorted(self.cols.iteritems())])
        rows = map(self.fix_dates, rows)
        # id -> list of row tuples, one per version of that id.
        self.id_lists = {}
        for ref in rows:
            if ref[self.idxs['id']] not in self.id_lists:
                self.id_lists[ref[self.idxs['id']]] = []
            self.id_lists[ref[self.idxs['id']]].append(ref)
        # Display order of the top-level rows; sort() reorders this list.
        self.id_lists_keys = self.id_lists.keys()
        # parent-row -> IntegerWrapper; kept here because PyQt stores
        # internalPointer() targets as weak references.
        self.integer_wrappers = {}

    @staticmethod
    def fix_dates(row):
        """Return *row* with date columns shortened to 'YYYY-MM-DD HH:MM'.

        NOTE(review): assumes the date cells are strings — a None cell
        (possible via add_data) would make re.match raise; confirm rows
        always carry both dates.
        """
        row = list(row)
        for c in ('date_created', 'date_modified'):
            c = PersistentRefModel.idxs[c]
            m = PersistentRefModel._DATE_FORMAT.match(row[c])
            if m is not None:
                row[c] = '{y}-{m}-{d} {H}:{M}'.format(**m.groupdict())
        return tuple(row)

    def rowCount(self, parent=QtCore.QModelIndex()):
        """Number of ids at the top level, or versions under a parent id."""
        if not parent.isValid():
            # print 'rowCount:', len(self.id_lists_keys)
            return len(self.id_lists_keys)
        # print 'parentValid rowCount:', \
        #     len(self.id_lists[self.id_lists_keys[parent.row()]])
        return len(self.id_lists[self.id_lists_keys[parent.row()]])

    def columnCount(self, parent=QtCore.QModelIndex()):
        """Fixed column count, one per entry in self.headers."""
        # print 'columnCount:', len(self.headers)
        return len(self.headers)

    def hasChildren(self, parent=QtCore.QModelIndex()):
        """Only the root and top-level (id) rows have children."""
        # print 'hasChildren:'
        if not parent.isValid():
            # print '  True'
            return True
        else:
            # print '  PARENT:', parent.row(), parent.column(), \
            #     parent.internalPointer()
            if not parent.parent().isValid():
                # print '  TRUE'
                return True
        # print '  False'
        return False

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """Return the human-readable header label for a column."""
        if role != QtCore.Qt.DisplayRole:
            return None
        if section in self.cols:
            return self.headers[self.cols[section]]
        return None

    def data(self, index, role):
        """Return display data for *index*.

        Child rows show their own version's values; parent (id) rows show
        the first version's values, except the date columns which are
        aggregated (earliest created / latest modified) and the
        per-version columns which are blanked.
        """
        if not index.isValid() or role != QtCore.Qt.DisplayRole:
            return None
        # if index.parent().isValid():
        #     print 'data', index.row(), index.column(), index.parent().row()
        if index.parent().isValid():
            # have a child node
            # print 'got child node!', index.parent().row()
            id_list = self.id_lists[self.id_lists_keys[index.parent().row()]]
            data = id_list[index.row()]
        else:
            # have a parent node
            id_list = self.id_lists[self.id_lists_keys[index.row()]]
            data = id_list[0]
            # want to have the earliest created date and latest modified date
            if index.column() == self.idxs['date_created']:
                dates = [l[index.column()] for l in id_list]
                return min(dates)
            if index.column() == self.idxs['date_modified']:
                dates = [l[index.column()] for l in id_list]
                return max(dates)
            # Per-version columns make no sense on the aggregated id row.
            if index.column() == self.idxs['version'] or \
                    index.column() == self.idxs['signature'] or \
                    index.column() == self.idxs['content_hash'] or \
                    index.column() == self.idxs['user']:
                return None
        if index.column() < len(data):
            return data[index.column()]
        return None

    def parent(self, index):
        """Map a child index back to its top-level (id) row."""
        # print 'calling parent() method'
        if index.isValid():
            # print '  index is valid', index.row(), index.column()
            if index.internalPointer():
                # The wrapper stores the parent's top-level row number.
                parent_item = index.internalPointer().idx
                # print '  parent_item:', parent_item
                return self.createIndex(parent_item, 0, None)
        #     else:
        #         # print '  internalPointer is not valid'
        # else:
        #     # print 'index not valid for parent call'
        return QtCore.QModelIndex()

    def index(self, row, column, parent):
        """Create an index; child indexes carry an IntegerWrapper pointer."""
        # print 'index:', row, column
        if not parent.isValid():
            if len(self.id_lists_keys) > row:
                # print '  no parent item'
                return self.createIndex(row, column, None)
        else:
            # print '  **** parent_item', row, column, parent.row()
            if len(self.id_lists[self.id_lists_keys[parent.row()]]) > row:
                # print '  ++++ creating index'
                # !!! internalPointer is a weakref in PyQt !!!
                # so cache the wrapper to keep it alive.
                if parent.row() not in self.integer_wrappers:
                    integer_wrapper = IntegerWrapper(parent.row())
                    self.integer_wrappers[parent.row()] = integer_wrapper
                else:
                    integer_wrapper = self.integer_wrappers[parent.row()]
                # print '  ---- created parent wrapper'
                return self.createIndex(row, column, integer_wrapper)
        return QtCore.QModelIndex()

    def sort(self, column, order=QtCore.Qt.AscendingOrder):
        """Reorder the top-level ids by *column* of each id's first row.

        NOTE(review): reverse is True when order == AscendingOrder, which
        looks inverted — confirm against how the attached view calls sort.
        NOTE(review): the column == -1 early return skips the
        layoutChanged() emit after layoutAboutToBeChanged(); verify.
        """
        self.emit(QtCore.SIGNAL('layoutAboutToBeChanged()'))
        if column == -1:
            return
        self.id_lists_keys.sort(key=lambda x: self.id_lists[x][0][column],
                                reverse=(order==QtCore.Qt.AscendingOrder))
        self.emit(QtCore.SIGNAL('layoutChanged()'))

    def find_row(self, id, version=None):
        """Return the model index for *id* (optionally a specific version).

        With a matching version, returns the child index; otherwise the
        top-level index for the id; invalid index when id is unknown.
        """
        if id in self.id_lists:
            i = self.id_lists_keys.index(id)
            if version is not None:
                for j, data in enumerate(self.id_lists[id]):
                    if data[self.idxs['version']] == version:
                        # Keep the wrapper alive (weakref internalPointer).
                        if i not in self.integer_wrappers:
                            integer_wrapper = IntegerWrapper(i)
                            self.integer_wrappers[i] = integer_wrapper
                        else:
                            integer_wrapper = self.integer_wrappers[i]
                        return self.createIndex(j, 0, integer_wrapper)
            return self.createIndex(i, 0, 0)
        # for i, id in enumerate(self.id_lists_keys):
        #     if version is not None:
        #         for data in enumerate(self.id_lists[id]):
        #             if
        # for i, data in enumerate(self.id_lists):
        #     if data[self.idxs['id']] == id and \
        #             (not version or data[self.idxs['version']] == version):
        #         return self.createIndex(i, 0, 0)
        return QtCore.QModelIndex()

    def add_data(self, value_dict):
        """Append one version row built from *value_dict* and reset views.

        Missing columns are stored as None.
        """
        id = value_dict['id']
        value_list = []
        for _, c in sorted(self.cols.iteritems()):
            if c in value_dict:
                value_list.append(str(value_dict[c]))
            else:
                value_list.append(None)
        if id not in self.id_lists:
            self.id_lists[id] = []
            self.id_lists_keys.append(id)
        self.id_lists[id].append(self.fix_dates(value_list))
        self.reset()

    def remove_data(self, where_dict):
        """Remove one version (when 'version' given) or a whole id."""
        id = where_dict['id']
        version = where_dict.get('version', None)
        if version is not None:
            for idx, value_tuple in enumerate(self.id_lists[id]):
                if value_tuple[self.idxs['version']] == version:
                    del self.id_lists[id][idx]
                    break
        else:
            # NOTE(review): path_type is computed but never used here.
            path_type = self.id_lists[id][0][self.idxs['type']]
            for idx, key_id in enumerate(self.id_lists_keys):
                if key_id == id:
                    del self.id_lists_keys[idx]
                    break
            del self.id_lists[id]
        self.reset()
class PersistentRefView(QtGui.QTreeView):
    """Tree view over the shared PersistentRefModel.

    Wraps the model in a QSortFilterProxyModel (filtering on all columns)
    and exposes helpers to select and query references by id/version.
    """

    def __init__(self, path_type=None, parent=None):
        QtGui.QTreeView.__init__(self, parent)
        # Shared model instance; filtering/sorting goes through a proxy.
        self.my_model = PersistentRefModelSingleton()
        print 'my_model:', id(self.my_model)
        proxy_model = QtGui.QSortFilterProxyModel(self)
        proxy_model.setSourceModel(self.my_model)
        # -1: filter matches against every column.
        proxy_model.setFilterKeyColumn(-1)
        self.setModel(proxy_model)
        self.set_visibility(path_type)
        self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
        self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
        self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
        self.setSortingEnabled(True)
        # Remembered target for set_selection().
        self.current_id = None
        self.current_version = None
        for i in xrange(self.my_model.columnCount()):
            self.resizeColumnToContents(i)

    def set_visibility(self, path_type=None):
        """Hide rows whose type does not match 'blob'/'tree' filters."""
        if path_type == "blob":
            self.header().hideSection(self.my_model.idxs["type"])
            for i, key in enumerate(self.my_model.id_lists_keys):
                id_list = self.my_model.id_lists[key]
                if id_list[0][self.my_model.idxs["type"]] != "blob":
                    # if i not in self.my_model.file_idxs:
                    print "setting index", i, "to hidden"
                    my_index = self.my_model.createIndex(i, 0, None)
                    index = self.model().mapFromSource(my_index)
                    self.setRowHidden(index.row(), QtCore.QModelIndex(), True)
        elif path_type == "tree":
            self.header().hideSection(self.my_model.idxs["type"])
            for i, key in enumerate(self.my_model.id_lists_keys):
                id_list = self.my_model.id_lists[key]
                if id_list[0][self.my_model.idxs["type"]] != "tree":
                    # if i not in self.my_model.dir_idxs:
                    print "setting index", i, "to hidden"
                    my_index = self.my_model.createIndex(i, 0, None)
                    index = self.model().mapFromSource(my_index)
                    self.setRowHidden(index.row(), QtCore.QModelIndex(), True)

    def set_selection(self):
        """Select (and expand to) current_id/current_version.

        Returns True when the reference was found and selected.
        """
        if self.current_id:
            my_index = self.my_model.find_row(self.current_id,
                                              self.current_version)
            index = self.model().mapFromSource(my_index)
            if index.isValid():
                print 'checking internalPointer', my_index.internalPointer()
                if my_index.internalPointer():
                    # Child hit: expand its parent id row first.
                    my_expand_index = \
                        self.my_model.createIndex(
                            my_index.internalPointer().idx, 0, 0)
                    expand_index = self.model().mapFromSource(my_expand_index)
                    self.expand(expand_index)
                self.selectionModel().select(
                    index, QtGui.QItemSelectionModel.ClearAndSelect | \
                        QtGui.QItemSelectionModel.Rows)
                return True
        return False

    def set_id(self, id):
        """Remember *id* and try to select it; returns success."""
        self.current_id = id
        return self.set_selection()

    def set_version(self, version):
        """Remember *version* and try to select it; returns success."""
        self.current_version = version
        return self.set_selection()

    def get_id(self):
        """Return the id of the selected row as a string."""
        sf_index = self.selectionModel().selectedRows()[0]
        index = self.model().mapToSource(sf_index)
        if index.internalPointer():
            # Child row: resolve via the parent's top-level position.
            paridx = index.internalPointer().idx
            id_list = \
                self.my_model.id_lists[self.my_model.id_lists_keys[paridx]]
            id = id_list[index.row()][self.my_model.idxs['id']]
        else:
            id_list = \
                self.my_model.id_lists[self.my_model.id_lists_keys[index.row()]]
            id = id_list[0][self.my_model.idxs['id']]
        return str(id)

    def get_version(self):
        """Return the selected version string, or None for a top-level row."""
        sf_index = self.selectionModel().selectedRows()[0]
        index = self.model().mapToSource(sf_index)
        if index.internalPointer():
            paridx = index.internalPointer().idx
            id_list = \
                self.my_model.id_lists[self.my_model.id_lists_keys[paridx]]
            version = id_list[index.row()][self.my_model.idxs['version']]
            return str(version)
        return None

    def get_info(self):
        """Return (id, version, name, tags) for the first selected row."""
        return self.get_info_list()[0]

    def get_info_list(self):
        """Return [(id, version, name, tags), ...] for all selected rows.

        version is None when a top-level (id) row is selected.
        """
        s_indexes = self.selectionModel().selectedRows()
        info_list = []
        for s_idx in s_indexes:
            index = self.model().mapToSource(s_idx)
            if index.internalPointer():
                paridx = index.internalPointer().idx
                id_list = \
                    self.my_model.id_lists[self.my_model.id_lists_keys[paridx]]
                info = id_list[index.row()]
                version = str(info[self.my_model.idxs['version']])
            else:
                id_list = self.my_model.id_lists[ \
                    self.my_model.id_lists_keys[index.row()]]
                info = id_list[0]
                version = None
            info_list.append((str(info[self.my_model.idxs['id']]),
                              version,
                              str(info[self.my_model.idxs['name']]),
                              str(info[self.my_model.idxs['tags']])))
        return info_list
class PersistentRefDialog(QtGui.QDialog):
    """Dialog for configuring a persistent reference.

    Offers a searchable table of existing persistent data, a local-file
    chooser, and versioning / read-write-priority preferences.

    NOTE(review): __init__ connects to self.search_string and
    self.reset_search, which are not defined on this class — these slots
    must come from elsewhere or the connections are dead; confirm.
    """

    def __init__(self, param, parent=None):
        QtGui.QDialog.__init__(self, parent)
        # two tabs, one for starting from managed, one for local file
        # options are set accordingly
        # uuid assigned when options are set, either new or existing
        # don't compute uuid for local file that hasn't been run
        # need PersistentReference parameter
        # allow user to pass contents in to PersistentRef
        self.setWindowTitle("Configure Persistent Reference...")
        # Defaults for the dialog's outcome.
        self.settings = {'ref_id': None,
                         'ref_version': None,
                         'local_path': None,
                         'versioning': False,
                         'local_read_priority': True,
                         'write_back': True}
        self.current_file = ""
        db_file = "/vistrails/managed/files.db"
        layout = QtGui.QVBoxLayout()
        managed_group = QtGui.QGroupBox("Persistent Data")
        managed_layout = QtGui.QVBoxLayout()
        search = QSearchBox(False, False)
        # Monkey-patched key handler: Return/Enter triggers the search
        # signals before delegating to the stock combo-box handler.
        def keyPressEvent(obj, e):
            print "got to key press event", e.key()
            if e.key() in (QtCore.Qt.Key_Return,QtCore.Qt.Key_Enter):
                if obj.currentText():
                    obj.emit(QtCore.SIGNAL('executeSearch(QString)'),
                             obj.searchEdit.currentText())
                else:
                    obj.emit(QtCore.SIGNAL('resetSearch()'))
            QtGui.QComboBox.keyPressEvent(obj, e)
        print 'keyPressEvent:', search.searchEdit.keyPressEvent
        search.searchEdit.keyPressEvent = keyPressEvent
        print 'keyPressEvent:', search.searchEdit.keyPressEvent
        self.connect(search, QtCore.SIGNAL('executeSearch(QString)'),
                     self.search_string)
        self.connect(search, QtCore.SIGNAL('resetSearch()'),
                     self.reset_search)
        managed_layout.addWidget(search)
        # NOTE(review): PersistentRefView's first parameter is path_type,
        # but a database path is passed here — verify intended.
        self.table = PersistentRefView(db_file, self)
        managed_layout.addWidget(self.table)
        managed_group.setLayout(managed_layout)
        layout.addWidget(managed_group)
        local_group = QtGui.QGroupBox("Local Data")
        local_layout = QtGui.QHBoxLayout()
        self.filename_edit = QtGui.QLineEdit()
        local_layout.addWidget(self.filename_edit)
        filename_button = QtGui.QToolButton()
        filename_button.setIcon(
            QtGui.QIcon(filename_button.style().standardPixmap(
                QtGui.QStyle.SP_DirOpenIcon)))
        filename_button.setIconSize(QtCore.QSize(12,12))
        filename_button.connect(filename_button,
                                QtCore.SIGNAL('clicked()'),
                                self.choose_file)
        local_layout.addWidget(filename_button)
        local_group.setLayout(local_layout)
        layout.addWidget(local_group)
        # Preference radio groups: versioning, read and write priority.
        pref_layout = QtGui.QHBoxLayout()
        version_group = QtGui.QGroupBox("Versioning")
        version_layout = QtGui.QVBoxLayout()
        version_off = QtGui.QRadioButton("Create New ID")
        version_layout.addWidget(version_off)
        version_on = QtGui.QRadioButton("Create New Version")
        version_layout.addWidget(version_on)
        version_group.setLayout(version_layout)
        pref_layout.addWidget(version_group)
        r_priority_group = QtGui.QGroupBox("Read Priority")
        r_priority_layout = QtGui.QVBoxLayout()
        r_priority_off = QtGui.QRadioButton("Local")
        r_priority_layout.addWidget(r_priority_off)
        r_priority_on = QtGui.QRadioButton("Persistent Store")
        r_priority_layout.addWidget(r_priority_on)
        r_priority_group.setLayout(r_priority_layout)
        pref_layout.addWidget(r_priority_group)
        w_priority_group = QtGui.QGroupBox("Write Priority")
        w_priority_layout = QtGui.QVBoxLayout()
        w_priority_off = QtGui.QRadioButton("Local")
        w_priority_layout.addWidget(w_priority_off)
        w_priority_on = QtGui.QRadioButton("Persistent Store")
        w_priority_layout.addWidget(w_priority_on)
        w_priority_group.setLayout(w_priority_layout)
        pref_layout.addWidget(w_priority_group)
        layout.addLayout(pref_layout)
        # OK / Cancel row, right-aligned.
        button_layout = QtGui.QHBoxLayout()
        button_layout.setDirection(QtGui.QBoxLayout.RightToLeft)
        button_layout.setAlignment(QtCore.Qt.AlignRight)
        ok_button = QtGui.QPushButton("OK")
        ok_button.setFixedWidth(100)
        self.connect(ok_button, QtCore.SIGNAL('clicked()'), self.close)
        button_layout.addWidget(ok_button)
        cancel_button = QtGui.QPushButton("Cancel")
        cancel_button.setFixedWidth(100)
        self.connect(cancel_button, QtCore.SIGNAL('clicked()'), self.cancel)
        button_layout.addWidget(cancel_button)
        layout.addLayout(button_layout)
        self.setLayout(layout)

    def close(self):
        """Accept the dialog (OK button)."""
        self.done(QtGui.QDialog.Accepted)

    def cancel(self):
        """Reject the dialog (Cancel button)."""
        self.done(QtGui.QDialog.Rejected)

    def choose_file(self):
        """Open a file dialog and store the chosen local file path."""
        chosen_file = \
            QtGui.QFileDialog.getOpenFileName(self,
                                              'Use File...',
                                              self.current_file,
                                              'All files (*.*)')
        if chosen_file:
            self.current_file = chosen_file
            self.filename_edit.setText(self.current_file)

    def build_table(self):
        """Placeholder: read and display the sqlite table."""
        # read sql table from sqlite3
        # display table
        pass

    def select_file(self):
        """Placeholder: choose a local file and prepare it for persistence."""
        # present file dialog and allow user to choose file
        # compute file hash
        # if content exists in persistent store, ask to reuse
        # (or set reuse checkbox)
        # set use_local checkbox to True
        # don't transfer until execution?
        # create uuid for file
        pass

    def select_remote(self):
        """Placeholder: pick an existing managed entry from the store."""
        # present table of data with search box
        # each data has name, id, user, date, tags, etc.
        # if user selects, check use_local checkbox to False
        # store local is False, too?
        # don't transfer file until execution?
        # populate uuid
        pass

    # starting choice: local file or find existing managed file?
    # allow branch off of existing managed file -- ie no longer a
    # version of the initial file?
    # === options ===
    # overwrite managed on change
    # overwrite local on change
    # local is authoritative
    # keep local copy -- need to allow where to store
class PathChooserLayout(QtGui.QHBoxLayout):
    """A line edit plus browse button for picking a file or directory.

    Emits the 'pathnameChanged()' Qt signal whenever the user picks a
    path through the dialog.
    """

    def __init__(self, is_dir=False, par_widget=None, parent=None):
        """Build the chooser row.

        is_dir: when True, the browse button opens a directory dialog
            instead of a file dialog.
        par_widget: widget used as the parent of the file dialogs.
        parent: usual Qt layout parent.
        """
        QtGui.QHBoxLayout.__init__(self, parent)
        self.par_widget = par_widget
        self.pathname_edit = QtGui.QLineEdit()
        self.addWidget(self.pathname_edit)
        pathname_button = QtGui.QToolButton()
        pathname_button.setIcon(
            QtGui.QIcon(pathname_button.style().standardPixmap(
                QtGui.QStyle.SP_DirOpenIcon)))
        pathname_button.setIconSize(QtCore.QSize(12,12))
        pathname_button.connect(pathname_button,
                                QtCore.SIGNAL('clicked()'),
                                self.choose_path)
        self.addWidget(pathname_button)
        self.is_dir = is_dir

    def choose_path(self):
        """Open the appropriate dialog; on a choice, store the path and
        emit pathnameChanged()."""
        if self.is_dir:
            chosen_path = \
                QtGui.QFileDialog.getExistingDirectory(self.par_widget,
                                                       'Use Directory...',
                                                       self.pathname_edit.text())
        else:
            chosen_path = \
                QtGui.QFileDialog.getOpenFileName(self.par_widget,
                                                  'Use File...',
                                                  self.pathname_edit.text(),
                                                  'All files (*.*)')
        # Fix: was "if chosen_path and chosen_path:" -- a duplicated
        # operand; a single truthiness test is the intended check.
        if chosen_path:
            self.pathname_edit.setText(chosen_path)
            self.emit(QtCore.SIGNAL('pathnameChanged()'))

    def get_path(self):
        """Return the current path as a plain string."""
        return str(self.pathname_edit.text())

    def set_path(self, pathname):
        """Set the line edit to *pathname*, or clear it when falsy."""
        if pathname:
            self.pathname_edit.setText(pathname)
        else:
            self.pathname_edit.clear()
class PersistentRefViewSearch(QtGui.QGroupBox):
    """Group box bundling a search field with a PersistentRefView.

    The search box drives the view's QSortFilterProxyModel wildcard
    filter across all columns.
    """

    def __init__(self, path_type=None, parent=None):
        QtGui.QGroupBox.__init__(self, parent)
        self.build_gui(path_type)

    def build_gui(self, path_type):
        """Create the search box above the reference view."""
        layout = QtGui.QVBoxLayout()
        self.search_ref = QSearchBox(False, False)
        self.connect(self.search_ref,
                     QtCore.SIGNAL('executeSearch(QString)'),
                     self.search_string)
        self.connect(self.search_ref,
                     QtCore.SIGNAL('resetSearch()'),
                     self.reset_search)
        layout.addWidget(self.search_ref)
        self.ref_widget = PersistentRefView(path_type, self)
        layout.addWidget(self.ref_widget)
        layout.setMargin(0)
        self.setLayout(layout)

    # NOTE(review): the parameter name `str` shadows the builtin; a
    # rename would be safe but is out of scope for a comment-only pass.
    def search_string(self, str):
        """Apply the search text as a wildcard filter on the proxy model."""
        self.ref_widget.model().setFilterWildcard(str)

    def reset_search(self):
        """Clear the filter and force the proxy model to re-evaluate."""
        self.ref_widget.model().setFilterWildcard('')
        self.ref_widget.model().invalidate()
class PersistentPathConfiguration(StandardModuleConfigurationWidget):
def __init__(self, module, controller, parent=None,
is_input=None, path_type=None):
StandardModuleConfigurationWidget.__init__(self, module, controller,
parent)
# set title
if module.has_annotation_with_key('__desc__'):
label = module.get_annotation_by_key('__desc__').value.strip()
title = '%s (%s) Module Configuration' % (label, module.name)
else:
title = '%s Module Configuration' % module.name
self.setWindowTitle(title)
self.build_gui(is_input, path_type)
self.set_values()
def sizeHint(self):
return QtCore.QSize(800, 600)
def build_gui(self, is_input, path_type):
self.current_path = ""
layout = QtGui.QVBoxLayout()
# layout.setMargin(0)
# layout.setSpacing(0)
if not is_input:
self.managed_change = \
QtGui.QRadioButton("Always Create New Reference")
layout.addWidget(self.managed_change)
self.connect(self.managed_change, QtCore.SIGNAL("toggled(bool)"),
self.managed_toggle)
else:
self.managed_change = None
self.managed_new = QtGui.QRadioButton("Create New Reference")
self.connect(self.managed_new, QtCore.SIGNAL("toggled(bool)"),
self.new_toggle)
layout.addWidget(self.managed_new)
self.new_group = QtGui.QGroupBox()
new_layout = QtGui.QGridLayout()
self.new_file = None
if is_input:
new_layout.addWidget(QtGui.QLabel("Path:"), 0, 0)
self.new_file = self.get_chooser_layout()
if hasattr(self.new_file, 'pathname_edit'):
self.connect(self.new_file.pathname_edit,
QtCore.SIGNAL("textChanged(QString)"),
self.stateChange)
new_layout.addLayout(self.new_file, 0, 1)
self.connect(self.new_file, QtCore.SIGNAL("pathnameChanged()"),
self.new_file_changed)
new_layout.addWidget(QtGui.QLabel("Name:"), 1, 0)
self.name_edit = QtGui.QLineEdit()
self.connect(self.name_edit, QtCore.SIGNAL("textChanged(QString)"),
self.stateChange)
new_layout.addWidget(self.name_edit, 1, 1)
new_layout.addWidget(QtGui.QLabel("Tags:"), 2, 0)
self.tags_edit = QtGui.QLineEdit()
self.connect(self.tags_edit, QtCore.SIGNAL("textChanged(QString)"),
self.stateChange)
new_layout.addWidget(self.tags_edit, 2, 1)
self.new_group.setLayout(new_layout)
layout.addWidget(self.new_group)
self.managed_existing = QtGui.QRadioButton("Use Existing Reference")
self.connect(self.managed_existing, QtCore.SIGNAL("toggled(bool)"),
self.existing_toggle)
layout.addWidget(self.managed_existing)
# self.existing_group = QtGui.QGroupBox()
# existing_layout = QtGui.QVBoxLayout()
# self.search_ref = QSearchBox(False, False)
# self.connect(self.search_ref,
# QtCore.SIGNAL('executeSearch(QString)'),
# self.search_string)
# self.connect(self.search_ref,
# QtCore.SIGNAL('resetSearch()'),
# self.reset_search)
# existing_layout.addWidget(self.search_ref)
# self.ref_widget = PersistentRefView(path_type, self)
# existing_layout.addWidget(self.ref_widget)
# self.existing_group.setLayout(existing_layout)
self.existing_group = PersistentRefViewSearch(path_type)
self.ref_widget = self.existing_group.ref_widget
self.connect(self.ref_widget,
QtCore.SIGNAL("clicked(QModelIndex)"),
self.ref_changed)
layout.addWidget(self.existing_group)
self.keep_local = QtGui.QCheckBox("Keep Local Version")
layout.addWidget(self.keep_local)
self.connect(self.keep_local, QtCore.SIGNAL("toggled(bool)"),
self.local_toggle)
self.local_group = QtGui.QGroupBox()
local_layout = QtGui.QGridLayout()
self.local_path = self.get_chooser_layout()
if hasattr(self.local_path, 'pathname_edit'):
self.connect(self.local_path.pathname_edit,
QtCore.SIGNAL("textChanged(QString)"),
self.stateChange)
local_layout.addLayout(self.local_path,0,0,1,2)
self.r_priority_local = QtGui.QCheckBox("Read From Local Path")
local_layout.addWidget(self.r_priority_local,1,0)
self.write_managed_checkbox = QtGui.QCheckBox("Write To Local Path")
self.connect(self.write_managed_checkbox, QtCore.SIGNAL("toggled(bool)"),
self.stateChange)
local_layout.addWidget(self.write_managed_checkbox,1,1)
self.local_group.setLayout(local_layout)
layout.addWidget(self.local_group)
if is_input:
self.r_priority_local.setEnabled(True)
self.write_managed_checkbox.setEnabled(False)
else:
self.r_priority_local.setEnabled(False)
self.write_managed_checkbox.setEnabled(True)
button_layout = QtGui.QHBoxLayout()
button_layout.setDirection(QtGui.QBoxLayout.RightToLeft)
button_layout.setAlignment(QtCore.Qt.AlignRight)
self.saveButton = QtGui.QPushButton("Save")
self.saveButton.setFixedWidth(100)
self.connect(self.saveButton, QtCore.SIGNAL('clicked(bool)'),
self.saveTriggered)
button_layout.addWidget(self.saveButton)
self.resetButton = QtGui.QPushButton("Reset")
self.resetButton.setFixedWidth(100)
self.connect(self.resetButton, QtCore.SIGNAL('clicked()'),
self.resetTriggered)
button_layout.addWidget(self.resetButton)
layout.addLayout(button_layout)
self.setLayout(layout)
def saveTriggered(self, checked = False):
self.get_values()
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.state_changed = False
self.emit(QtCore.SIGNAL('doneConfigure'), self.module.id)
def closeEvent(self, event):
self.askToSaveChanges()
event.accept()
def resetTriggered(self):
self.set_values()
self.setUpdatesEnabled(True)
self.state_changed = False
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
def stateChange(self, checked = False, old = None):
if not self.state_changed:
self.state_changed = True
self.saveButton.setEnabled(True)
self.resetButton.setEnabled(True)
def managed_toggle(self, checked):
self.stateChange()
self.new_group.setEnabled(not checked)
self.existing_group.setEnabled(not checked)
def new_toggle(self, checked):
self.stateChange()
self.new_group.setEnabled(checked)
self.existing_group.setEnabled(not checked)
if not checked and self.keep_local.isChecked():
self.keep_local.setChecked(False)
def existing_toggle(self, checked):
self.stateChange()
self.existing_group.setEnabled(checked)
self.new_group.setEnabled(not checked)
def local_toggle(self, checked):
self.stateChange()
self.local_group.setEnabled(checked)
def new_file_changed(self):
self.stateChange()
new_file = str(self.new_file.get_path())
if new_file:
base_name = os.path.basename(new_file)
else:
base_name = ''
self.name_edit.setText(base_name)
self.keep_local.setChecked(True)
self.local_path.set_path(new_file)
self.r_priority_local.setChecked(True)
self.write_managed_checkbox.setChecked(False)
def ref_changed(self, index):
self.stateChange()
if self.keep_local.isChecked():
self.keep_local.setChecked(False)
def set_values(self):
from vistrails.core.modules.module_registry import get_module_registry
reg = get_module_registry()
PersistentRef = \
reg.get_descriptor_by_name(persistence_pkg, 'PersistentRef').module
def func_to_bool(function):
try:
value = function.parameters[0].strValue
except IndexError:
return False
if value and value == 'True':
return True
ref_exists = False
self.existing_ref = None
local_path = None
local_read = None
local_write = None
for function in self.module.functions:
if function.name == 'ref':
self.existing_ref = PersistentRef.translate_to_python(
function.parameters[0].strValue)
self.ref_widget.set_id(self.existing_ref.id)
ref_exists = \
self.ref_widget.set_version(self.existing_ref.version)
self.existing_ref._exists = ref_exists
print 'ref_exists:', ref_exists, self.existing_ref.id, \
self.existing_ref.version
elif function.name == 'value':
if self.new_file:
self.new_file.set_path(function.parameters[0].strValue)
elif function.name == 'localPath':
local_path = Path.translate_to_python(
function.parameters[0].strValue).name
elif function.name == 'readLocal':
local_read = func_to_bool(function)
elif function.name == 'writeLocal':
local_write = func_to_bool(function)
if ref_exists:
self.managed_existing.setChecked(True)
self.existing_toggle(True)
elif self.managed_change and (not self.existing_ref or \
not self.existing_ref.id):
self.managed_change.setChecked(True)
self.managed_toggle(True)
else:
self.managed_new.setChecked(True)
self.new_toggle(True)
if self.existing_ref:
self.name_edit.setText(self.existing_ref.name)
self.tags_edit.setText(self.existing_ref.tags)
if self.existing_ref:
if self.existing_ref.local_path:
self.keep_local.setChecked(True)
self.local_toggle(True)
else:
self.keep_local.setChecked(False)
self.local_toggle(False)
self.local_path.set_path(self.existing_ref.local_path)
self.r_priority_local.setChecked(self.existing_ref.local_read)
self.write_managed_checkbox.setChecked(
self.existing_ref.local_writeback)
else:
self.keep_local.setChecked(False)
self.local_toggle(False)
if local_path is not None:
if local_path:
self.keep_local.setChecked(True)
self.local_toggle(True)
else:
self.keep_local.setChecked(False)
self.local_toggle(False)
self.local_path.set_path(local_path)
if local_read is not None:
self.r_priority_local.setChecked(local_read)
if local_write is not None:
self.write_managed_checkbox.setChecked(local_write)
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.state_changed = False
    def get_values(self):
        """Collect the dialog's widget state into module functions.

        Builds (function_name, values) pairs for 'value', 'localPath',
        'readLocal', 'writeLocal' and 'ref' from the current checkbox /
        chooser state and pushes them onto the module via the controller.
        """
        from vistrails.core.modules.module_registry import get_module_registry
        reg = get_module_registry()
        PersistentRef = \
            reg.get_descriptor_by_name(persistence_pkg, 'PersistentRef').module
        functions = []
        # Only record a new path when one was chosen AND "create new" is on.
        if self.new_file and self.new_file.get_path() and \
                self.managed_new.isChecked():
            # if self.new_file and self.new_file.get_path():
            functions.append(('value', [self.new_file.get_path()]))
        else:
            # None clears any previously stored 'value' function.
            functions.append(('value', None))
            pass
        ref = PersistentRef()
        if self.managed_new.isChecked():
            # Reuse the old id/version if the previous ref never made it
            # into the store; otherwise mint a fresh uuid.
            if self.existing_ref and not self.existing_ref._exists:
                ref.id = self.existing_ref.id
                ref.version = self.existing_ref.version
            else:
                ref.id = str(uuid.uuid1())
                ref.version = None
            # endif
            ref.name = str(self.name_edit.text())
            ref.tags = str(self.tags_edit.text())
        elif self.managed_existing.isChecked():
            (ref.id, ref.version, ref.name, ref.tags) = \
                self.ref_widget.get_info()
        if self.keep_local.isChecked():
            # Booleans are serialized as strings for the function values.
            functions.append(('localPath', [self.local_path.get_path()]))
            functions.append(('readLocal',
                              [str(self.r_priority_local.isChecked())]))
            functions.append(('writeLocal',
                              [str(self.write_managed_checkbox.isChecked())]))
            # ref.local_path = self.local_path.get_path()
            # ref.local_read = self.r_priority_local.isChecked()
            # ref.local_writeback = self.write_managed_checkbox.isChecked()
        else:
            ref.local_path = None
            # functions.append(('localPath', None))
            functions.append(('readLocal', None))
            functions.append(('writeLocal', None))
            pass
        functions.append(('ref', [PersistentRef.translate_to_string(ref)]))
        self.controller.update_functions(self.module, functions)
class PersistentInputPathConfiguration(PersistentPathConfiguration):

    """Path configuration dialog specialized for input paths."""

    def __init__(self, module, controller, parent=None, path_type=None):
        """Delegate to the base dialog with is_input set to True."""
        PersistentPathConfiguration.__init__(
            self, module, controller, parent, True, path_type)
class PersistentOutputPathConfiguration(PersistentPathConfiguration):

    """Path configuration dialog specialized for output paths."""

    def __init__(self, module, controller, parent=None, path_type=None):
        """Delegate to the base dialog with is_input set to False."""
        PersistentPathConfiguration.__init__(
            self, module, controller, parent, False, path_type)
class PersistentRefInlineWidget(QtGui.QWidget, ConstantWidgetMixin):
    """Inline parameter widget that shows a "Configure" button which
    opens the full PersistentRef dialog."""
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, param, parent=None):
        """Wrap the given module parameter in a one-button widget."""
        self.param = param
        self.strValue = param.strValue
        contentsType = param.type
        QtGui.QWidget.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        layout = QtGui.QHBoxLayout()
        # FIXME Use a greyed QLineEdit?
        # layout.addWidget(QtGui.QLabel("File Info:"))
        button = QtGui.QPushButton("Configure")
        button.setMaximumWidth(100)
        self.connect(button, QtCore.SIGNAL('clicked()'), self.run_dialog)
        layout.addWidget(button)
        layout.setMargin(5)
        layout.setSpacing(5)
        self.setLayout(layout)
    def run_dialog(self):
        """Open the PersistentRef dialog; on accept, update the contents.

        NOTE(review): the accepted branch stores the literal "test" --
        looks like placeholder code; confirm intended value.
        """
        dialog = PersistentRefDialog(self.param)
        if dialog.exec_() == QtGui.QDialog.Accepted:
            self.setContents("test")
            #use same translate call?, False)
    def contents(self):
        """Return the current string value of the parameter."""
        return self.strValue
    def setContents(self, strValue, silent=True):
        """Store a new string value; notify the parent unless silent."""
        self.strValue = strValue
        if not silent:
            self.update_parent()
class PersistentInputFileConfiguration(PersistentInputPathConfiguration):

    """Input-path configuration for single files (path type "blob")."""

    def __init__(self, module, controller, parent=None):
        """Initialize as a "blob" (file) input configuration."""
        PersistentInputPathConfiguration.__init__(
            self, module, controller, parent, "blob")

    def get_chooser_layout(self):
        """Return a file (non-directory) chooser layout."""
        return PathChooserLayout(False, self)
class PersistentInputDirConfiguration(PersistentInputPathConfiguration):

    """Input-path configuration for directories (path type "tree")."""

    def __init__(self, module, controller, parent=None):
        """Initialize as a "tree" (directory) input configuration."""
        PersistentInputPathConfiguration.__init__(
            self, module, controller, parent, "tree")

    def get_chooser_layout(self):
        """Return a directory chooser layout."""
        return PathChooserLayout(True, self)
class PersistentOutputFileConfiguration(PersistentOutputPathConfiguration):

    """Output-path configuration for single files (path type "blob")."""

    def __init__(self, module, controller, parent=None):
        """Initialize as a "blob" (file) output configuration."""
        PersistentOutputPathConfiguration.__init__(
            self, module, controller, parent, "blob")

    def get_chooser_layout(self):
        """Return a file (non-directory) chooser layout."""
        return PathChooserLayout(False, self)
class PersistentOutputDirConfiguration(PersistentOutputPathConfiguration):

    """Output-path configuration for directories (path type "tree")."""

    def __init__(self, module, controller, parent=None):
        """Initialize as a "tree" (directory) output configuration."""
        PersistentOutputPathConfiguration.__init__(
            self, module, controller, parent, "tree")

    def get_chooser_layout(self):
        """Return a directory chooser layout."""
        return PathChooserLayout(True, self)
class PersistentConfiguration(QtGui.QDialog):
    """Non-modal dialog for browsing, exporting and (eventually)
    deleting entries in the persistent store."""
    def __init__(self, parent=None):
        """Build the GUI and grab the shared database-access singleton."""
        QtGui.QDialog.__init__(self, parent)
        self.setModal(False)
        self.build_gui()
        self.db_access = DatabaseAccessSingleton()
    def build_gui(self):
        """Lay out the searchable ref view plus Write/Delete buttons."""
        layout = QtGui.QVBoxLayout()
        self.ref_search = PersistentRefViewSearch(None)
        # Allow multiple entries to be selected for bulk export.
        self.ref_search.ref_widget.setSelectionMode(
            QtGui.QAbstractItemView.ExtendedSelection)
        layout.addWidget(self.ref_search)
        button_layout = QtGui.QHBoxLayout()
        button_layout.setAlignment(QtCore.Qt.AlignRight)
        write_button = QtGui.QPushButton("Write...")
        write_button.setAutoDefault(False)
        self.connect(write_button, QtCore.SIGNAL("clicked()"), self.write)
        button_layout.addWidget(write_button)
        delete_button = QtGui.QPushButton("Delete...")
        delete_button.setAutoDefault(False)
        self.connect(delete_button, QtCore.SIGNAL("clicked()"), self.delete)
        button_layout.addWidget(delete_button)
        layout.addLayout(button_layout)
        self.setLayout(layout)
    def sizeHint(self):
        """Return the preferred dialog size."""
        return QtCore.QSize(800,320)
    def write(self):
        """Export the selected store entries to user-chosen paths.

        info tuples are (id, version, name, ...); a missing version is
        treated as "HEAD".
        """
        info_list = self.ref_search.ref_widget.get_info_list()
        if len(info_list) < 1:
            return
        elif len(info_list) == 1:
            # save single file/dir
            info = info_list[0]
            name = info[2]
            chosen_path = QtGui.QFileDialog.getSaveFileName(
                self,
                'Save...',
                name)
            if not chosen_path:
                return
            # FIXME really should move this calls to a higher level so
            # we don't need to instantiate a module
            if info[1] is None:
                version = "HEAD"
            else:
                version = info[1]
            repo.get_current_repo().get_path(info[0], version, None,
                                             chosen_path)
        else:
            # have multiple files/dirs
            # NOTE(review): unlike the single-entry branch, a cancelled
            # dialog (empty chosen_path) is not checked here -- confirm.
            chosen_path = QtGui.QFileDialog.getExistingDirectory(
                self,
                'Save All to Directory...')
            has_overwrite = False
            # if untitled (no name, use the uuid)
            for info in info_list:
                if info[2]:
                    name = info[2]
                else:
                    name = info[0]
                full_path = os.path.join(chosen_path, name)
                if os.path.exists(full_path):
                    has_overwrite = True
            # Ask once for all conflicting paths, not per file.
            if has_overwrite:
                question_str = "One or more of the paths already exist. " + \
                    "Overwrite?"
                ret_val = \
                    QtGui.QMessageBox.question(self, "Overwrite", \
                                                   question_str, \
                                                   QtGui.QMessageBox.Cancel | \
                                                   QtGui.QMessageBox.No | \
                                                   QtGui.QMessageBox.Yes)
                if ret_val != QtGui.QMessageBox.Yes:
                    return
            for info in info_list:
                if info[1] is None:
                    version = "HEAD"
                else:
                    version = info[1]
                if info[2]:
                    name = info[2]
                else:
                    name = info[0]
                full_path = os.path.join(chosen_path, name)
                # NOTE(review): the single-entry branch calls get_path()
                # while this branch calls git_get_path() -- verify both
                # names are intended on the current repo interface.
                repo.get_current_repo().git_get_path(info[0], version, None,
                                                     full_path)
    def delete(self):
        """Delete selected entries from the store.

        Currently disabled: the method shows a message box and returns;
        everything after the early return is intentionally unreachable.
        """
        QtGui.QMessageBox.critical(self, "Delete",
                                   "This feature is not functional in the "
                                   "current version of VisTrails and has been "
                                   "disabled for this release.")
        return
        from init import PersistentPath
        info_list = self.ref_search.ref_widget.get_info_list()
        if len(info_list) < 1:
            return
        delete_str = "This will permanently delete the selected data " + \
            "from the peristent store. This cannot be undone. Proceed?"
        question_f = QtGui.QMessageBox.question
        ret_val = question_f(self, "Delete", delete_str, \
                                 QtGui.QMessageBox.Cancel | \
                                 QtGui.QMessageBox.Ok)
        if ret_val != QtGui.QMessageBox.Ok:
            return
        git_util = PersistentPath()
        db_access = DatabaseAccessSingleton()
        # FIXME keep entry in database with flag for deleted?
        # NEED TO update the model...
        for info in info_list:
            delete_where = {'id': info[0]}
            if info[1] is None:
                git_util.git_remove_path(info[0])
                db_access.delete_from_database(delete_where)
            else:
                # FIXME implement delete for versions...
                delete_where['version'] = info[1]
                print "NOT IMPLEMENTED FOR VERSIONS!!"
|
|
"""VPX platform."""
import asyncio
from typing import Dict, Optional
import logging
from mpf.platforms.interfaces.driver_platform_interface import DriverPlatformInterface, PulseSettings, HoldSettings
from mpf.platforms.interfaces.light_platform_interface import LightPlatformInterface
from mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface
from mpf.core.platform import LightsPlatform, SwitchPlatform, DriverPlatform, SwitchSettings, DriverSettings, \
SwitchConfig, DriverConfig, RepulseSettings
class VirtualPinballSwitch(SwitchPlatformInterface):

    """A switch as seen by a connected Visual Pinball X table."""

    __slots__ = ["state"]

    def __init__(self, config, number, platform):
        """Initialise the switch; the idle state equals the invert flag."""
        super().__init__(config, number, platform)
        # An inverted switch reads "active" while physically idle.
        self.state = self.config.invert

    def get_board_name(self):
        """Return the name of the board of this switch."""
        return "VPX"
class VirtualPinballLight(LightPlatformInterface):

    """A light (lamp, GI string, LED or flasher) driven from VPX."""

    __slots__ = ["_current_fade", "subtype", "hw_number", "machine"]

    def __init__(self, number, subtype, hw_number, machine):
        """Initialise the light with no fade in progress."""
        super().__init__(number)
        # (start_brightness, start_time, target_brightness, target_time)
        self._current_fade = (0, -1, 0, -1)
        self.subtype = subtype
        self.hw_number = hw_number
        self.machine = machine

    @property
    def current_brightness(self) -> float:
        """Return the brightness right now, interpolating an active fade."""
        now = self.machine.clock.get_time()
        from_value, from_time, to_value, to_time = self._current_fade
        if now >= to_time:
            # Fade finished (or never started): sit at the target value.
            return to_value
        elapsed_fraction = (now - from_time) / (to_time - from_time)
        return from_value + (to_value - from_value) * elapsed_fraction

    def set_fade(self, start_brightness, start_time, target_brightness, target_time):
        """Remember a new fade ramp for later interpolation."""
        self._current_fade = (start_brightness, start_time,
                              target_brightness, target_time)

    def get_board_name(self):
        """Return the name of the board of this light."""
        return "VPX"

    def is_successor_of(self, other):
        """Not implemented."""
        raise AssertionError("Not implemented. Let us know if you need it.")

    def get_successor_number(self):
        """Not implemented."""
        raise AssertionError("Not implemented. Let us know if you need it.")

    def __lt__(self, other):
        """Not implemented."""
        raise AssertionError("Not implemented. Let us know if you need it.")
class VirtualPinballDriver(DriverPlatformInterface):

    """A coil/driver as seen by a connected VPX table."""

    __slots__ = ["clock", "_state"]

    def __init__(self, config, number, clock):
        """Initialise virtual driver to disabled."""
        super().__init__(config, number)
        self.clock = clock
        # False/True for off/on, or a float timestamp until which an
        # in-flight pulse keeps the coil active.
        self._state = False

    def get_board_name(self):
        """Return the name of the board of this driver."""
        return "VPX"

    def disable(self):
        """Turn the virtual coil off."""
        self._state = False

    def enable(self, pulse_settings: PulseSettings, hold_settings: HoldSettings):
        """Turn the virtual coil on indefinitely."""
        del pulse_settings, hold_settings
        self._state = True

    def pulse(self, pulse_settings: PulseSettings):
        """Fire the coil for pulse_settings.duration milliseconds."""
        self._state = self.clock.get_time() + (pulse_settings.duration / 1000.0)

    def timed_enable(self, pulse_settings: PulseSettings, hold_settings: HoldSettings):
        """Pulse and enable the coil for an explicit duration."""
        raise NotImplementedError

    @property
    def state(self) -> bool:
        """Return True while the coil is (still) active."""
        if isinstance(self._state, bool):
            return self._state
        # A float encodes "pulsed until this timestamp".
        return bool(self.clock.get_time() < self._state)
class VirtualPinballPlatform(LightsPlatform, SwitchPlatform, DriverPlatform):

    """VPX platform.

    Bridges MPF to a Visual Pinball X table: the table sends
    "vpcom_bridge" commands over BCP (dispatched to vpx_* methods) and
    polls for changed solenoids/lights.
    """

    __slots__ = ["_lights", "_switches", "_drivers", "_last_drivers", "_last_lights", "_started", "rules"]

    def __init__(self, machine):
        """Initialise VPX platform."""
        super().__init__(machine)
        self._lights = {}           # type: Dict[str, VirtualPinballLight]
        self._switches = {}         # type: Dict[str, VirtualPinballSwitch]
        self._drivers = {}          # type: Dict[str, VirtualPinballDriver]
        self._last_drivers = {}     # type: Dict[str, bool]
        self._last_lights = {}      # type: Dict[str, bool]
        self._started = asyncio.Event()
        self.log = logging.getLogger("VPX Platform")
        self.log.debug("Configuring VPX hardware interface.")
        # (hw_switch, hw_driver) -> hold flag for active hardware rules.
        self.rules = {}

    async def initialize(self):
        """Register the BCP bridge and hold init until VPX connects."""
        self.machine.bcp.interface.register_command_callback("vpcom_bridge", self._dispatch)
        self.machine.events.add_async_handler("init_phase_5", self._wait_for_connect)

    async def _wait_for_connect(self):
        """Wait until VPX connects."""
        await self._started.wait()

    async def _dispatch(self, client, subcommand=None, **kwargs):
        """Dispatch a VPX COM call to the matching vpx_* handler."""
        self.log.debug("Got command %s args: %s", subcommand, kwargs)
        if not subcommand:
            self.machine.bcp.transport.send_to_client(client, "vpcom_bridge_response", error="command missing")
            # Bug fix: previously fell through and crashed below on
            # '"vpx_" + None' (TypeError, not caught by the
            # AttributeError handler).
            return
        try:
            method = getattr(self, "vpx_" + subcommand)
        except AttributeError:
            self.machine.bcp.transport.send_to_client(client, "vpcom_bridge_response",
                                                      error="Unknown command {}".format(subcommand))
            return
        try:
            result = method(**kwargs)
        # pylint: disable-msg=broad-except
        except Exception as e:
            # Report handler failures back to the table instead of
            # killing the BCP connection.
            self.machine.bcp.transport.send_to_client(client, "vpcom_bridge_response",
                                                      error="Exception: {}".format(e))
            return
        self.machine.bcp.transport.send_to_client(client, "vpcom_bridge_response", result=result)

    def vpx_start(self):
        """Start machine."""
        self._started.set()
        return True

    def vpx_get_switch(self, number):
        """Return the physical switch value (invert applied)."""
        # pylint: disable-msg=no-else-return
        if self._switches[str(number)].config.invert:
            return not self._switches[str(number)].state
        else:
            return self._switches[str(number)].state

    def vpx_switch(self, number):
        """Return switch value (alias for vpx_get_switch)."""
        return self.vpx_get_switch(number)

    def vpx_set_switch(self, number, value):
        """Update switch from VPX."""
        self._switches[str(number)].state = value
        self.machine.switch_controller.process_switch_by_num(state=1 if value else 0,
                                                             num=str(number),
                                                             platform=self)
        return True

    def vpx_pulsesw(self, number):
        """Pulse switch from VPX: momentary hit immediately released."""
        self._switches[str(number)].state = True
        self.machine.switch_controller.process_switch_by_num(state=1,
                                                             num=str(number),
                                                             platform=self)
        self._switches[str(number)].state = False
        self.machine.switch_controller.process_switch_by_num(state=0,
                                                             num=str(number),
                                                             platform=self)
        return True

    def vpx_changed_solenoids(self):
        """Return solenoids which changed state since the last call."""
        changed_drivers = []
        for number, driver in self._drivers.items():
            if driver.state != self._last_drivers[number]:
                changed_drivers.append((number, driver.state))
                self._last_drivers[number] = driver.state
        return changed_drivers

    def _get_changed_lights_by_subtype(self, subtype):
        """Return lights of one subtype whose on/off state changed."""
        changed_lamps = []
        for number, light in self._lights.items():
            if light.subtype != subtype:
                continue
            brightness = light.current_brightness
            # VPX only understands on/off; threshold at half brightness.
            state = bool(brightness > 0.5)
            if state != self._last_lights[number]:
                changed_lamps.append((light.hw_number, state))
                self._last_lights[number] = state
        return changed_lamps

    def vpx_changed_lamps(self):
        """Return changed matrix lamps since last call."""
        return self._get_changed_lights_by_subtype("matrix")

    def vpx_changed_gi_strings(self):
        """Return changed GI strings since last call."""
        return self._get_changed_lights_by_subtype("gi")

    def vpx_changed_leds(self):
        """Return changed LEDs since last call."""
        return self._get_changed_lights_by_subtype("led")

    def vpx_changed_flashers(self):
        """Return changed flashers since last call."""
        return self._get_changed_lights_by_subtype("flasher")

    def vpx_mech(self, number):
        """Not implemented."""
        self.log.warning("Command \"mech\" unimplemented: %s", number)
        return True

    def vpx_get_mech(self, number):
        """Not implemented."""
        self.log.warning("Command \"get_mech\" unimplemented: %s", number)
        return True

    def vpx_set_mech(self, number, value):
        """Not implemented."""
        self.log.warning("Command \"set_mech\" unimplemented: %s %s", number, value)
        return True

    def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> "SwitchPlatformInterface":
        """Configure VPX switch."""
        number = str(number)
        switch = VirtualPinballSwitch(config, number, self)
        self._switches[number] = switch
        return switch

    def configure_driver(self, config: DriverConfig, number: str, platform_settings: dict) -> "DriverPlatformInterface":
        """Configure VPX driver."""
        number = str(number)
        driver = VirtualPinballDriver(config, number, self.machine.clock)
        self._drivers[number] = driver
        self._last_drivers[number] = False
        return driver

    def vpx_get_hardwarerules(self):
        """Return hardware rules as (switch, coil, hold) triples."""
        hardware_rules = []
        for rswitchandcoil, hold in self.rules.items():
            hardware_rules.append((rswitchandcoil[0].number, rswitchandcoil[1].number, hold))
        return hardware_rules

    def _add_rule(self, switch, coil, hold):
        """Add rule with or without hold."""
        if (switch, coil) in self.rules:
            raise AssertionError("Overwrote a rule without clearing it first {} <-> {}".format(
                switch, coil))
        self.rules[(switch, coil)] = hold

    def set_pulse_on_hit_and_enable_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
        """Pulse on hit and hold."""
        self._add_rule(enable_switch.hw_switch, coil.hw_driver, True)

    def set_pulse_on_hit_and_release_and_disable_rule(self, enable_switch: SwitchSettings, eos_switch: SwitchSettings,
                                                      coil: DriverSettings,
                                                      repulse_settings: Optional[RepulseSettings]):
        """Pulse on hit, disable on disable_switch hit."""
        del eos_switch
        # eos_switch is missing here intentionally
        self._add_rule(enable_switch.hw_switch, coil.hw_driver, False)

    def set_pulse_on_hit_and_enable_and_release_and_disable_rule(self, enable_switch: SwitchSettings,
                                                                 eos_switch: SwitchSettings, coil: DriverSettings,
                                                                 repulse_settings: Optional[RepulseSettings]):
        """Pulse on hit and hold, disable on disable_switch hit."""
        del eos_switch
        # eos_switch is missing here intentionally
        self._add_rule(enable_switch.hw_switch, coil.hw_driver, True)

    def set_pulse_on_hit_and_release_rule(self, enable_switch: SwitchSettings, coil: DriverSettings):
        """Pulse on hit and hold."""
        self._add_rule(enable_switch.hw_switch, coil.hw_driver, True)

    def set_pulse_on_hit_rule(self, enable_switch: SwitchSettings,
                              coil: DriverSettings):
        """Pulse on hit and release."""
        self._add_rule(enable_switch.hw_switch, coil.hw_driver, False)

    def clear_hw_rule(self, switch: SwitchSettings, coil: DriverSettings):
        """Clear hw rule."""
        if (switch.hw_switch, coil.hw_driver) in self.rules:
            del self.rules[(switch.hw_switch, coil.hw_driver)]
        else:
            self.log.debug("Tried to clear a non-existing rules %s <-> %s", switch, coil)

    def vpx_get_coilactive(self, number):
        """Return True if a MPF hw rule for the coil(number) exists."""
        for rswitchandcoil, _ in self.rules.items():
            if rswitchandcoil[1].number == number:
                return True
        return False

    async def get_hw_switch_states(self):
        """Return initial switch state (logical state, invert applied)."""
        hw_switches = {}
        for switch in self._switches.values():
            hw_switches[switch.number] = switch.state ^ switch.config.invert
        return hw_switches

    def configure_light(self, number: str, subtype: str, config, platform_settings: dict) -> "LightPlatformInterface":
        """Configure a VPX light."""
        del config
        if subtype and subtype not in ("gi", "matrix", "led", "flasher"):
            raise AssertionError("Unknown subtype: {}".format(subtype))
        if not subtype:
            subtype = "matrix"
        number = str(number)
        # Key on number+subtype so the same hw number can exist in
        # several light categories.
        key = number + "-" + subtype
        light = VirtualPinballLight(key, subtype, number, self.machine)
        self._lights[key] = light
        self._last_lights[key] = False
        return light

    def parse_light_number_to_channels(self, number: str, subtype: str):
        """Parse channel str to a list of channels."""
        # pylint: disable-msg=no-else-return
        if subtype in ("gi", "matrix", "led", "flasher") or not subtype:
            return [
                {
                    "number": str(number)
                }
            ]
        else:
            raise AssertionError("Unknown subtype {}".format(subtype))
|
|
import unittest
import sys
import numpy as np
import scipy.sparse as sp
import joblib
from io import StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import deprecated
from sklearn.utils._testing import (assert_raises_regex,
ignore_warnings,
assert_warns, assert_raises,
SkipTest)
from sklearn.utils.estimator_checks import check_estimator, _NotAnArray
from sklearn.utils.estimator_checks \
import check_class_weight_balanced_linear_classifier
from sklearn.utils.estimator_checks import set_random_state
from sklearn.utils.estimator_checks import _set_checking_parameters
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.utils.estimator_checks import check_fit_score_takes_y
from sklearn.utils.estimator_checks import check_no_attributes_set_in_init
from sklearn.utils.estimator_checks import check_classifier_data_not_an_array
from sklearn.utils.estimator_checks import check_regressor_data_not_an_array
from sklearn.utils.validation import check_is_fitted
from sklearn.utils.estimator_checks import check_outlier_corruption
from sklearn.utils.fixes import np_version, parse_version
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression, SGDClassifier
from sklearn.mixture import GaussianMixture
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import NMF
from sklearn.linear_model import MultiTaskElasticNet, LogisticRegression
from sklearn.svm import SVC, NuSVC
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils.validation import check_array
from sklearn.utils import all_estimators
from sklearn.exceptions import SkipTestWarning
class CorrectNotFittedError(ValueError):
    """Exception class to raise if estimator is used before fitting.

    Like NotFittedError, it inherits from ValueError, but not from
    AttributeError. Used for testing only.
    """
    # Intentionally empty: the class exists only for its inheritance.
class BaseBadClassifier(ClassifierMixin, BaseEstimator):
    """Deliberately broken classifier: fit skips input validation."""
    def fit(self, X, y):
        # No validation on purpose -- check_estimator must catch this.
        return self
    def predict(self, X):
        # Constant prediction, no input checking, no fitted check.
        return np.ones(X.shape[0])
class ChangesDict(BaseEstimator):
    """Estimator that mutates its __dict__ during predict (forbidden)."""
    def __init__(self, key=0):
        self.key = key
    def fit(self, X, y=None):
        X, y = self._validate_data(X, y)
        return self
    def predict(self, X):
        X = check_array(X)
        # Mutating state in predict is the bug under test.
        self.key = 1000
        return np.ones(X.shape[0])
class SetsWrongAttribute(BaseEstimator):
    """Estimator that adds a public attribute during fit (forbidden:
    fit may only add attributes starting or ending with underscore)."""
    def __init__(self, acceptable_key=0):
        self.acceptable_key = acceptable_key
    def fit(self, X, y=None):
        # Public name without trailing underscore -- the bug under test.
        self.wrong_attribute = 0
        X, y = self._validate_data(X, y)
        return self
class ChangesWrongAttribute(BaseEstimator):
    """Estimator that mutates an __init__ parameter during fit
    (forbidden: hyper-parameters must stay untouched)."""
    def __init__(self, wrong_attribute=0):
        self.wrong_attribute = wrong_attribute
    def fit(self, X, y=None):
        # Overwrites the constructor parameter -- the bug under test.
        self.wrong_attribute = 1
        X, y = self._validate_data(X, y)
        return self
class ChangesUnderscoreAttribute(BaseEstimator):
    """Well-behaved estimator: only adds a private (underscore)
    attribute during fit, which check_estimator must accept."""
    def fit(self, X, y=None):
        self._good_attribute = 1
        X, y = self._validate_data(X, y)
        return self
class RaisesErrorInSetParams(BaseEstimator):
    """Estimator whose set_params raises for invalid values; triggers a
    UserWarning from check_estimator rather than a hard failure."""
    def __init__(self, p=0):
        self.p = p
    def set_params(self, **kwargs):
        if 'p' in kwargs:
            p = kwargs.pop('p')
            if p < 0:
                raise ValueError("p can't be less than 0")
            self.p = p
        return super().set_params(**kwargs)
    def fit(self, X, y=None):
        X, y = self._validate_data(X, y)
        return self
class ModifiesValueInsteadOfRaisingError(BaseEstimator):
    """Estimator whose set_params silently clips invalid values, so
    get_params no longer matches what was passed to set_params."""
    def __init__(self, p=0):
        self.p = p
    def set_params(self, **kwargs):
        if 'p' in kwargs:
            p = kwargs.pop('p')
            if p < 0:
                # Silently changing the value is the bug under test.
                p = 0
            self.p = p
        return super().set_params(**kwargs)
    def fit(self, X, y=None):
        X, y = self._validate_data(X, y)
        return self
class ModifiesAnotherValue(BaseEstimator):
    """Estimator whose set_params for one parameter also changes another
    parameter -- get_params will not match set_params."""
    def __init__(self, a=0, b='method1'):
        self.a = a
        self.b = b
    def set_params(self, **kwargs):
        if 'a' in kwargs:
            a = kwargs.pop('a')
            self.a = a
            if a is None:
                # Side effect on 'b' is the bug under test.
                kwargs.pop('b')
                self.b = 'method2'
        return super().set_params(**kwargs)
    def fit(self, X, y=None):
        X, y = self._validate_data(X, y)
        return self
class NoCheckinPredict(BaseBadClassifier):
    """Estimator that validates in fit but not in predict (inherited
    predict does no check_array / NaN-inf checking)."""
    def fit(self, X, y):
        X, y = self._validate_data(X, y)
        return self
class NoSparseClassifier(BaseBadClassifier):
    """Estimator that accepts sparse input during validation and then
    raises an unhelpful error -- fails ungracefully on sparse data."""
    def fit(self, X, y):
        X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'])
        if sp.issparse(X):
            # Unhelpful message on purpose.
            raise ValueError("Nonsensical Error")
        return self
    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
    """Well-behaved variant: predict calls check_is_fitted, so using it
    unfitted raises a proper NotFittedError."""
    def fit(self, X, y):
        X, y = self._validate_data(X, y)
        self.coef_ = np.ones(X.shape[1])
        return self
    def predict(self, X):
        check_is_fitted(self)
        X = check_array(X)
        return np.ones(X.shape[0])
class NoSampleWeightPandasSeriesType(BaseEstimator):
    """Estimator that rejects sample_weight given as a pandas.Series.

    Used to verify that check_estimator flags estimators whose fit
    refuses pandas.Series sample weights.
    """
    def fit(self, X, y, sample_weight=None):
        # Convert data
        X, y = self._validate_data(
            X, y,
            accept_sparse=("csr", "csc"),
            multi_output=True,
            y_numeric=True)
        # Function is only called after we verify that pandas is installed
        from pandas import Series
        if isinstance(sample_weight, Series):
            # Bug fix: the two string literals previously concatenated
            # without a space ("...'sample_weight'of type...").
            raise ValueError("Estimator does not accept 'sample_weight' "
                             "of type pandas.Series")
        return self
    def predict(self, X):
        X = check_array(X)
        return np.ones(X.shape[0])
class BadBalancedWeightsClassifier(BaseBadClassifier):
    """Estimator that corrupts the computed balanced class weights
    (simulated bug for the class_weight='balanced' check)."""
    def __init__(self, class_weight=None):
        self.class_weight = class_weight
    def fit(self, X, y):
        from sklearn.preprocessing import LabelEncoder
        from sklearn.utils import compute_class_weight
        label_encoder = LabelEncoder().fit(y)
        classes = label_encoder.classes_
        class_weight = compute_class_weight(self.class_weight, classes=classes,
                                            y=y)
        # Intentionally modify the balanced class_weight
        # to simulate a bug and raise an exception
        if self.class_weight == "balanced":
            class_weight += 1.
        # Simply assigning coef_ to the class_weight
        self.coef_ = class_weight
        return self
class BadTransformerWithoutMixin(BaseEstimator):
    """Transformer that does not inherit TransformerMixin, so it lacks
    fit_transform -- used to check the transformer tests still run."""
    def fit(self, X, y=None):
        X = self._validate_data(X)
        return self
    def transform(self, X):
        X = check_array(X)
        return X
class NotInvariantPredict(BaseEstimator):
    """Estimator whose predict depends on the batch size, violating the
    subset-invariance property check_estimator verifies."""
    def fit(self, X, y):
        # Convert data
        X, y = self._validate_data(
            X, y,
            accept_sparse=("csr", "csc"),
            multi_output=True,
            y_numeric=True)
        return self
    def predict(self, X):
        # return 1 if X has more than one element else return 0
        X = check_array(X)
        if X.shape[0] > 1:
            return np.ones(X.shape[0])
        return np.zeros(X.shape[0])
class LargeSparseNotSupportedClassifier(BaseEstimator):
    """Estimator that claims to accept large (64-bit indexed) sparse
    matrices but then rejects them -- fails the large-indices check."""
    def fit(self, X, y):
        X, y = self._validate_data(
            X, y,
            accept_sparse=("csr", "csc", "coo"),
            accept_large_sparse=True,
            multi_output=True,
            y_numeric=True)
        if sp.issparse(X):
            # Rejecting int64 indices after accepting them above is the
            # inconsistency under test.
            if X.getformat() == "coo":
                if X.row.dtype == "int64" or X.col.dtype == "int64":
                    raise ValueError(
                        "Estimator doesn't support 64-bit indices")
            elif X.getformat() in ["csc", "csr"]:
                assert "int64" not in (X.indices.dtype, X.indptr.dtype),\
                    "Estimator doesn't support 64-bit indices"
        return self
class SparseTransformer(BaseEstimator):
    """Transformer whose output is a sparse CSR matrix -- non-regression
    fixture for estimators that transform to sparse data."""
    def fit(self, X, y=None):
        # Remember the training shape to validate transform input.
        self.X_shape_ = self._validate_data(X).shape
        return self
    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)
    def transform(self, X):
        X = check_array(X)
        if X.shape[1] != self.X_shape_[1]:
            raise ValueError('Bad number of features')
        return sp.csr_matrix(X)
class EstimatorInconsistentForPandas(BaseEstimator):
    """Estimator that stores a different value depending on whether the
    input is a pandas DataFrame or a plain array (inconsistent)."""
    def fit(self, X, y):
        try:
            from pandas import DataFrame
            if isinstance(X, DataFrame):
                # DataFrame path reads cell (0, 0)...
                self.value_ = X.iloc[0, 0]
            else:
                # ...while the array path reads cell (1, 0).
                X = check_array(X)
                self.value_ = X[1, 0]
            return self
        except ImportError:
            X = check_array(X)
            self.value_ = X[1, 0]
            return self
    def predict(self, X):
        X = check_array(X)
        return np.array([self.value_] * X.shape[0])
class UntaggedBinaryClassifier(SGDClassifier):
    # Toy classifier that only supports binary classification, will fail tests.
    """Binary-only classifier WITHOUT the 'binary_only' tag, so the
    multiclass checks will make it raise."""
    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        super().fit(X, y, coef_init, intercept_init, sample_weight)
        if len(self.classes_) > 2:
            raise ValueError('Only 2 classes are supported')
        return self
    def partial_fit(self, X, y, classes=None, sample_weight=None):
        super().partial_fit(X=X, y=y, classes=classes,
                            sample_weight=sample_weight)
        if len(self.classes_) > 2:
            raise ValueError('Only 2 classes are supported')
        return self
class TaggedBinaryClassifier(UntaggedBinaryClassifier):
    # Toy classifier that only supports binary classification.
    """Same binary-only classifier, but correctly tagged so the
    multiclass checks are skipped."""
    def _more_tags(self):
        return {'binary_only': True}
class RequiresPositiveYRegressor(LinearRegression):
    """Regressor that only accepts strictly positive targets, declared
    via the 'requires_positive_y' tag."""
    def fit(self, X, y):
        X, y = self._validate_data(X, y, multi_output=True)
        if (y <= 0).any():
            raise ValueError('negative y values not supported!')
        return super().fit(X, y)
    def _more_tags(self):
        return {"requires_positive_y": True}
class PoorScoreLogisticRegression(LogisticRegression):
    """Classifier with a deliberately shifted decision function and the
    'poor_score' tag, so accuracy checks are relaxed."""
    def decision_function(self, X):
        # Constant offset degrades the score on purpose.
        return super().decision_function(X) + 1
    def _more_tags(self):
        return {"poor_score": True}
def test_not_an_array_array_function():
    """_NotAnArray must intercept numpy's __array_function__ protocol."""
    if np_version < parse_version('1.17'):
        raise SkipTest("array_function protocol not supported in numpy <1.17")
    not_array = _NotAnArray(np.ones(10))
    msg = "Don't want to call array_function sum!"
    assert_raises_regex(TypeError, msg, np.sum, not_array)
    # always returns True
    assert np.may_share_memory(not_array, None)
def test_check_fit_score_takes_y_works_on_deprecated_fit():
    """check_fit_score_takes_y must see through a @deprecated fit."""
    # Tests that check_fit_score_takes_y works on a class with
    # a deprecated fit method
    class TestEstimatorWithDeprecatedFitMethod(BaseEstimator):
        @deprecated("Deprecated for the purpose of testing "
                    "check_fit_score_takes_y")
        def fit(self, X, y):
            return self
    check_fit_score_takes_y("test", TestEstimatorWithDeprecatedFitMethod())
def test_check_estimator():
    """check_estimator must fail on each broken fixture above and pass
    on the well-behaved ones."""
    # tests that the estimator actually fails on "bad" estimators.
    # not a complete test of all checks, which are very extensive.
    # check that we have a set_params and can clone
    msg = "Passing a class was deprecated"
    assert_raises_regex(TypeError, msg, check_estimator, object)
    msg = "object has no attribute '_get_tags'"
    assert_raises_regex(AttributeError, msg, check_estimator, object())
    # check that values returned by get_params match set_params
    msg = "get_params result does not match what was passed to set_params"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ModifiesValueInsteadOfRaisingError())
    assert_warns(UserWarning, check_estimator, RaisesErrorInSetParams())
    assert_raises_regex(AssertionError, msg, check_estimator,
                        ModifiesAnotherValue())
    # check that we have a fit method
    msg = "object has no attribute 'fit'"
    assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator())
    # check that fit does input validation
    msg = "ValueError not raised"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        BaseBadClassifier())
    # check that sample_weights in fit accepts pandas.Series type
    try:
        from pandas import Series  # noqa
        msg = ("Estimator NoSampleWeightPandasSeriesType raises error if "
               "'sample_weight' parameter is of type pandas.Series")
        assert_raises_regex(
            ValueError, msg, check_estimator, NoSampleWeightPandasSeriesType())
    except ImportError:
        pass
    # check that predict does input validation (doesn't accept dicts in input)
    msg = "Estimator doesn't check for NaN and inf in predict"
    assert_raises_regex(AssertionError, msg, check_estimator,
                        NoCheckinPredict())
    # check that estimator state does not change
    # at transform/predict/predict_proba time
    msg = 'Estimator changes __dict__ during predict'
    assert_raises_regex(AssertionError, msg, check_estimator, ChangesDict())
    # check that `fit` only changes attributes that
    # are private (start with an _ or end with a _).
    msg = ('Estimator ChangesWrongAttribute should not change or mutate '
           'the parameter wrong_attribute from 0 to 1 during fit.')
    assert_raises_regex(AssertionError, msg,
                        check_estimator, ChangesWrongAttribute())
    check_estimator(ChangesUnderscoreAttribute())
    # check that `fit` doesn't add any public attribute
    msg = (r'Estimator adds public attribute\(s\) during the fit method.'
           ' Estimators are only allowed to add private attributes'
           ' either started with _ or ended'
           ' with _ but wrong_attribute added')
    assert_raises_regex(AssertionError, msg,
                        check_estimator, SetsWrongAttribute())
    # check for invariant method
    name = NotInvariantPredict.__name__
    method = 'predict'
    msg = ("{method} of {name} is not invariant when applied "
           "to a subset.").format(method=method, name=name)
    assert_raises_regex(AssertionError, msg,
                        check_estimator, NotInvariantPredict())
    # check for sparse matrix input handling
    name = NoSparseClassifier.__name__
    msg = "Estimator %s doesn't seem to fail gracefully on sparse data" % name
    # the check for sparse input handling prints to the stdout,
    # instead of raising an error, so as not to remove the original traceback.
    # that means we need to jump through some hoops to catch it.
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        check_estimator(NoSparseClassifier())
    except Exception:
        pass
    finally:
        sys.stdout = old_stdout
    assert msg in string_buffer.getvalue()
    # Large indices test on bad estimator
    msg = ('Estimator LargeSparseNotSupportedClassifier doesn\'t seem to '
           r'support \S{3}_64 matrix, and is not failing gracefully.*')
    assert_raises_regex(AssertionError, msg, check_estimator,
                        LargeSparseNotSupportedClassifier())
    # does error on binary_only untagged estimator
    msg = 'Only 2 classes are supported'
    assert_raises_regex(ValueError, msg, check_estimator,
                        UntaggedBinaryClassifier())
    # non-regression test for estimators transforming to sparse data
    check_estimator(SparseTransformer())
    # doesn't error on actual estimator
    check_estimator(LogisticRegression())
    check_estimator(LogisticRegression(C=0.01))
    check_estimator(MultiTaskElasticNet())
    # doesn't error on binary_only tagged estimator
    check_estimator(TaggedBinaryClassifier())
    # Check regressor with requires_positive_y estimator tag
    msg = 'negative y values not supported!'
    assert_raises_regex(ValueError, msg, check_estimator,
                        RequiresPositiveYRegressor())
    # Does not raise error on classifier with poor_score tag
    check_estimator(PoorScoreLogisticRegression())
def test_check_outlier_corruption():
    """Exercise check_outlier_corruption on one failing and one passing input."""
    # A non-tied value (1.5) inside the checked window: expected to raise.
    bad_decision = np.array([0., 1., 1.5, 2.])
    assert_raises(AssertionError, check_outlier_corruption, 1, 2, bad_decision)
    # Tied values at the boundary: the call must complete without raising.
    good_decision = np.array([0., 1., 1., 2.])
    check_outlier_corruption(1, 2, good_decision)
def test_check_estimator_transformer_no_mixin():
    """Transformer checks run even for estimators not deriving TransformerMixin."""
    # The bad transformer lacks fit_transform, which surfaces as an
    # AttributeError from the checks rather than a silent skip.
    estimator = BadTransformerWithoutMixin()
    expected = '.*fit_transform.*'
    assert_raises_regex(AttributeError, expected, check_estimator, estimator)
def test_check_estimator_clones():
    """check_estimator must not modify the estimator instance it receives."""
    from sklearn.datasets import load_iris
    iris = load_iris()
    candidates = [GaussianMixture, LinearRegression, RandomForestClassifier,
                  NMF, SGDClassifier, MiniBatchKMeans]
    for klass in candidates:
        with ignore_warnings(category=FutureWarning):
            # when 'est = SGDClassifier()'
            estimator = klass()
            _set_checking_parameters(estimator)
            set_random_state(estimator)
            # Unfitted estimator: its hash must be unchanged by the checks.
            before = joblib.hash(estimator)
            check_estimator(estimator)
            assert before == joblib.hash(estimator)
        with ignore_warnings(category=FutureWarning):
            # when 'est = SGDClassifier()'
            estimator = klass()
            _set_checking_parameters(estimator)
            set_random_state(estimator)
            # Fitted estimator: fitting first must not change the outcome.
            estimator.fit(iris.data + 10, iris.target)
            before = joblib.hash(estimator)
            check_estimator(estimator)
            assert before == joblib.hash(estimator)
def test_check_estimators_unfitted():
    """predict on an unfitted estimator must raise ValueError/AttributeError."""
    expected = "NotFittedError not raised by predict"
    assert_raises_regex(AssertionError, expected, check_estimators_unfitted,
                        "estimator", NoSparseClassifier())
    # CorrectNotFittedError inherits from ValueError or AttributeError, which
    # the check accepts, so this call must complete without raising.
    check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier())
def test_check_no_attributes_set_in_init():
    """__init__ must set exactly the constructor parameters, nothing more."""
    class NonConformantEstimatorPrivateSet(BaseEstimator):
        def __init__(self):
            self.you_should_not_set_this_ = None
    class NonConformantEstimatorNoParamSet(BaseEstimator):
        def __init__(self, you_should_set_this_=None):
            pass
    # Setting an extra (trailing-underscore) attribute in __init__ is flagged.
    msg_private = ("Estimator estimator_name should not set any"
                   " attribute apart from parameters during init."
                   r" Found attributes \['you_should_not_set_this_'\].")
    assert_raises_regex(AssertionError, msg_private,
                        check_no_attributes_set_in_init,
                        'estimator_name',
                        NonConformantEstimatorPrivateSet())
    # Failing to store a declared parameter as an attribute is also flagged.
    msg_missing = ("Estimator estimator_name should store all "
                   "parameters as an attribute during init. "
                   "Did not find attributes "
                   r"\['you_should_set_this_'\].")
    assert_raises_regex(AssertionError, msg_missing,
                        check_no_attributes_set_in_init,
                        'estimator_name',
                        NonConformantEstimatorNoParamSet())
def test_check_estimator_pairwise():
    """check_estimator() works on estimators with a _pairwise kernel/metric."""
    # Precomputed kernel.
    check_estimator(SVC(kernel='precomputed'))
    # Precomputed metric.
    check_estimator(KNeighborsRegressor(metric='precomputed'))
def test_check_classifier_data_not_an_array():
    """A classifier that behaves differently on pandas input must fail."""
    estimator = EstimatorInconsistentForPandas()
    assert_raises_regex(AssertionError, 'Not equal to tolerance',
                        check_classifier_data_not_an_array,
                        'estimator_name', estimator)
def test_check_regressor_data_not_an_array():
    """A regressor that behaves differently on pandas input must fail."""
    estimator = EstimatorInconsistentForPandas()
    assert_raises_regex(AssertionError, 'Not equal to tolerance',
                        check_regressor_data_not_an_array,
                        'estimator_name', estimator)
def run_tests_without_pytest():
    """Runs the tests in this file without using pytest.
    """
    main_module = sys.modules['__main__']
    # Collect every module-level test_* callable and wrap it for unittest.
    test_names = [name for name in dir(main_module)
                  if name.startswith('test_')]
    suite = unittest.TestSuite()
    suite.addTests(unittest.FunctionTestCase(getattr(main_module, name))
                   for name in test_names)
    unittest.TextTestRunner().run(suite)
def test_check_class_weight_balanced_linear_classifier():
    """Ill-computed class_weight='balanced' weights must raise an exception."""
    expected = ("Classifier estimator_name is not computing"
                " class_weight=balanced properly.")
    assert_raises_regex(AssertionError, expected,
                        check_class_weight_balanced_linear_classifier,
                        'estimator_name', BadBalancedWeightsClassifier)
def test_all_estimators_all_public():
    """all_estimators() must work without pytest and list only public names.

    Bug fix: all_estimators() returns (name, class) tuples, so the previous
    ``est.__class__.__name__`` always inspected ``tuple`` ("tuple" never
    starts with "_") and the assertion was vacuous. Unpack the pair and
    check both the registered name and the class name.
    """
    estimators = all_estimators()
    for name, Estimator in estimators:
        assert not name.startswith("_")
        assert not Estimator.__name__.startswith("_")
if __name__ == '__main__':
    # This module is run as a script to check that we have no dependency on
    # pytest for estimator checks.
    # (The pytest-based runner discovers the same test_* functions normally.)
    run_tests_without_pytest()
def test_xfail_ignored_in_check_estimator():
    # Make sure checks marked as xfail are just ignored and not run by
    # check_estimator(), but still raise a warning.
    # NuSVC carries xfail-tagged checks, so running the full suite on it must
    # emit a SkipTestWarning rather than fail.
    assert_warns(SkipTestWarning, check_estimator, NuSVC())
|
|
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import six
import testscenarios
from oslo import messaging
from oslo.messaging._drivers import common as exceptions
from oslo.messaging.openstack.common import jsonutils
from tests import utils as test_utils
# Hook used by the test loader: testscenarios expands each scenario on a
# TestCase into its own concrete test.
load_tests = testscenarios.load_tests_apply_scenarios
# Builtin exceptions live in the 'exceptions' module on Python 2 and in
# 'builtins' on Python 3.
EXCEPTIONS_MODULE = 'exceptions' if six.PY2 else 'builtins'
class NovaStyleException(Exception):
    """Mimics Nova's exception style: when no explicit message is given, the
    class-level ``format`` string is interpolated with the keyword args."""
    format = 'I am Nova'
    def __init__(self, message=None, **kwargs):
        # Keep the raw kwargs around; serialization code inspects them.
        self.kwargs = kwargs
        message = message or (self.format % kwargs)
        super(NovaStyleException, self).__init__(message)
class KwargsStyleException(NovaStyleException):
    # Format string interpolated from the kwargs handed to __init__
    # (e.g. KwargsStyleException(who='Oslo') -> "I am Oslo").
    format = 'I am %(who)s'
def add_remote_postfix(ex):
ex_type = type(ex)
message = str(ex)
str_override = lambda self: message
new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
{'__str__': str_override,
'__unicode__': str_override})
new_ex_type.__module__ = '%s_Remote' % ex.__class__.__module__
try:
ex.__class__ = new_ex_type
except TypeError:
ex.args = (message,) + ex.args[1:]
return ex
class SerializeRemoteExceptionTestCase(test_utils.BaseTestCase):
    """Scenario-driven tests for exceptions.serialize_remote_exception.

    generate_scenarios() multiplies the three axes below (log_failure x
    add_remote x exception type) into individual test cases via
    testscenarios.
    """
    # Whether serialize_remote_exception is asked to log the failure.
    _log_failure = [
        ('log_failure', dict(log_failure=True)),
        ('do_not_log_failure', dict(log_failure=False)),
    ]
    # Whether the raised exception is first rewritten with a _Remote suffix.
    _add_remote = [
        ('add_remote', dict(add_remote=True)),
        ('do_not_add_remote', dict(add_remote=False)),
    ]
    # Exception classes to serialize, plus the class/module/message values
    # expected in the serialized failure dict.
    _exception_types = [
        ('bog_standard', dict(cls=Exception,
                              args=['test'],
                              kwargs={},
                              clsname='Exception',
                              modname=EXCEPTIONS_MODULE,
                              msg='test')),
        ('nova_style', dict(cls=NovaStyleException,
                            args=[],
                            kwargs={},
                            clsname='NovaStyleException',
                            modname=__name__,
                            msg='I am Nova')),
        ('nova_style_with_msg', dict(cls=NovaStyleException,
                                     args=['testing'],
                                     kwargs={},
                                     clsname='NovaStyleException',
                                     modname=__name__,
                                     msg='testing')),
        ('kwargs_style', dict(cls=KwargsStyleException,
                              args=[],
                              kwargs={'who': 'Oslo'},
                              clsname='KwargsStyleException',
                              modname=__name__,
                              msg='I am Oslo')),
    ]
    @classmethod
    def generate_scenarios(cls):
        # Cartesian product of the three scenario axes above.
        cls.scenarios = testscenarios.multiply_scenarios(cls._log_failure,
                                                         cls._add_remote,
                                                         cls._exception_types)
    def setUp(self):
        super(SerializeRemoteExceptionTestCase, self).setUp()
    def test_serialize_remote_exception(self):
        errors = []
        def stub_error(msg, *a, **kw):
            # Mirror LOG.error's %-interpolation so the rendered message can
            # be captured for assertion.
            if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
                a = a[0]
            errors.append(str(msg) % a)
        self.stubs.Set(exceptions.LOG, 'error', stub_error)
        try:
            try:
                # Raise the scenario's exception, optionally rebranding it
                # with the _Remote postfix before re-raising.
                raise self.cls(*self.args, **self.kwargs)
            except Exception as ex:
                cls_error = ex
                if self.add_remote:
                    ex = add_remote_postfix(ex)
                raise ex
        except Exception:
            exc_info = sys.exc_info()
        serialized = exceptions.serialize_remote_exception(
            exc_info, log_failure=self.log_failure)
        failure = jsonutils.loads(serialized)
        self.assertEqual(self.clsname, failure['class'], failure)
        self.assertEqual(self.modname, failure['module'])
        self.assertEqual(self.msg, failure['message'])
        self.assertEqual([self.msg], failure['args'])
        self.assertEqual(self.kwargs, failure['kwargs'])
        # Note: _Remote prefix not stripped from tracebacks
        tb = cls_error.__class__.__name__ + ': ' + self.msg
        self.assertIn(tb, ''.join(failure['tb']))
        if self.log_failure:
            self.assertTrue(len(errors) > 0, errors)
        else:
            self.assertEqual(0, len(errors), errors)
SerializeRemoteExceptionTestCase.generate_scenarios()
class DeserializeRemoteExceptionTestCase(test_utils.BaseTestCase):
    """Scenario-driven tests for exceptions.deserialize_remote_exception.

    Each scenario provides the serialized failure fields plus the exception
    type, str() form and args expected after deserialization with a given
    module allow-list. Disallowed/unknown exceptions collapse to
    messaging.RemoteError.
    """
    # Modules from which exception classes may be reconstructed directly.
    _standard_allowed = [__name__]
    scenarios = [
        ('bog_standard',
         dict(allowed=_standard_allowed,
              clsname='Exception',
              modname=EXCEPTIONS_MODULE,
              cls=Exception,
              args=['test'],
              kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='Exception',
              remote_args=('test\ntraceback\ntraceback\n', ),
              remote_kwargs={})),
        ('nova_style',
         dict(allowed=_standard_allowed,
              clsname='NovaStyleException',
              modname=__name__,
              cls=NovaStyleException,
              args=[],
              kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='NovaStyleException_Remote',
              remote_args=('I am Nova', ),
              remote_kwargs={})),
        ('nova_style_with_msg',
         dict(allowed=_standard_allowed,
              clsname='NovaStyleException',
              modname=__name__,
              cls=NovaStyleException,
              args=['testing'],
              kwargs={},
              str='test\ntraceback\ntraceback\n',
              remote_name='NovaStyleException_Remote',
              remote_args=('testing', ),
              remote_kwargs={})),
        ('kwargs_style',
         dict(allowed=_standard_allowed,
              clsname='KwargsStyleException',
              modname=__name__,
              cls=KwargsStyleException,
              args=[],
              kwargs={'who': 'Oslo'},
              str='test\ntraceback\ntraceback\n',
              remote_name='KwargsStyleException_Remote',
              remote_args=('I am Oslo', ),
              remote_kwargs={})),
        # Module not in the allow-list: falls back to RemoteError.
        ('not_allowed',
         dict(allowed=[],
              clsname='NovaStyleException',
              modname=__name__,
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: NovaStyleException test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: NovaStyleException test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'NovaStyleException',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        # Allowed module that does not exist: falls back to RemoteError.
        ('unknown_module',
         dict(allowed=['notexist'],
              clsname='Exception',
              modname='notexist',
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'Exception',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        # Class name not found in the module: falls back to RemoteError.
        ('unknown_exception',
         dict(allowed=[],
              clsname='FarcicalError',
              modname=EXCEPTIONS_MODULE,
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: FarcicalError test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: FarcicalError test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'FarcicalError',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        # Constructor rejects the serialized kwargs: falls back to RemoteError.
        ('unknown_kwarg',
         dict(allowed=[],
              clsname='Exception',
              modname=EXCEPTIONS_MODULE,
              cls=messaging.RemoteError,
              args=[],
              kwargs={'foobar': 'blaa'},
              str=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: Exception test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'Exception',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
        # SystemExit must never be re-raised as-is: falls back to RemoteError.
        ('system_exit',
         dict(allowed=[],
              clsname='SystemExit',
              modname=EXCEPTIONS_MODULE,
              cls=messaging.RemoteError,
              args=[],
              kwargs={},
              str=("Remote error: SystemExit test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              msg=("Remote error: SystemExit test\n"
                   "[%r]." % u'traceback\ntraceback\n'),
              remote_name='RemoteError',
              remote_args=(),
              remote_kwargs={'exc_type': 'SystemExit',
                             'value': 'test',
                             'traceback': 'traceback\ntraceback\n'})),
    ]
    def test_deserialize_remote_exception(self):
        # Build the serialized failure exactly as the wire format has it.
        failure = {
            'class': self.clsname,
            'module': self.modname,
            'message': 'test',
            'tb': ['traceback\ntraceback\n'],
            'args': self.args,
            'kwargs': self.kwargs,
        }
        serialized = jsonutils.dumps(failure)
        ex = exceptions.deserialize_remote_exception(serialized, self.allowed)
        self.assertIsInstance(ex, self.cls)
        self.assertEqual(self.remote_name, ex.__class__.__name__)
        self.assertEqual(self.str, six.text_type(ex))
        if hasattr(self, 'msg'):
            # RemoteError scenarios carry the rendered message as the first
            # element of args.
            self.assertEqual(self.msg, six.text_type(ex))
            self.assertEqual((self.msg,) + self.remote_args, ex.args)
        else:
            self.assertEqual(self.remote_args, ex.args)
|
|
import os
import os.path
import socket
import textwrap
import shutil
import subprocess
import contextlib
import sys
import tempfile
import time
import random
# These integration tests drive a compiled statsite binary and rely on
# pytest's funcarg machinery; bail out early when pytest is unavailable.
try:
    import pytest
except ImportError:
    print >> sys.stderr, "Integ tests require pytests!"
    sys.exit(1)
def pytest_funcarg__servers(request):
    "Returns a new APIHandler with a filter manager"
    # NOTE(review): actually returns (tcp_conn, udp_conn, output_path) for a
    # freshly launched statsite process -- the docstring looks copy-pasted.
    # Create tmpdir and delete after
    tmpdir = tempfile.mkdtemp()
    # Make the command: statsite streams flushed metrics into this file.
    output = "%s/output" % tmpdir
    cmd = "cat >> %s" % output
    # Write the configuration (same port for TCP and UDP).
    port = random.randrange(10000, 65000)
    config_path = os.path.join(tmpdir, "config.cfg")
    conf = """[statsite]
flush_interval = 1
port = %d
udp_port = %d
stream_cmd = %s
quantiles = 0.5, 0.9, 0.95, 0.99
[histogram1]
prefix=has_hist
min=10
max=90
width=10
""" % (port, port, cmd)
    open(config_path, "w").write(conf)
    # Start the process; a non-None returncode would mean it already died.
    proc = subprocess.Popen(['./statsite', '-f', config_path])
    proc.poll()
    assert proc.returncode is None
    # Define a cleanup handler that kills the daemon and removes the tmpdir.
    def cleanup():
        try:
            proc.kill()
            proc.wait()
            shutil.rmtree(tmpdir)
        except:
            print proc
            pass
    request.addfinalizer(cleanup)
    # Make a TCP connection to the server, retrying while it starts up.
    connected = False
    for x in xrange(3):
        try:
            conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            conn.settimeout(1)
            conn.connect(("localhost", port))
            connected = True
            break
        except Exception, e:
            print e
            time.sleep(0.5)
    # Die now
    if not connected:
        raise EnvironmentError("Failed to connect!")
    # Make a second, UDP connection on the same port.
    conn2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    conn2.connect(("localhost", port))
    # Return the connection pair plus the sink file path.
    return conn, conn2, output
def wait_file(path, timeout=5):
    """Block until *path* exists and is non-empty, or *timeout* elapses.

    Raises Exception if the file never appears. An empty file after the
    timeout is tolerated: the second wait simply gives up without raising.
    """
    deadline = time.time() + timeout
    # Phase 1: wait for the file to be created.
    while not os.path.isfile(path) and time.time() < deadline:
        time.sleep(0.1)
    if not os.path.isfile(path):
        raise Exception("Timed out waiting for file %s" % path)
    # Phase 2: wait for the first flush to land (non-zero size).
    while os.path.getsize(path) == 0 and time.time() < deadline:
        time.sleep(0.1)
class TestInteg(object):
    """TCP integration tests: send metrics to a live statsite over the
    stream socket and assert on what the stream_cmd sink file contains.

    All flush timestamps may land in the previous wall-clock second, hence
    the `now or now-1` two-way assertions below.
    """
    def test_kv(self, servers):
        "Tests adding kv pairs"
        server, _, output = servers
        server.sendall("tubez:100|kv\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1))
    def test_gauges(self, servers):
        "Tests adding gauges"
        server, _, output = servers
        server.sendall("g1:1|g\n")
        server.sendall("g1:50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        # A plain gauge keeps only the last value reported.
        assert out in ("gauges.g1|50.000000|%d\n" % now, "gauges.g1|50.000000|%d\n" % (now - 1))
    def test_gauges_delta(self, servers):
        "Tests gauge deltas accumulate"
        server, _, output = servers
        server.sendall("gd:+50|g\n")
        server.sendall("gd:+50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|100.000000|%d\n" % now, "gauges.gd|100.000000|%d\n" % (now - 1))
    def test_gauges_delta_neg(self, servers):
        "Tests negative gauge deltas accumulate"
        server, _, output = servers
        server.sendall("gd:-50|g\n")
        server.sendall("gd:-50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|-100.000000|%d\n" % now, "gauges.gd|-100.000000|%d\n" % (now - 1))
    def test_counters(self, servers):
        "Tests counters sum their increments"
        server, _, output = servers
        server.sendall("foobar:100|c\n")
        server.sendall("foobar:200|c\n")
        server.sendall("foobar:300|c\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|600.000000|%d\n" % (now),
                       "counts.foobar|600.000000|%d\n" % (now - 1))
    def test_counters_sample(self, servers):
        "Tests counters scale by the sample rate"
        server, _, output = servers
        # @0.1 sample rate: each increment is scaled up by 10x.
        server.sendall("foobar:100|c|@0.1\n")
        server.sendall("foobar:200|c|@0.1\n")
        server.sendall("foobar:300|c|@0.1\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|6000.000000|%d\n" % (now),
                       "counts.foobar|6000.000000|%d\n" % (now - 1))
    def test_meters_alias(self, servers):
        "Tests adding timing data with the 'h' alias"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            msg += "val:%d|h\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        # Derived statistics over the values 0..99.
        assert "timers.val.sum|4950" in out
        assert "timers.val.sum_sq|328350" in out
        assert "timers.val.mean|49.500000" in out
        assert "timers.val.lower|0.000000" in out
        assert "timers.val.upper|99.000000" in out
        assert "timers.val.count|100" in out
        assert "timers.val.stdev|29.011492" in out
        assert "timers.val.median|49.000000" in out
        assert "timers.val.p90|90.000000" in out
        assert "timers.val.p95|95.000000" in out
        assert "timers.val.p99|99.000000" in out
        assert "timers.val.rate|4950" in out
        assert "timers.val.sample_rate|100" in out
    def test_meters(self, servers):
        "Tests adding timing data via 'ms'"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            msg += "noobs:%d|ms\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        assert "timers.noobs.sum|4950" in out
        assert "timers.noobs.sum_sq|328350" in out
        assert "timers.noobs.mean|49.500000" in out
        assert "timers.noobs.lower|0.000000" in out
        assert "timers.noobs.upper|99.000000" in out
        assert "timers.noobs.count|100" in out
        assert "timers.noobs.stdev|29.011492" in out
        assert "timers.noobs.median|49.000000" in out
        assert "timers.noobs.p90|90.000000" in out
        assert "timers.noobs.p95|95.000000" in out
        assert "timers.noobs.p99|99.000000" in out
        assert "timers.noobs.rate|4950" in out
        assert "timers.noobs.sample_rate|100" in out
    def test_timers_with_rate(self, servers):
        "Tests timers with sampling rate"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            msg += "withrate:%d|ms|@0.5\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        # Value statistics are unscaled; only count/sample_rate reflect @0.5.
        assert "timers.withrate.sum|4950" in out
        assert "timers.withrate.sum_sq|328350" in out
        assert "timers.withrate.mean|49.500000" in out
        assert "timers.withrate.lower|0.000000" in out
        assert "timers.withrate.upper|99.000000" in out
        assert "timers.withrate.count|200" in out
        assert "timers.withrate.stdev|29.011492" in out
        assert "timers.withrate.median|49.000000" in out
        assert "timers.withrate.p90|90.000000" in out
        assert "timers.withrate.p95|95.000000" in out
        assert "timers.withrate.p99|99.000000" in out
        assert "timers.withrate.rate|4950" in out
        assert "timers.withrate.sample_rate|200.0" in out
    def test_histogram(self, servers):
        "Tests adding keys with histograms"
        server, _, output = servers
        msg = ""
        for x in xrange(100):
            # The has_hist prefix matches the [histogram1] config section.
            msg += "has_hist.test:%d|ms\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        # 0..99 spread evenly over the configured 10-wide bins from 10 to 90.
        assert "timers.has_hist.test.histogram.bin_<10.00|10" in out
        assert "timers.has_hist.test.histogram.bin_10.00|10" in out
        assert "timers.has_hist.test.histogram.bin_20.00|10" in out
        assert "timers.has_hist.test.histogram.bin_30.00|10" in out
        assert "timers.has_hist.test.histogram.bin_40.00|10" in out
        assert "timers.has_hist.test.histogram.bin_50.00|10" in out
        assert "timers.has_hist.test.histogram.bin_60.00|10" in out
        assert "timers.has_hist.test.histogram.bin_70.00|10" in out
        assert "timers.has_hist.test.histogram.bin_80.00|10" in out
        assert "timers.has_hist.test.histogram.bin_>90.00|10" in out
    def test_sets(self, servers):
        "Tests sets count unique members"
        server, _, output = servers
        server.sendall("zip:foo|s\n")
        server.sendall("zip:bar|s\n")
        server.sendall("zip:baz|s\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("sets.zip|3|%d\n" % now, "sets.zip|3|%d\n" % (now - 1))
    def test_double_parsing(self, servers):
        "Tests string to double parsing"
        server, _, output = servers
        server.sendall("int1:1|c\n")
        server.sendall("decimal1:1.0|c\n")
        server.sendall("decimal2:2.3456789|c\n")
        server.sendall("scientific1:1.0e5|c\n")
        server.sendall("scientific2:2.0e05|c\n")
        server.sendall("scientific3:3.0E05|c\n")
        server.sendall("scientific4:4.0e-5|c\n")
        server.sendall("underflow1:1.964393875E-314|c\n")
        wait_file(output)
        out = open(output).read()
        assert "counts.int1|1.000000|" in out
        assert "counts.decimal1|1.000000|" in out
        assert "counts.decimal2|2.345679|" in out
        assert "counts.scientific1|100000.000000|" in out
        assert "counts.scientific2|200000.000000|" in out
        assert "counts.scientific3|300000.000000|" in out
        assert "counts.scientific4|0.000040|" in out
        # Subnormal doubles are rejected rather than emitted.
        assert "counts.underflow1|" not in out
#    def test_condensed(self, servers):
#        "Tests adding condensed stats"
#        _, server, output = servers
#        server.sendall("foo:4|c:5|ms:3|ms:7|g\n")
#
#        wait_file(output)
#        now = time.time()
#        out = open(output).read()
#        assert ("counts.foo|4.000") in out
#        assert ("timers.foo.sum|8.000") in out
#        assert ("gauges.foo|7.000") in out
class TestIntegUDP(object):
    """UDP integration tests: same assertions as TestInteg, but the metrics
    are sent over the datagram socket (second element of the fixture)."""
    def test_kv(self, servers):
        "Tests adding kv pairs"
        _, server, output = servers
        server.sendall("tubez:100|kv\n")
        wait_file(output)
        # Flushes may land in the previous second; accept now or now-1.
        now = time.time()
        out = open(output).read()
        assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1))
    def test_gauges(self, servers):
        "Tests adding gauges"
        _, server, output = servers
        server.sendall("g1:1|g\n")
        server.sendall("g1:50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.g1|50.000000|%d\n" % now, "gauges.g1|50.000000|%d\n" % (now - 1))
    def test_gauges_delta(self, servers):
        "Tests gauge deltas accumulate"
        _, server, output = servers
        server.sendall("gd:+50|g\n")
        server.sendall("gd:+50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|100.000000|%d\n" % now, "gauges.gd|100.000000|%d\n" % (now - 1))
    def test_gauges_delta_neg(self, servers):
        "Tests negative gauge deltas accumulate"
        _, server, output = servers
        server.sendall("gd:-50|g\n")
        server.sendall("gd:-50|g\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("gauges.gd|-100.000000|%d\n" % now, "gauges.gd|-100.000000|%d\n" % (now - 1))
    def test_bad_kv(self, servers):
        "Tests adding a bad value, followed by a valid kv pair"
        _, server, output = servers
        # The junk datagram must be dropped without poisoning the stream.
        server.sendall("this is junk data\n")
        server.sendall("tubez:100|kv\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("kv.tubez|100.000000|%d\n" % now, "kv.tubez|100.000000|%d\n" % (now - 1))
    def test_counters(self, servers):
        "Tests counters sum their increments"
        _, server, output = servers
        server.sendall("foobar:100|c\n")
        server.sendall("foobar:200|c\n")
        server.sendall("foobar:300|c\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|600.000000|%d\n" % (now),
                       "counts.foobar|600.000000|%d\n" % (now - 1))
    def test_counters_signed(self, servers):
        "Tests counters accept explicit +/- signs"
        _, server, output = servers
        server.sendall("foobar:+100|c\n")
        server.sendall("foobar:+200|c\n")
        server.sendall("foobar:-50|c\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|250.000000|%d\n" % (now),
                       "counts.foobar|250.000000|%d\n" % (now - 1))
    def test_counters_sample(self, servers):
        "Tests counters scale by the sample rate"
        _, server, output = servers
        server.sendall("foobar:100|c|@0.1\n")
        server.sendall("foobar:200|c|@0.1\n")
        server.sendall("foobar:300|c|@0.1\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|6000.000000|%d\n" % (now),
                       "counts.foobar|6000.000000|%d\n" % (now - 1))
    def test_wrong_protocol_in_a_batch(self, servers):
        "Tests that malformed lines inside a batch do not drop valid ones"
        _, server, output = servers
        server.sendall("foobar10c\nfoobar:10|c\nfoobar:10|c\n")
        server.sendall("foobar:c\nfoobar:10|c\nfoobar:10|c\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.foobar|40.000000|%d\n" % (now),
                       "counts.foobar|40.000000|%d\n" % (now - 1))
    def test_counters_no_newlines(self, servers):
        "Tests adding counters without a trailing new line"
        _, server, output = servers
        # Each datagram is a complete message even without '\n'.
        server.sendall("zip:100|c")
        server.sendall("zip:200|c")
        server.sendall("zip:300|c")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("counts.zip|600.000000|%d\n" % (now),
                       "counts.zip|600.000000|%d\n" % (now - 1))
    def test_meters(self, servers):
        "Tests adding timing data via 'ms'"
        _, server, output = servers
        msg = ""
        for x in xrange(100):
            msg += "noobs:%d|ms\n" % x
        server.sendall(msg)
        wait_file(output)
        out = open(output).read()
        # Derived statistics over the values 0..99.
        assert "timers.noobs.sum|4950" in out
        assert "timers.noobs.sum_sq|328350" in out
        assert "timers.noobs.mean|49.500000" in out
        assert "timers.noobs.lower|0.000000" in out
        assert "timers.noobs.upper|99.000000" in out
        assert "timers.noobs.count|100" in out
        assert "timers.noobs.stdev|29.011492" in out
        assert "timers.noobs.median|49.000000" in out
        assert "timers.noobs.p90|90.000000" in out
        assert "timers.noobs.p95|95.000000" in out
        assert "timers.noobs.p99|99.000000" in out
        assert "timers.noobs.rate|4950" in out
        assert "timers.noobs.sample_rate|100" in out
    def test_sets(self, servers):
        "Tests sets count unique members"
        _, server, output = servers
        server.sendall("zip:foo|s\n")
        server.sendall("zip:bar|s\n")
        server.sendall("zip:baz|s\n")
        wait_file(output)
        now = time.time()
        out = open(output).read()
        assert out in ("sets.zip|3|%d\n" % now, "sets.zip|3|%d\n" % (now - 1))
class TestIntegBindAddress(object):
    """Tests that statsite honors the bind_address config option."""
    @contextlib.contextmanager
    def run(self, addr, port=None):
        # Launch statsite bound to `addr`, yield the chosen port, and kill
        # the process on exit.
        port = port if port else random.randrange(10000, 65000)
        fh = tempfile.NamedTemporaryFile()
        conf = '''\
        [statsite]
        port = %d
        udp_port = %s
        bind_address = %s\n'''
        fh.write(textwrap.dedent(conf % (port, port, addr)))
        fh.flush()
        try:
            p = subprocess.Popen(['./statsite', '-f', fh.name])
            time.sleep(0.3)
            yield port
        finally:
            # NOTE(review): if Popen itself raises, `p` is unbound here and
            # the NameError would mask the original failure -- confirm.
            p.kill()
            fh.close()
    def islistening(self, addr, port, command='statsite'):
        # Ask lsof whether `command` holds both a TCP and a UDP socket on
        # addr:port; a non-zero exit means nothing matched.
        try:
            cmd = ['lsof', '-FnPc', '-nP', '-i', '@%s:%s' % (addr, port)]
            out = subprocess.check_output(cmd)
        except subprocess.CalledProcessError:
            return False
        return (command in out) and ('PTCP' in out) and ('PUDP' in out)
    def test_ipv4_localhost(self):
        with self.run('127.0.0.1') as port:
            assert self.islistening('127.0.0.1', port), 'not listening'
    def test_ipv4_any(self):
        with self.run('0.0.0.0') as port:
            assert self.islistening('0.0.0.0', port), 'not listening'
    def test_ipv4_bogus(self):
        # Invalid or non-local addresses must leave nothing listening.
        with self.run('a.b.c.d') as port:
            assert not self.islistening('0.0.0.0', port), 'should not be listening'
        with self.run('1.0.1.0') as port:
            assert not self.islistening('1.0.1.0', port), 'should not be listening'
    def test_ipv4_used(self):
        # Occupy the port with nc first; statsite must fail to bind it.
        try:
            port = random.randrange(10000, 65000)
            p = subprocess.Popen(['nc', '-l', '127.0.0.1', str(port)])
            with self.run('127.0.0.1', port):
                assert not self.islistening('127.0.0.0', port), 'should not be listening'
        finally:
            p.kill()
if __name__ == "__main__":
    # Run only the core TCP integration tests when invoked directly.
    sys.exit(pytest.main(args="-k TestInteg."))
|
|
from django.http import HttpRequest
from django.test import TestCase, Client
from django.contrib.auth.models import User
from django.urls import reverse
from django.core import mail
from django.test.utils import override_settings
from lfs.addresses.models import Address
from lfs.addresses.utils import AddressManagement
from lfs.core.models import Country
from lfs.core.models import Shop
from lfs.customer.models import CreditCard
from lfs.customer.models import Customer
from lfs.customer.utils import create_unique_username
from lfs.customer.utils import create_customer
from lfs.shipping.models import ShippingMethod
from lfs.tax.models import Tax
from lfs.payment.models import PaymentMethod
from django.contrib.sessions.middleware import SessionMiddleware
class CreditCardTestCase(TestCase):
    """Unit tests for the CreditCard model's string representation."""
    def setUp(self):
        # An unsaved, in-memory credit card fixture is enough for __str__.
        self.cc = CreditCard(
            type="mastercard",
            owner="John Doe",
            number="4711",
            expiration_date_month=8,
            expiration_date_year=2012,
        )
    def test_unicode(self):
        """String form is '<type> / <owner>'."""
        expected = "%s / %s" % (self.cc.type, self.cc.owner)
        self.assertEquals(self.cc.__str__(), expected)
class CustomerTestCase(TestCase):
    """Tests customer creation against a fully configured shop fixture."""
    fixtures = ['lfs_shop.xml']
    def setUp(self):
        # Create a user the customer can be attached to.
        self.username = 'joe'
        self.password = 'bloggs'
        self.user = User(username=self.username)
        self.user.set_password(self.password)
        self.user.save()
        # Configure the shop with a set of invoice/shipping countries.
        ie = Country.objects.get(code="ie")
        gb = Country.objects.get(code="gb")
        de = Country.objects.get(code="de")
        us = Country.objects.get(code="us")
        fr = Country.objects.get(code="fr")
        shop, created = Shop.objects.get_or_create(name="lfs test", shop_owner="John Doe", default_country=de)
        shop.save()
        shop.invoice_countries.add(ie)
        shop.invoice_countries.add(gb)
        shop.invoice_countries.add(de)
        shop.invoice_countries.add(us)
        shop.invoice_countries.add(fr)
        shop.shipping_countries.add(ie)
        shop.shipping_countries.add(gb)
        shop.shipping_countries.add(de)
        shop.shipping_countries.add(us)
        shop.shipping_countries.add(fr)
        shop.save()
        # Default shipping and payment methods used when a customer is built.
        tax = Tax.objects.create(rate=19)
        ShippingMethod.objects.create(
            name="Standard",
            active=True,
            price=1.0,
            tax=tax
        )
        PaymentMethod.objects.create(
            name="Direct Debit",
            active=True,
            tax=tax,
        )
    def test_create_customer(self):
        # Build a request with a real session, as create_customer needs one.
        request = HttpRequest()
        request.user = self.user
        middleware = SessionMiddleware()
        middleware.process_request(request)
        request.session.save()
        self.assertEquals(Address.objects.count(), 0)
        create_customer(request)
        # create_customer provisions four addresses (selected/default pairs
        # for shipping and invoice).
        self.assertEquals(Address.objects.count(), 4)
class AddressTestCase(TestCase):
fixtures = ['lfs_shop.xml']
def setUp(self):
"""
"""
ie = Country.objects.get(code="ie")
gb = Country.objects.get(code="gb")
de = Country.objects.get(code="de")
us = Country.objects.get(code="us")
fr = Country.objects.get(code="fr")
shop, created = Shop.objects.get_or_create(name="lfs test", shop_owner="John Doe", default_country=de)
shop.save()
shop.invoice_countries.add(ie)
shop.invoice_countries.add(gb)
shop.invoice_countries.add(de)
shop.invoice_countries.add(us)
shop.invoice_countries.add(fr)
shop.shipping_countries.add(ie)
shop.shipping_countries.add(gb)
shop.shipping_countries.add(de)
shop.shipping_countries.add(us)
shop.shipping_countries.add(fr)
shop.save()
tax = Tax.objects.create(rate=19)
shipping_method = ShippingMethod.objects.create(
name="Standard",
active=True,
price=1.0,
tax=tax
)
payment_method = PaymentMethod.objects.create(
name="Direct Debit",
active=True,
tax=tax,
)
self.address1 = Address.objects.create(
firstname="John",
lastname="Doe",
company_name="Doe Ltd.",
line1="Street 42",
city="Gotham City",
zip_code="23422",
country=de,
phone="555-111111",
email="john@doe.com",
)
self.address2 = Address.objects.create(
firstname="Jane",
lastname="Doe",
company_name="Doe Ltd.",
line1="Street 43",
city="Smallville",
zip_code="24432",
country=de,
phone="666-111111",
email="jane@doe.com",
)
self.address3 = Address.objects.create(
firstname="John",
lastname="Doe",
company_name="Doe Ltd.",
line1="Street 42",
city="Gotham City",
zip_code="23422",
country=de,
phone="555-111111",
email="john@doe.com",
)
self.address4 = Address.objects.create(
firstname="Jane",
lastname="Doe",
company_name="Doe Ltd.",
line1="Street 43",
city="Smallville",
zip_code="24432",
country=de,
phone="666-111111",
email="jane@doe.com",
)
self.username = 'joe'
self.password = 'bloggs'
new_user = User(username=self.username)
new_user.set_password(self.password)
new_user.save()
self.customer = Customer.objects.create(
user=new_user,
selected_shipping_method=shipping_method,
selected_payment_method=payment_method,
selected_shipping_address=self.address1,
selected_invoice_address=self.address2,
default_shipping_address=self.address3,
default_invoice_address=self.address4,
)
def test_address_page(self):
"""
Tests that we can see a shipping and an invoice address
"""
# login as our customer
logged_in = self.client.login(username=self.username, password=self.password)
self.assertEqual(logged_in, True)
address_response = self.client.get(reverse('lfs_my_addresses'))
# self.dump_response(address_response)
self.assertContains(address_response, 'Smallville', status_code=200)
self.assertContains(address_response, 'Gotham City', status_code=200)
def test_register_then_view_address(self):
"""Check we have a customer in database after registration"""
# we should have one customer starting
self.assertEqual(len(Customer.objects.all()), 1)
registration_response = self.client.post(reverse('lfs_login'), {'action': 'register', 'email': 'test@test.com', 'password_1': 'password', 'password_2': 'password'})
self.assertEquals(registration_response.status_code, 302)
self.assertEquals(registration_response._headers['location'], ('Location', '/'))
# Test that one message has been sent.
self.assertEquals(len(mail.outbox), 1)
# see if we can view the address page
address_response = self.client.get(reverse('lfs_my_addresses'))
self.assertContains(address_response, 'City', status_code=200)
# we should now have 2 customers
self.assertEqual(len(Customer.objects.all()), 2)
def dump_response(self, http_response):
fo = open('tests_customers.html', 'w')
fo.write(str(http_response))
fo.close()
def test_create_new_address(self):
# test that we have only 4 addresses registered (from setUp)
self.assertEquals(Address.objects.count(), 4)
# register a new user
registration_response = self.client.post(reverse('lfs_login'), {'action': 'register', 'email': 'test@test.com', 'password_1': 'password', 'password_2': 'password'})
self.assertEquals(registration_response.status_code, 302)
self.assertEquals(registration_response._headers['location'], ('Location', '/'))
self.assertEquals(Address.objects.count(), 8)
# Test that one message has been sent.
self.assertEquals(len(mail.outbox), 1)
our_user = User.objects.get(email='test@test.com')
our_customer = Customer.objects.get(user=our_user)
self.assertNotEquals(our_customer.selected_invoice_address, None)
self.assertNotEquals(our_customer.selected_shipping_address, None)
# see if we can view the addresss page
address_data = {
'invoice-firstname': 'Joe', 'invoice-lastname': 'Bloggs',
'invoice-line1': 'de company name', 'invoice-line2': 'de street',
'invoice-city': 'Dallas', 'invoice-state': 'TX',
'invoice-code': '84003', 'invoice-country': 'US',
'invoice-phone': '+49 4711 4711', 'invoice-email': 'joe.bloggs@acme.com',
'shipping-firstname': 'Joe', 'shipping-lastname': 'Bloggs',
'shipping-line1': 'de company name', 'shipping-line2': 'de street',
'shipping-city': 'Dallas', 'shipping-state': 'TX',
'shipping-code': '84003', 'shipping-country': 'US',
'shipping-phone': '+49 4712 4712', 'invoice-email': 'joe.bloggs@acme.com',
}
self.client.post(reverse('lfs_my_addresses'), address_data)
self.assertEquals(Address.objects.count(), 8)
# refetch our user from the database
our_customer = Customer.objects.get(user=our_user)
self.assertNotEquals(our_customer.selected_invoice_address, None)
self.assertNotEquals(our_customer.selected_shipping_address, None)
self.assertEquals(our_customer.selected_invoice_address.firstname, 'Joe')
self.assertEquals(our_customer.selected_invoice_address.lastname, 'Bloggs')
    def _test_change_address_page(self):
        """
        Tests that we can see a shipping and an invoice address

        NOTE(review): the leading underscore keeps the test runner from
        collecting this test -- presumably disabled on purpose; confirm
        before renaming it back to ``test_...``.
        """
        # login as our customer
        logged_in = self.client.login(username=self.username, password=self.password)
        self.assertEqual(logged_in, True)
        # Build the POST payload from the customer's current invoice
        # (address2) and shipping (address1) addresses.
        iam = AddressManagement(self.customer, self.address2, "invoice")
        sam = AddressManagement(self.customer, self.address1, "shipping")
        iam_data = iam.get_address_as_dict()
        sam_data = sam.get_address_as_dict()
        data = {"invoice-firstname": "newname",
                "invoice-lastname": self.address2.lastname,
                "invoice-phone": self.address2.phone,
                "invoice-email": self.address2.email,
                "shipping-firstname": self.address1.firstname,
                "shipping-lastname": self.address1.lastname,
                "shipping-phone": self.address1.phone,
                "shipping-email": self.address1.email,
                }
        # Prefix every managed-address field with its form prefix.
        for key, value in iam_data.items():
            data['invoice-%s' % key] = value
        for key, value in sam_data.items():
            data['shipping-%s' % key] = value
        # Change the invoice country to Austria.
        data['invoice-country'] = 'AT'
        response = self.client.post(reverse('lfs_my_addresses'), data=data, follow=True)
        self.assertEqual(response.status_code, 200)
        # The stored invoice address must reflect the posted changes.
        iam2 = Address.objects.get(pk=self.address2.pk)
        self.assertEqual(iam2.firstname, "newname")
        self.assertEqual(iam2.country.code.upper(), "AT")
class NoAutoUpdateAddressTestCase(TestCase):
    """Address tests with ``LFS_AUTO_UPDATE_DEFAULT_ADDRESSES`` disabled.

    Mirrors the auto-update test case above, but overrides the setting so
    default addresses are not kept in sync with the selected ones.
    """
    fixtures = ['lfs_shop.xml']

    def _create_address(self, firstname, line1, city, zip_code, phone, email, country):
        # Helper: all fixture addresses share the same lastname/company.
        return Address.objects.create(
            firstname=firstname,
            lastname="Doe",
            company_name="Doe Ltd.",
            line1=line1,
            city=city,
            zip_code=zip_code,
            country=country,
            phone=phone,
            email=email,
        )

    def setUp(self):
        """Create shop, tax, methods, four addresses and one customer."""
        ie = Country.objects.get(code="ie")
        gb = Country.objects.get(code="gb")
        de = Country.objects.get(code="de")
        us = Country.objects.get(code="us")
        fr = Country.objects.get(code="fr")

        shop, created = Shop.objects.get_or_create(
            name="lfs test", shop_owner="John Doe", default_country=de)
        # All five countries are valid for both invoicing and shipping.
        for country in (ie, gb, de, us, fr):
            shop.invoice_countries.add(country)
            shop.shipping_countries.add(country)
        shop.save()

        tax = Tax.objects.create(rate=19)
        shipping_method = ShippingMethod.objects.create(
            name="Standard",
            active=True,
            price=1.0,
            tax=tax,
        )
        payment_method = PaymentMethod.objects.create(
            name="Direct Debit",
            active=True,
            tax=tax,
        )

        # Two identical pairs: selected (1/2) and default (3/4) addresses.
        self.address1 = self._create_address(
            "John", "Street 42", "Gotham City", "23422",
            "555-111111", "john@doe.com", de)
        self.address2 = self._create_address(
            "Jane", "Street 43", "Smallville", "24432",
            "666-111111", "jane@doe.com", de)
        self.address3 = self._create_address(
            "John", "Street 42", "Gotham City", "23422",
            "555-111111", "john@doe.com", de)
        self.address4 = self._create_address(
            "Jane", "Street 43", "Smallville", "24432",
            "666-111111", "jane@doe.com", de)

        self.username = 'joe'
        self.password = 'bloggs'
        new_user = User(username=self.username)
        new_user.set_password(self.password)
        new_user.save()
        self.customer = Customer.objects.create(
            user=new_user,
            selected_shipping_method=shipping_method,
            selected_payment_method=payment_method,
            selected_shipping_address=self.address1,
            selected_invoice_address=self.address2,
            default_shipping_address=self.address3,
            default_invoice_address=self.address4,
        )

    @override_settings(LFS_AUTO_UPDATE_DEFAULT_ADDRESSES=False)
    def test_address_page(self):
        """
        Tests that we can see a shipping and an invoice address
        """
        # login as our customer
        logged_in = self.client.login(username=self.username, password=self.password)
        self.assertEqual(logged_in, True)
        address_response = self.client.get(reverse('lfs_my_addresses'))
        self.assertContains(address_response, 'Smallville', status_code=200)
        self.assertContains(address_response, 'Gotham City', status_code=200)

    @override_settings(LFS_AUTO_UPDATE_DEFAULT_ADDRESSES=False)
    def test_register_then_view_address(self):
        """Check we have a customer in database after registration."""
        # we should have one customer starting
        self.assertEqual(Customer.objects.count(), 1)
        registration_response = self.client.post(
            reverse('lfs_login'),
            {'action': 'register', 'email': 'test@test.com',
             'password_1': 'password', 'password_2': 'password'})
        self.assertEqual(registration_response.status_code, 302)
        # response['Location'] is the supported header API; the private
        # ``_headers`` attribute was removed in Django 3.0.
        self.assertEqual(registration_response['location'], '/')
        # Test that one message has been sent.
        self.assertEqual(len(mail.outbox), 1)
        # see if we can view the address page
        address_response = self.client.get(reverse('lfs_my_addresses'))
        self.assertContains(address_response, 'City', status_code=200)
        # we should now have 2 customers
        self.assertEqual(Customer.objects.count(), 2)

    def dump_response(self, http_response):
        """Write *http_response* to a local HTML file for debugging."""
        with open('tests_customers.html', 'w') as fo:
            fo.write(str(http_response))

    @override_settings(LFS_AUTO_UPDATE_DEFAULT_ADDRESSES=False)
    def test_create_new_address(self):
        """Registration creates addresses; the address form updates them."""
        # test that we have only 4 addresses registered (from setUp)
        self.assertEqual(Address.objects.count(), 4)
        # register a new user
        registration_response = self.client.post(
            reverse('lfs_login'),
            {'action': 'register', 'email': 'test@test.com',
             'password_1': 'password', 'password_2': 'password'})
        self.assertEqual(registration_response.status_code, 302)
        self.assertEqual(registration_response['location'], '/')
        self.assertEqual(Address.objects.count(), 8)
        # Test that one message has been sent.
        self.assertEqual(len(mail.outbox), 1)
        our_user = User.objects.get(email='test@test.com')
        our_customer = Customer.objects.get(user=our_user)
        self.assertNotEqual(our_customer.selected_invoice_address, None)
        self.assertNotEqual(our_customer.selected_shipping_address, None)
        # Post a complete invoice + shipping address to the address page.
        address_data = {
            'invoice-firstname': 'Joe', 'invoice-lastname': 'Bloggs',
            'invoice-line1': 'de company name', 'invoice-line2': 'de street',
            'invoice-city': 'Dallas', 'invoice-state': 'TX',
            'invoice-code': '84003', 'invoice-country': 'US',
            'invoice-phone': '+49 4711 4711', 'invoice-email': 'joe.bloggs@acme.com',
            'shipping-firstname': 'Joe', 'shipping-lastname': 'Bloggs',
            'shipping-line1': 'de company name', 'shipping-line2': 'de street',
            'shipping-city': 'Dallas', 'shipping-state': 'TX',
            'shipping-code': '84003', 'shipping-country': 'US',
            # BUG FIX: this key was a duplicate 'invoice-email', so the
            # shipping e-mail was never actually posted.
            'shipping-phone': '+49 4712 4712', 'shipping-email': 'joe.bloggs@acme.com',
        }
        self.client.post(reverse('lfs_my_addresses'), address_data)
        self.assertEqual(Address.objects.count(), 8)
        # refetch our user from the database
        our_customer = Customer.objects.get(user=our_user)
        self.assertNotEqual(our_customer.selected_invoice_address, None)
        self.assertNotEqual(our_customer.selected_shipping_address, None)
        self.assertEqual(our_customer.selected_invoice_address.firstname, 'Joe')
        self.assertEqual(our_customer.selected_invoice_address.lastname, 'Bloggs')

    @override_settings(LFS_AUTO_UPDATE_DEFAULT_ADDRESSES=False)
    def _test_change_address_page(self):
        """
        Tests that we can see a shipping and an invoice address
        (disabled: the leading underscore hides it from the test runner)
        """
        # login as our customer
        logged_in = self.client.login(username=self.username, password=self.password)
        self.assertEqual(logged_in, True)
        # Build the POST payload from the current invoice/shipping data.
        iam = AddressManagement(self.customer, self.address2, "invoice")
        sam = AddressManagement(self.customer, self.address1, "shipping")
        data = {"invoice-firstname": "newname",
                "invoice-lastname": self.address2.lastname,
                "invoice-phone": self.address2.phone,
                "invoice-email": self.address2.email,
                "shipping-firstname": self.address1.firstname,
                "shipping-lastname": self.address1.lastname,
                "shipping-phone": self.address1.phone,
                "shipping-email": self.address1.email,
                }
        for key, value in iam.get_address_as_dict().items():
            data['invoice-%s' % key] = value
        for key, value in sam.get_address_as_dict().items():
            data['shipping-%s' % key] = value
        # Change the invoice country to Austria.
        data['invoice-country'] = 'AT'
        response = self.client.post(reverse('lfs_my_addresses'), data=data, follow=True)
        self.assertEqual(response.status_code, 200)
        # The stored invoice address must reflect the posted changes.
        changed = Address.objects.get(pk=self.address2.pk)
        self.assertEqual(changed.firstname, "newname")
        self.assertEqual(changed.country.code.upper(), "AT")
class LoginTestCase(TestCase):
    """Tests for customer registration and account e-mail changes."""
    fixtures = ['lfs_shop.xml']

    def test_register_customer(self):
        """Register users; long e-mails get truncated/unique usernames."""
        browser = Client()
        # The login page itself must be reachable.
        response = browser.get(reverse('lfs_login'))
        self.assertEqual(response.status_code, 200)
        self.assertFalse(User.objects.filter(username='test@example.com').exists())

        def register(address):
            # Submit the registration form for the given e-mail address.
            return browser.post(reverse('lfs_login'), {'email': address,
                                                       'password_1': 'test',
                                                       'password_2': 'test',
                                                       'action': 'register',
                                                       'next': '/'})

        response = register('test@example.com')
        self.assertTrue(User.objects.filter(username='test@example.com').exists())

        long_mail = 'testverylongemailaddressthatislongerthanusername@example.com'
        response = register(long_mail)
        self.assertTrue(User.objects.filter(email=long_mail).exists())
        account = User.objects.get(email=long_mail)
        # Usernames are capped at 30 characters, so the e-mail is cut.
        self.assertEqual(account.username, account.email[:30])

        long_mail_2 = 'testverylongemailaddressthatislongerthanusername2@example.com'
        # A second long address that truncates to the same 30 chars must
        # receive a uniquified username.
        new_username = create_unique_username(long_mail_2)
        response = register(long_mail_2)
        self.assertTrue(User.objects.filter(email=long_mail_2).exists())
        account = User.objects.get(email=long_mail_2)
        self.assertEqual(account.username, new_username)

    def test_change_email(self):
        """A logged-in user can change the account e-mail address."""
        account = User.objects.create(username="test@example.com",
                                      email="test@example.com",
                                      is_active=True)
        account.set_password('test')
        account.save()

        browser = Client()
        browser.login(username='test@example.com', password='test')
        response = browser.post(reverse('lfs_my_email'),
                                {'email': 'testverylongemailaddressthatislongerthanusername@example.com',
                                 'action': 'email'})
        # A successful change redirects.
        self.assertEqual(response.status_code, 302)
        self.assertTrue(User.objects.filter(email='testverylongemailaddressthatislongerthanusername@example.com').exists())
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from solum.openstack.common.gettextutils import _
# Exponent applied to the unit-system base (1024 or 1000) for each
# size-prefix accepted by string_to_bytes().
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Per unit system: (base, regex matching "<number><prefix><b|bit|B>").
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}
# Lower-cased values recognized as True / False by bool_from_string().
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
# to_slug(): characters to strip, and whitespace/hyphen runs to collapse.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
# NOTE(flaper87): The following 3 globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# Templates covering key=value, XML elements, JSON/dict entries and
# CLI-argument forms; %(key)s is substituted with each sanitize key.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
                    r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
                    '.*?([\'"])',
                    r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']
# Pre-compile every (key, template) combination once at import time.
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    # ``int(bool)`` replaces the legacy ``and 1 or 0`` trick; it behaves
    # identically (True -> 1, False -> 0) and is unambiguous.
    return int(bool_from_string(subject))
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed such that strings matching 't',
    'true', 'on', 'y', 'yes', or '1' are considered True and, when
    `strict=False`, anything else returns the value specified by 'default'.

    Useful for JSON-decoded stuff and config file parsing.

    If `strict=True`, unrecognized values, including None, will raise a
    ValueError which is useful when parsing values passed in from an API call.
    Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
    """
    # Coerce non-string input (e.g. None, ints) to text first.
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    normalized = subject.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        return default
    # Strict mode: reject anything that is not an exact known value.
    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
       already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode: nothing to do.
    if isinstance(text, six.text_type):
        return text

    # Default to the interpreter's stdin/global encoding when the caller
    # did not specify one.
    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) If we get here, it means that
        # sys.stdin.encoding / sys.getdefaultencoding
        # didn't return a suitable encoding to decode
        # text. This happens mostly when global LANG
        # var is not set correctly and there's no
        # default encoding. In this case, most likely
        # python will use ASCII or ANSI encoders as
        # default encodings but they won't be capable
        # of decoding non-ASCII characters.
        #
        # Also, UTF-8 is being used since it's an ASCII
        # extension.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
                representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        # Unicode input: encode directly to the target encoding.
        return text.encode(encoding, errors)
    elif text and encoding != incoming:
        # Decode text before encoding it with `encoding`
        text = safe_decode(text, incoming, errors)
        return text.encode(encoding, errors)
    else:
        # NOTE(review): bytes whose encoding already matches `encoding`
        # are returned unchanged; empty bytestrings also take this path
        # regardless of encoding -- presumably intentional, confirm.
        return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    Note that the SI unit system does not support capital letter 'K'

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    try:
        # (base, pattern) for the requested unit system; see
        # UNIT_SYSTEM_INFO at module level.
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        # 'b'/'bit' means bits, so divide by 8 to get bytes.
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        # Scale by base**exponent for the matched prefix (e.g. Mi -> 2).
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        # Round up so the integer result never under-reports the size.
        return int(math.ceil(res))
    return res
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    text = safe_decode(value, incoming, errors)
    # Transliterate to ASCII: decompose accented characters (NFKD) and
    # drop anything that does not survive an ascii round-trip.
    text = unicodedata.normalize("NFKD", text)
    text = text.encode("ascii", "ignore").decode("ascii")
    # Strip punctuation, trim, lowercase, then hyphenate whitespace runs.
    text = SLUGIFY_STRIP_RE.sub("", text).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", text)
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # NOTE(ldbragst): Check to see if anything in message contains any key
    # specified in _SANITIZE_KEYS, if not then just return the message since
    # we don't have to mask any passwords.
    if not any(sanitize_key in message for sanitize_key in _SANITIZE_KEYS):
        return message

    # Keep the captured delimiters (groups 1 and 2) around the secret.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for regex in _SANITIZE_PATTERNS:
        message = re.sub(regex, replacement, message)
    return message
|
|
#!/usr/bin/python
#title :gentotsv.py
#description :Script to process impute files .gz and create a csv file
#author :Diego Alvarez
#date :2016-06-05
#python_version :3.5
#==============================================================================
import gzip
import os
import fnmatch
import csv
import sys
import getopt
import time
import linecache
import utils
import config
from multiprocessing import Pool, Process
import multiprocessing
from functools import partial
def script_usage():
    # Print command-line usage for this script (Python 2 print syntax).
    print 'gentotsv.py -h<help> -t<threads> -s<sourcedir> -d<destinationdir> -f<samplefile>'
    print '---------'
    print 'If no parameters are passed, default values are taken from <config.py>'
    print 'Default #threads = #processor cores'
    print '----------------'
    return
def get_gen_file_columns(p_source_dir,p_source_file):
    # Return the number of whitespace-separated columns in the first line
    # of a gzipped GEN file (used to derive the number of individuals).
    with gzip.open(p_source_dir+p_source_file,'rb') as genfile:
        utils.log(logger,"GEN file: "+ p_source_file)
        columns=genfile.readline().split()
        totalcolumns = len(columns)
        utils.log(logger,"Columns in GEN file: "+str(totalcolumns))
        # NOTE(review): explicit close() is redundant inside ``with``.
        genfile.close()
    return totalcolumns
def create_sample_file(p_source_dir,p_destination_dir, p_source_file, p_file_type):
    # Convert a SAMPLE file into one tab-separated SAM_<name> file and
    # create an empty IND_* file per individual; returns the list of the
    # created individual file paths.
    utils.log(logger,"Begin - create_sample_file -")
    samplecountlines = 0
    source_file = utils.get_file_name(str(p_source_file))
    with open(p_destination_dir+"SAM_"+source_file+p_file_type, 'wb') as xfile:
        utils.log(logger,"Reading file SAMPLE: " + p_source_file)
        csvwriter = csv.writer(xfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        with open(p_source_dir+p_source_file,'rb') as samplefile:
            INDfilelist = []
            for line in samplefile:
                samplecountlines=samplecountlines+1
                # The first two SAMPLE lines are the header and data-type
                # rows; they get negative sequence numbers (-1, -2).
                if samplecountlines <= 2:
                    seq=str(samplecountlines * (-1)).split()
                    columns=line.split()
                    csvwriter.writerow(seq+columns)
                #Start counting individuals
                if samplecountlines > 2:
                    seq=str(samplecountlines-2).split()
                    columns=line.split()
                    col01= columns[0:2] #to create the file ID
                    csvwriter.writerow(seq+columns)
                    #Create empty INDIVIDUAL file
                    INDfilename = create_individuals_file(p_destination_dir, seq[0]+"_"+col01[0]+"_"+col01[1], p_file_type)
                    #Create list with Individuals file
                    INDfilelist.append(INDfilename)
            # NOTE(review): explicit close() calls are redundant inside
            # the ``with`` blocks.
            samplefile.close()
        xfile.close()
    utils.log(logger,"SAMPLE file lines: "+ str(samplecountlines))
    utils.log(logger,"End - create_sample_file -")
    return INDfilelist
def create_individuals_sample_files(p_source_dir,p_destination_dir, p_source_file, p_file_type):
    # Split a SAMPLE file into one SAM_<seq>_<id1>_<id2> file per
    # individual (rows of header/data-type/value triples) and create a
    # matching empty IND_* file for each; returns the IND_* path list.
    utils.log(logger,"Begin - create_individuals_sample_files -")
    samplecountlines = 0
    source_file = utils.get_file_name(str(p_source_file))
    INDfilelist = []
    with open(p_source_dir+p_source_file,'rb') as samplefile:
        for line in samplefile:
            samplecountlines = samplecountlines + 1
            columns = line.split()
            # Line 1 is the header row, line 2 the data-type row; every
            # later line is one individual.
            if samplecountlines == 1:
                headerline = columns[:]
            elif samplecountlines == 2:
                datatypeline = columns[:]
            else:
                individualline = samplecountlines - 2
                with open(p_destination_dir+"SAM_"+str(individualline)+"_"+str(columns[0])+"_"+str(columns[1])+p_file_type, 'wb') as xfile:
                    csvwriter = csv.writer(xfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    # One output row per SAMPLE column: header label,
                    # data type, and this individual's value.
                    for i in range(0, len(columns)):
                        csvwriter.writerow([headerline[i]]+[datatypeline[i]]+[columns[i]])
                    #Create empty INDIVIDUAL file
                    INDfilename = create_individuals_file(p_destination_dir, str(individualline)+"_"+columns[0]+"_"+columns[1], p_file_type)
                    #Create list with Individuals file
                    INDfilelist.append(INDfilename)
                    # NOTE(review): close() is redundant inside ``with``.
                    xfile.close()
        samplefile.close()
    utils.log(logger,"SAMPLE file lines: "+ str(samplecountlines))
    utils.log(logger,"End - create_individuals_sample_files -")
    return INDfilelist
def create_snp_file(p_source_dir,p_destination_dir, p_source_file_type, p_dest_file_type):
    # Build a single SNP file: for every matching gzipped GEN file in the
    # source dir, append one row per SNP containing the source file name,
    # a per-file sequence number and the first five GEN columns.
    utils.log(logger,"Begin - Create SNP file -")
    filename = p_destination_dir+"SNP"+p_dest_file_type
    # Create/truncate the output file before appending per input file.
    open(filename, 'w').close()
    for file_list in sorted(os.listdir(p_source_dir)):
        if fnmatch.fnmatch(file_list,'*'+p_source_file_type):
            with gzip.open(p_source_dir+file_list,'rb') as genfile:
                sequence=0
                gencountlines=0
                utils.log(logger,"Reading file GEN: " + file_list)
                with open(filename,'ab') as SNPfile:
                    csvwriter = csv.writer(SNPfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    #readlines() Loads full .gen file into memory and split in lines. To many threads
                    # or very big files can cause memory overflow.
                    #for line in genfile.readlines():
                    for line in genfile: #Read file line by line
                        gencountlines=gencountlines+1
                        columns=line.split()
                        col05=columns[0:5]
                        source_file = utils.get_file_name(file_list)
                        sequence=sequence+1
                        seq=str(sequence).split()
                        csvwriter.writerow([source_file]+seq+col05)
                    # NOTE(review): close() calls are redundant inside
                    # the ``with`` blocks.
                    SNPfile.close()
                genfile.close()
    utils.log(logger,"End - Create SNP file -")
    return
def create_individuals_file(p_destination_dir, p_filename, p_file_type):
    # Create (or truncate) an empty per-individual output file named
    # IND_<p_filename><p_file_type> and return its full path.
    ind_path = p_destination_dir + "IND_" + p_filename + p_file_type
    with open(ind_path, 'w'):
        pass  # touch only: leave the file empty
    return ind_path
def convert_cols_to_lines(p_source_dir,p_source_file,p_destination_dir,p_dest_file_list, p_individualsposlist, p_gen_column):
    # Worker task: extract one individual's genotype triplet (the three
    # columns starting at p_gen_column) from every line of a gzipped GEN
    # file and append the rows to that individual's IND_* file.
    utils.log(logger,"Begin - convert_gen_cols_to_ind_lines - ")
    # Individual number = 1-based position of this start column in the
    # list of individual start columns.
    positionindex = p_individualsposlist.index(p_gen_column)
    # NOTE(review): ``destination_file_type`` is a module-level global
    # assigned in the main script section below.
    regex = r"^{0}.*{1}$".format(p_destination_dir+"IND_"+str(positionindex+1)+"_",destination_file_type)
    p_indfilename = utils.find_file_in_list(p_dest_file_list,regex)
    source_file = utils.get_file_name(str(p_source_file))
    try:
        col = int(p_gen_column)
    except:
        # NOTE(review): bare except only logs; ``col`` stays undefined on
        # failure and the write below would raise NameError.
        e = sys.exc_info()[0]
        utils.log(logger,e)
    #Open individuals file
    with open(p_indfilename,'a') as indfile:
        utils.log(logger,"Writing IND .tsv file: "+ p_indfilename)
        csvwriter = csv.writer(indfile,delimiter='\t',quotechar='"', quoting=csv.QUOTE_MINIMAL)
        sequence = 0
        with gzip.open(p_source_dir+p_source_file,'rb') as genfile:
            for line in genfile: #reads line by line .gen file.
                #readlines() loads full .gen file into memory and split in lines. To many threads
                # or very big files can cause memory overflow.
                #for line in genfile.readlines():
                sequence=sequence+1
                seq=str(sequence).split()
                columns=line.split()
                csvwriter.writerow([source_file]+seq+columns[col:col+3])
            # NOTE(review): close() calls are redundant inside ``with``.
            indfile.close()
            utils.log(logger,"Lines in source file: "+ str(sequence))
        genfile.close()
    utils.log(logger,"End - convert_gen_cols_to_ind_lines - ")
    return
def update_individuals_file(p_source_dir,p_source_file_type,p_destination_dir,p_dest_file_list):
    # For every GEN file in the source dir, fan out one pool task per
    # individual; each task appends that individual's genotype columns to
    # its IND_* file (see convert_cols_to_lines).
    utils.log(logger,"Begin - update_individuals_file -")
    for file_list in sorted(os.listdir(p_source_dir)):
        if fnmatch.fnmatch(file_list,'*'+p_source_file_type):
            # NOTE(review): the __main__ guard presumably protects Pool
            # creation when workers re-import this module (Windows
            # spawn) -- confirm it still fires when this function runs
            # inside a child Process.
            if __name__ =='__main__':
                #with gzip.open(p_source_dir+file_list,'rb') as genfile:
                #read only first line
                genfile = gzip.open(p_source_dir+file_list,'rb')
                columns=genfile.readline().split()
                genfile_columns = len(columns)
                genfile.close()
                # ``numthreads`` is a module-level global set in the main
                # script section.
                utils.log(logger, "numthreads: "+str(numthreads))
                pool = Pool(int(numthreads))
                utils.log(logger,"Reading GEN file: "+ file_list)
                # Genotype triplets start at column 5; each individual
                # occupies three consecutive columns.
                index =5
                individualpos = 0
                individualsposlist = []
                #create list with all individuals position
                while(index < genfile_columns):
                    individualsposlist.append(index)
                    index = index + 3
                # Bind the fixed arguments; the pool maps over the start
                # columns.  .get() with a huge timeout blocks until done.
                func = partial(convert_cols_to_lines,p_source_dir,file_list,p_destination_dir,p_dest_file_list,individualsposlist)
                pool.map_async(func,individualsposlist).get(9999999)
    utils.log(logger,"End - update_individuals_file -")
    return
###########################################################################################################
###############################################Main function###############################################
###########################################################################################################
# Script entry point: parse CLI options, then convert Oxford-format
# SAMPLE/GEN files into per-individual .tsv files plus one SNP file,
# running the two conversions in parallel processes.
try:
    print 'ARGV :', sys.argv[1:]
    opts, args = getopt.getopt(sys.argv[1:], 'ht:s:d:f:', ['help=','threads=','sourcedir=','destinationdir=','samplefile='])
    print 'OPTIONS :', opts
    #Initialization
    help=0
    samplecount=0
    samplecounttotal=0
    gencount=0
    gencounttotal=0
    poscount=0
    #Get default values
    source_dir = config.source_dir_oxford
    source_file_type = config.source_file_type_oxford
    destination_dir = config.destination_dir_oxford
    destination_file_type =config.destination_file_type_oxford
    sample_file = config.sample_file_oxford
    sample_file_format = config.sample_file_format_oxford
    numthreads = multiprocessing.cpu_count()
    #Pass the script name to Log
    logger=utils.create_logger("gentotsv")
    start_time = time.time()
    print "Start time: "+time.ctime()
    utils.log(logger, "Start time: "+time.ctime())
    # Command-line options override the config.py defaults.
    for opt,arg in opts:
        if opt=='-h':
            help = 1
            script_usage()
        elif opt=='-t':
            # NOTE(review): ``global`` is a no-op at module level and the
            # name is misspelled ('numtreads'); the assignment below is
            # what actually takes effect.
            global numtreads
            numthreads = arg
        elif opt=='-s':
            source_dir = arg
        elif opt=='-d':
            destination_dir = arg
        elif opt=='-f':
            sample_file = arg
    if help == 0:
        print "Number of threads: "+str(numthreads)
        utils.log(logger, "Number of threads: "+str(numthreads))
        print "Sample file format: "+sample_file_format
        utils.log(logger, "Sample file format: "+sample_file_format)
        print "Source directory: "+source_dir
        utils.log(logger, "Source directory: "+source_dir)
        print "Destination directory: "+destination_dir
        utils.log(logger, "Destination directory: "+destination_dir)
        print "Sample file name: "+sample_file
        utils.log(logger, "Sample file name: "+sample_file)
        if not os.path.exists(source_dir):
            utils.log(logger, "EXCEPTION - Source directory "+source_dir+" does not exist")
            sys.exit("EXCEPTION - Source directory "+source_dir+" does not exist")
        #Create destination directory
        try:
            os.makedirs(destination_dir)
        except OSError as err:
            # Directory already exists: not an error.
            pass
        if os.path.isfile(source_dir+sample_file):
            #----------------------
            #Convert SAMPLE to TSV
            # 1 file = 1 individual
            #----------------------
            INDfilelist = create_individuals_sample_files(source_dir,destination_dir,sample_file,destination_file_type)
            # 2 threads. Parallel processing.
            if __name__=='__main__':
                #----------------------
                #Create SNP file with 1st 5 columns of GEN file
                #----------------------
                #create_snp_file(source_dir,destination_dir,source_file_type, destination_file_type)
                p1 = Process(target=create_snp_file, args=(source_dir,destination_dir,source_file_type, destination_file_type))
                p1.start()
                #----------------------
                #Convert GEN to TSV
                # 1 file = 1 individual
                #----------------------
                #update_individuals_file(source_dir,source_file_type,destination_dir,INDfilelist)
                p2 = Process(target=update_individuals_file, args=(source_dir,source_file_type,destination_dir,INDfilelist))
                p2.start()
                # Wait for both conversion processes to finish.
                p1.join()
                p2.join()
        else:
            utils.log(logger," EXCEPTION - Sample File: " + sample_file + " does not exists")
            sys.exit("Sample File: " + sample_file + " does not exists")
    print time.ctime()
    utils.log(logger, "End time: "+time.ctime())
except getopt.GetoptError as err:
    # Bad command-line options: report and exit non-zero.
    print str(err)
    utils.log(logger,str(err))
    sys.exit(2)
except:
    # Last-resort handler: log the failing file/line/source before exit.
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
    utils.log(logger,'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
    sys.exit(2)
|
|
#!/usr/bin/env python
"""
Functions to do automatic visualization of activation-like maps.
For 2D-only visualization, only matplotlib is required.
For 3D visualization, Mayavi, version 3.0 or greater, is required.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD
# Standard library imports
import os
import sys
# Standard scientific libraries imports (more specific imports are
# delayed, so that the part module can be used without them).
import numpy as np
import matplotlib as mp
import pylab as pl
# Local imports
from nipy.neurospin.utils.mask import compute_mask
from nipy.io.imageformats import load
from anat_cache import mni_sform, mni_sform_inv, _AnatCache
from coord_tools import coord_transform, find_activation, \
find_cut_coords
class SformError(Exception):
    """Raised when an image's sform affine matrix is unusable
    (e.g. singular and therefore not invertible)."""
class NiftiIndexError(IndexError):
    """Raised when a Nifti image has an unexpected number of
    dimensions or a mismatched shape."""
################################################################################
# Colormaps
def _rotate_cmap(cmap, name=None, swap_order=('green', 'red', 'blue')):
    """ Build a new colormap from `cmap` with its color channels permuted.

        The channel named first in `swap_order` becomes green, the second
        becomes blue and the third becomes red.
    """
    source = cmap._segmentdata.copy()
    cdict = dict()
    # Map each origin channel (per swap_order) onto its target channel.
    for target, origin in zip(('green', 'blue', 'red'), swap_order):
        cdict[target] = [(p, c1, c2) for (p, c1, c2) in source[origin]]
    if name is None:
        name = '%s_rotated' % cmap.name
    return mp.colors.LinearSegmentedColormap(name, cdict, 512)
def _pigtailed_cmap(cmap, name=None,
                    swap_order=('green', 'red', 'blue')):
    """ Utility function building a new colormap that is `cmap`'s reverse
        (with channels permuted per `swap_order`) on [0, 0.5] followed by
        `cmap` itself on [0.5, 1].
    """
    source = cmap._segmentdata.copy()
    cdict = dict()
    # First half: the reversed, channel-permuted colormap, squeezed into
    # the [0, 0.5] range.
    for target, origin in zip(('green', 'blue', 'red'), swap_order):
        cdict[target] = [(0.5 * (1 - p), c1, c2)
                         for (p, c1, c2) in reversed(source[origin])]
    # Second half: the original colormap, squeezed into [0.5, 1].
    for channel in ('red', 'green', 'blue'):
        cdict[channel].extend([(0.5 * (1 + p), c1, c2)
                               for (p, c1, c2) in source[channel]])
    if name is None:
        name = '%s_reversed' % cmap.name
    return mp.colors.LinearSegmentedColormap(name, cdict, 512)
# Using a dict as a namespace, to mimic matplotlib's cm module
_cm = dict(
    # Two-tailed ("pigtailed") colormaps: a colormap concatenated with
    # its reverse, so negative and positive values get distinct hues.
    cold_hot = _pigtailed_cmap(pl.cm.hot, name='cold_hot'),
    brown_blue = _pigtailed_cmap(pl.cm.bone, name='brown_blue'),
    cyan_copper = _pigtailed_cmap(pl.cm.copper, name='cyan_copper'),
    cyan_orange = _pigtailed_cmap(pl.cm.YlOrBr_r, name='cyan_orange'),
    blue_red = _pigtailed_cmap(pl.cm.Reds_r, name='blue_red'),
    brown_cyan = _pigtailed_cmap(pl.cm.Blues_r, name='brown_cyan'),
    purple_green = _pigtailed_cmap(pl.cm.Greens_r, name='purple_green',
                    swap_order=('red', 'blue', 'green')),
    purple_blue = _pigtailed_cmap(pl.cm.Blues_r, name='purple_blue',
                    swap_order=('red', 'blue', 'green')),
    blue_orange = _pigtailed_cmap(pl.cm.Oranges_r, name='blue_orange',
                    swap_order=('green', 'red', 'blue')),
    # Single-tailed colormaps with permuted color channels.
    black_blue = _rotate_cmap(pl.cm.hot, name='black_blue'),
    black_purple = _rotate_cmap(pl.cm.hot, name='black_purple',
                    swap_order=('blue', 'red', 'green')),
    black_pink = _rotate_cmap(pl.cm.hot, name='black_pink',
                    swap_order=('blue', 'green', 'red')),
    black_green = _rotate_cmap(pl.cm.hot, name='black_green',
                    swap_order=('red', 'blue', 'green')),
    black_red = pl.cm.hot,
    )
# Also expose every standard matplotlib colormap under the same namespace.
_cm.update(pl.cm.datad)
class _CM(dict):
    """ Dictionary whose entries are also exposed as attributes, so the
        colormaps can be reached both as ``cm['name']`` and ``cm.name``.
    """
    def __init__(self, *args, **kwargs):
        super(_CM, self).__init__(*args, **kwargs)
        self.__dict__.update(self)

cm = _CM(**_cm)
################################################################################
# 2D plotting of activation maps
################################################################################
def plot_map_2d(map, sform, cut_coords, anat=None, anat_sform=None,
                vmin=None, figure_num=None, axes=None, title='',
                mask=None, **kwargs):
    """ Plot three cuts of a given activation map (Frontal, Axial, and Lateral)

        Parameters
        ----------
        map : 3D ndarray
            The activation map, as a 3D image.
        sform : 4x4 ndarray
            The affine matrix going from image voxel space to MNI space.
        cut_coords: 3-tuple of floats
            The MNI coordinates of the point where the cut is performed, in
            MNI coordinates and order.
        anat : 3D ndarray, optional or False
            The anatomical image to be used as a background. If None, the
            MNI152 T1 1mm template is used. If False, no anat is displayed.
        anat_sform : 4x4 ndarray, optional
            The affine matrix going from the anatomical image voxel space to
            MNI space. This parameter is not used when the default
            anatomical is used, but it is compulsory when using an
            explicit anatomical image.
        vmin : float, optional
            The lower threshold of the positive activation. This
            parameter is used to threshold the activation map.
        figure_num : integer, optional
            The number of the matplotlib figure used. If None is given, a
            new figure is created.
        axes : 4 tuple of float: (xmin, xmax, ymin, ymax), optional
            The coordinates, in matplotlib figure space, of the axes
            used to display the plot. If None, the complete figure is
            used.
        title : string, optional
            The title displayed on the figure.
        mask : 3D ndarray, boolean, optional
            The brain mask. If None, the mask is computed from the map.
        kwargs: extra keyword arguments, optional
            Extra keyword arguments passed to pylab.imshow

        Notes
        -----
        All the 3D arrays are in numpy convention: (x, y, z)

        Cut coordinates are in Talairach coordinates. Warning: Talairach
        coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
        convention.
    """
    if anat is None:
        anat, anat_sform, vmax_anat = _AnatCache.get_anat()
    elif anat is not False:
        vmax_anat = anat.max()
    # A mask that covers everything or nothing carries no information.
    if mask is not None and (
                np.all(mask) or np.all(np.logical_not(mask))):
        mask = None
    vmin_map = map.min()
    vmax_map = map.max()
    if vmin is not None and np.isfinite(vmin):
        map = np.ma.masked_less(map, vmin)
    elif mask is not None and not isinstance(map, np.ma.masked_array):
        map = np.ma.masked_array(map, np.logical_not(mask))
    vmin_map = map.min()
    vmax_map = map.max()
    # BUG FIX: use_mask was previously assigned only inside the
    # isinstance() branch below, so passing a plain (non-masked) ndarray
    # raised a NameError at the "if use_mask" test. Initialize it here.
    # NOTE(review): nothing ever sets use_mask to True, so the re-masking
    # branch below is currently dead -- the masking logic looks
    # half-finished upstream; confirm intended behavior.
    use_mask = False
    if isinstance(map, np.ma.core.MaskedArray):
        # Strip trivial masks (all-True or all-False): they add nothing
        # and plain ndarrays render faster.
        if map._mask is False or np.all(np.logical_not(map._mask)):
            map = np.asarray(map)
        elif map._mask is True or np.all(map._mask):
            map = np.asarray(map)
    if use_mask and mask is not None:
        map = np.ma.masked_array(map, np.logical_not(mask))
    # Calculate the bounds: the last three columns of each bounds matrix
    # hold the MNI-space image of the volume's corner coordinates.
    if anat is not False:
        anat_bounds = np.zeros((4, 6))
        anat_bounds[:3, -3:] = np.identity(3) * anat.shape
        anat_bounds[-1, :] = 1
        anat_bounds = np.dot(anat_sform, anat_bounds)
    map_bounds = np.zeros((4, 6))
    map_bounds[:3, -3:] = np.identity(3) * map.shape
    map_bounds[-1, :] = 1
    map_bounds = np.dot(sform, map_bounds)
    # The coordinates of the center of the cut in different spaces.
    y, x, z = cut_coords
    x_map, y_map, z_map = [int(round(c)) for c in
                           coord_transform(x, y, z,
                                           np.linalg.inv(sform))]
    if anat is not False:
        x_anat, y_anat, z_anat = [int(round(c)) for c in
                                  coord_transform(x, y, z,
                                                  np.linalg.inv(anat_sform))]
    fig = pl.figure(figure_num, figsize=(6.6, 2.6))
    if axes is None:
        axes = (0., 1., 0., 1.)
        pl.clf()
    ax_xmin, ax_xmax, ax_ymin, ax_ymax = axes
    ax_width = ax_xmax - ax_xmin
    ax_height = ax_ymax - ax_ymin
    # Calculate the axes ratio size in a 'clever' way: each panel's width
    # is proportional to the corresponding volume dimension.
    if anat is not False:
        shapes = np.array(anat.shape, 'f')
    else:
        shapes = np.array(map.shape, 'f')
    shapes *= ax_width / shapes.sum()
    ###########################################################################
    # Frontal
    pl.axes([ax_xmin, ax_ymin, shapes[0], ax_height])
    if anat is not False:
        if y_anat < anat.shape[1]:
            pl.imshow(np.rot90(anat[:, y_anat, :]),
                      cmap=pl.cm.gray,
                      vmin=-.5 * vmax_anat,
                      vmax=vmax_anat,
                      extent=(anat_bounds[0, 3],
                              anat_bounds[0, 0],
                              anat_bounds[2, 0],
                              anat_bounds[2, 5]))
    if y_map < map.shape[1]:
        pl.imshow(np.rot90(map[:, y_map, :]),
                  vmin=vmin_map,
                  vmax=vmax_map,
                  extent=(map_bounds[0, 3],
                          map_bounds[0, 0],
                          map_bounds[2, 0],
                          map_bounds[2, 5]),
                  **kwargs)
    pl.text(ax_xmin + shapes[0] + shapes[1] - 0.01, ax_ymin + 0.07, '%i' % x,
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=fig.transFigure)
    xmin, xmax = pl.xlim()
    ymin, ymax = pl.ylim()
    # Crosshairs marking the cut position.
    pl.hlines(z, xmin, xmax, color=(.5, .5, .5))
    pl.vlines(-x, ymin, ymax, color=(.5, .5, .5))
    pl.axis('off')
    ###########################################################################
    # Lateral
    pl.axes([ax_xmin + shapes[0], ax_ymin, shapes[1], ax_height])
    if anat is not False:
        if x_anat < anat.shape[0]:
            pl.imshow(np.rot90(anat[x_anat, ...]), cmap=pl.cm.gray,
                      vmin=-.5 * vmax_anat,
                      vmax=vmax_anat,
                      extent=(anat_bounds[1, 0],
                              anat_bounds[1, 4],
                              anat_bounds[2, 0],
                              anat_bounds[2, 5]))
    if x_map < map.shape[0]:
        pl.imshow(np.rot90(map[x_map, ...]),
                  vmin=vmin_map,
                  vmax=vmax_map,
                  extent=(map_bounds[1, 0],
                          map_bounds[1, 4],
                          map_bounds[2, 0],
                          map_bounds[2, 5]),
                  **kwargs)
    pl.text(ax_xmin + shapes[-1] - 0.01, ax_ymin + 0.07, '%i' % y,
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=fig.transFigure)
    xmin, xmax = pl.xlim()
    ymin, ymax = pl.ylim()
    pl.hlines(z, xmin, xmax, color=(.5, .5, .5))
    pl.vlines(y, ymin, ymax, color=(.5, .5, .5))
    pl.axis('off')
    ###########################################################################
    # Axial
    pl.axes([ax_xmin + shapes[0] + shapes[1], ax_ymin, shapes[-1],
             ax_height])
    if anat is not False:
        if z_anat < anat.shape[2]:
            pl.imshow(np.rot90(anat[..., z_anat]),
                      cmap=pl.cm.gray,
                      vmin=-.5 * vmax_anat,
                      vmax=vmax_anat,
                      extent=(anat_bounds[0, 0],
                              anat_bounds[0, 3],
                              anat_bounds[1, 0],
                              anat_bounds[1, 4]))
    if z_map < map.shape[2]:
        pl.imshow(np.rot90(map[..., z_map]),
                  vmin=vmin_map,
                  vmax=vmax_map,
                  extent=(map_bounds[0, 0],
                          map_bounds[0, 3],
                          map_bounds[1, 0],
                          map_bounds[1, 4]),
                  **kwargs)
    pl.text(ax_xmax - 0.01, ax_ymin + 0.07, '%i' % z,
            horizontalalignment='right',
            verticalalignment='bottom',
            transform=fig.transFigure)
    xmin, xmax = pl.xlim()
    ymin, ymax = pl.ylim()
    pl.hlines(y, xmin, xmax, color=(.5, .5, .5))
    pl.vlines(x, ymin, ymax, color=(.5, .5, .5))
    pl.axis('off')
    pl.text(ax_xmin + 0.01, ax_ymax - 0.01, title,
            horizontalalignment='left',
            verticalalignment='top',
            transform=fig.transFigure)
    pl.axis('off')
def demo_plot_map_2d():
    """ Demo of plot_map_2d on a synthetic activation map. """
    activation = np.zeros((182, 218, 182))
    # Light up an asymmetric box around Broadman area 26.
    x, y, z = -6, -53, 9
    i, j, k = coord_transform(x, y, z, mni_sform_inv)
    activation[i - 30:i + 30, j - 3:j + 3, k - 10:k + 10] = 1
    activation = np.ma.masked_less(activation, 0.5)
    plot_map_2d(activation, mni_sform, cut_coords=(x, y, z),
                figure_num=512)
def plot_map(map, sform, cut_coords, anat=None, anat_sform=None,
             vmin=None, figure_num=None, title='', mask=None):
    """ Plot together a 3D volume rendering view of the activation, with an
        outline of the brain, and 2D cuts. If Mayavi is not installed,
        falls back to 2D views only.

        Parameters
        ----------
        map : 3D ndarray
            The activation map, as a 3D image.
        sform : 4x4 ndarray
            The affine matrix going from image voxel space to MNI space.
        cut_coords: 3-tuple of floats, optional
            The MNI coordinates of the cut to perform, in MNI coordinates
            and order. If None is given, the cut_coords are automatically
            estimated.
        anat : 3D ndarray, optional
            The anatomical image to be used as a background. If None, the
            MNI152 T1 1mm template is used.
        anat_sform : 4x4 ndarray, optional
            The affine matrix going from the anatomical image voxel space to
            MNI space. This parameter is not used when the default
            anatomical is used, but it is compulsory when using an
            explicit anatomical image.
        vmin : float, optional
            The lower threshold of the positive activation. This
            parameter is used to threshold the activation map.
        figure_num : integer, optional
            The number of the matplotlib and Mayavi figures used. If None is
            given, a new figure is created.
        title : string, optional
            The title displayed on the figure.
        mask : 3D ndarray, boolean, optional
            The brain mask. If None, the mask is computed from the map.

        Notes
        -----
        All the 3D arrays are in numpy convention: (x, y, z)

        Cut coordinates are in Talairach coordinates. Warning: Talairach
        coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
        convention.
    """
    # Probe for a sufficiently recent Mayavi (>= 3.x); on failure, degrade
    # gracefully to the pure-matplotlib 2D view.
    # NOTE: `print >>` is Python 2 syntax -- this module predates Python 3.
    try:
        from enthought.mayavi import version
        if not int(version.version[0]) > 2:
            raise ImportError
    except ImportError:
        print >> sys.stderr, 'Mayavi > 3.x not installed, plotting only 2D'
        return plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat,
                           anat_sform=anat_sform, vmin=vmin,
                           title=title,
                           figure_num=figure_num, mask=mask)
    from .maps_3d import plot_map_3d, m2screenshot
    # Render the volume with Mayavi first...
    plot_map_3d(map, sform, cut_coords=cut_coords, anat=anat,
                anat_sform=anat_sform, vmin=vmin,
                figure_num=figure_num, mask=mask)
    fig = pl.figure(figure_num, figsize=(10.6, 2.6))
    ax = pl.axes((-0.01, 0, 0.3, 1))
    # ...capture the Mayavi scene into the left third of the figure...
    m2screenshot(mpl_axes=ax)
    # ...and draw the three 2D cuts in the remaining space on the right.
    plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat,
                anat_sform=anat_sform, vmin=vmin, mask=mask,
                figure_num=fig.number, axes=(0.28, 1, 0, 1.), title=title)
def demo_plot_map():
    """ Demo of plot_map on a synthetic activation map. """
    activation = np.zeros((182, 218, 182))
    # Light up an asymmetric box around Broadman area 26.
    x, y, z = -6, -53, 9
    i, j, k = coord_transform(x, y, z, mni_sform_inv)
    activation[i - 30:i + 30, j - 3:j + 3, k - 10:k + 10] = 1
    plot_map(activation, mni_sform, cut_coords=(x, y, z), vmin=0.5,
             figure_num=512)
def auto_plot_map(map, sform, vmin=None, cut_coords=None, do3d=False,
                  anat=None, anat_sform=None, title='',
                  figure_num=None, mask=None, auto_sign=True):
    """ Automatic plotting of an activation map.

        Plot together a 3D volume rendering view of the activation, with an
        outline of the brain, and 2D cuts. If Mayavi is not installed,
        falls back to 2D views only.

        Parameters
        ----------
        map : 3D ndarray
            The activation map, as a 3D image.
        sform : 4x4 ndarray
            The affine matrix going from image voxel space to MNI space.
        vmin : float, optional
            The lower threshold of the positive activation. This
            parameter is used to threshold the activation map.
        cut_coords: 3-tuple of floats, optional
            The MNI coordinates of the point where the cut is performed, in
            MNI coordinates and order. If None is given, the cut_coords are
            automatically estimated.
        do3d : boolean, optional
            If do3d is True, a 3D plot is created if Mayavi is installed.
        anat : 3D ndarray, optional
            The anatomical image to be used as a background. If None, the
            MNI152 T1 1mm template is used.
        anat_sform : 4x4 ndarray, optional
            The affine matrix going from the anatomical image voxel space to
            MNI space. This parameter is not used when the default
            anatomical is used, but it is compulsory when using an
            explicit anatomical image.
        title : string, optional
            The title displayed on the figure.
        figure_num : integer, optional
            The number of the matplotlib and Mayavi figures used. If None is
            given, a new figure is created.
        mask : 3D ndarray, boolean, optional
            The brain mask. If None, the mask is computed from the map.
        auto_sign : boolean, optional
            If auto_sign is True, the sign of the activation is
            automatically computed: negative activation can thus be
            plotted.

        Returns
        -------
        vmin : float
            The lower threshold of the activation used.
        cut_coords : 3-tuple of floats
            The Talairach coordinates of the cut performed for the 2D
            view.

        Notes
        -----
        All the 3D arrays are in numpy convention: (x, y, z)

        Cut coordinates are in Talairach coordinates. Warning: Talairach
        coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
        convention.
    """
    if do3d:
        # 'offscreen' requests headless Mayavi rendering; failure to set
        # the option is deliberately ignored (best-effort).
        if do3d == 'offscreen':
            try:
                from enthought.mayavi import mlab
                mlab.options.offscreen = True
            except:
                pass
        plotter = plot_map
    else:
        plotter = plot_map_2d
    if mask is None:
        mask = compute_mask(map)
    if vmin is None:
        # Relax the p-value by 25% per iteration until find_activation
        # yields a finite positive threshold, or a finite negative one is
        # adopted by flipping the map's sign.
        # NOTE(review): if find_activation never returns a finite bound
        # this loop does not terminate, and pvalue can grow past 1 --
        # confirm find_activation's behavior for large pvalue.
        vmin = np.inf
        pvalue = 0.04
        while not np.isfinite(vmin):
            pvalue *= 1.25
            vmax, vmin = find_activation(map, mask=mask, pvalue=pvalue)
            if not np.isfinite(vmin) and auto_sign:
                if np.isfinite(vmax):
                    # Only negative activation found: flip the map's sign
                    # (in place, inside the mask when one is given) so it
                    # can be displayed as positive activation.
                    vmin = -vmax
                    if mask is not None:
                        map[mask] *= -1
                    else:
                        map *= -1
    if cut_coords is None:
        x, y, z = find_cut_coords(map, activation_threshold=vmin)
        # XXX: Careful with Voxel/MNI ordering
        y, x, z = coord_transform(x, y, z, sform)
        cut_coords = (x, y, z)
    plotter(map, sform, vmin=vmin, cut_coords=cut_coords,
            anat=anat, anat_sform=anat_sform, title=title,
            figure_num=figure_num, mask=mask)
    return vmin, cut_coords
def plot_niftifile(filename, outputname=None, do3d=False, vmin=None,
cut_coords=None, anat_filename=None, figure_num=None,
mask_filename=None, auto_sign=True):
""" Given a nifti filename, plot a view of it to a file (png by
default).
Parameters
----------
filename : string
The name of the Nifti file of the map to be plotted
outputname : string, optional
The file name of the output file created. By default
the name of the input file with a png extension is used.
do3d : boolean, optional
If do3d is True, a 3D plot is created if Mayavi is installed.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order. If None is given, the cut_coords are
automaticaly estimated.
anat : string, optional
Name of the Nifti image file to be used as a background. If None,
the MNI152 T1 1mm template is used.
title : string, optional
The title dispayed on the figure.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
mask_filename : string, optional
Name of the Nifti file to be used as brain mask. If None, the
mask is computed from the map.
auto_sign : boolean, optional
If auto_sign is True, the sign of the activation is
automaticaly computed: negative activation can thus be
plotted.
Notes
-----
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if outputname is None:
outputname = os.path.splitext(filename)[0] + '.png'
if not os.path.exists(filename):
raise OSError, 'File %s does not exist' % filename
nim = load(filename)
sform = nim.get_affine()
if any(np.linalg.eigvals(sform)==0):
raise SformError, "sform affine is not inversible"
if anat_filename is not None:
anat_im = load(anat_filename)
anat = anat_im.data
anat_sform = anat_im.get_affine()
else:
anat = None
anat_sform = None
if mask_filename is not None:
mask_im = load(mask_filename)
mask = mask_im.data.astype(np.bool)
if not np.allclose(mask_im.get_affine(), sform):
raise SformError, 'Mask does not have same sform as image'
if not np.allclose(mask.shape, nim.data.shape[:3]):
raise NiftiIndexError, 'Mask does not have same shape as image'
else:
mask = None
output_files = list()
if nim.data.ndim == 3:
map = nim.data.T
auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords,
do3d=do3d, anat=anat, anat_sform=anat_sform, mask=mask,
title=os.path.basename(filename), figure_num=figure_num,
auto_sign=auto_sign)
pl.savefig(outputname)
output_files.append(outputname)
elif nim.data.ndim == 4:
outputname, outputext = os.path.splitext(outputname)
if len(nim.data) < 10:
fmt = '%s_%i%s'
elif len(nim.data) < 100:
fmt = '%s_%02i%s'
elif len(nim.data) < 1000:
fmt = '%s_%03i%s'
else:
fmt = '%s_%04i%s'
if mask is None:
mask = compute_mask(nim.data.mean(axis=0)).T
for index, data in enumerate(nim.data):
map = data.T
auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords,
do3d=do3d, anat=anat, anat_sform=anat_sform,
title='%s, %i' % (os.path.basename(filename), index),
figure_num=figure_num, mask=mask, auto_sign=auto_sign)
this_outputname = fmt % (outputname, index, outputext)
pl.savefig(this_outputname)
pl.clf()
output_files.append(this_outputname)
else:
raise NiftiIndexError, 'File %s: incorrect number of dimensions'
return output_files
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Target` class.
It is a central concept of Luigi and represents the state of the workflow.
"""
import abc
import io
import os
import random
import tempfile
import logging
from luigi import six
# Shared module-level logger for Luigi's target machinery.
logger = logging.getLogger('luigi-interface')
@six.add_metaclass(abc.ABCMeta)
class Target(object):
    """
    A Target is a resource generated by a :py:class:`~luigi.task.Task`.

    For example, a Target might correspond to a file in HDFS or data in a database. The Target
    interface defines one method that must be overridden: :py:meth:`exists`, which signifies if the
    Target has been created or not.

    Typically, a :py:class:`~luigi.task.Task` will define one or more Targets as output, and the Task
    is considered complete if and only if each of its output Targets exist.
    """

    @abc.abstractmethod
    def exists(self):
        """
        Returns ``True`` if the :py:class:`Target` exists and ``False`` otherwise.

        Subclasses must override this; the check should be side-effect free.
        """
        pass
class FileSystemException(Exception):
    """
    Root of the exception hierarchy for file-system related errors.
    """
class FileAlreadyExists(FileSystemException):
    """
    Raised when an operation requires a path to be absent, but a file or
    directory already exists there.
    """
class MissingParentDirectory(FileSystemException):
    """
    Raised when an operation needs a parent directory that does not exist.
    (Think ``mkdir`` without ``-p``.)
    """
class NotADirectory(FileSystemException):
    """
    Raised when a path expected to be a directory turns out to be a
    regular file.
    """
@six.add_metaclass(abc.ABCMeta)
class FileSystem(object):
    """
    FileSystem abstraction used in conjunction with :py:class:`FileSystemTarget`.

    Typically, a FileSystem is associated with instances of a :py:class:`FileSystemTarget`. The
    instances of the py:class:`FileSystemTarget` will delegate methods such as
    :py:meth:`FileSystemTarget.exists` and :py:meth:`FileSystemTarget.remove` to the FileSystem.

    Methods of FileSystem raise :py:class:`FileSystemException` if there is a problem completing the
    operation.
    """

    @abc.abstractmethod
    def exists(self, path):
        """
        Return ``True`` if file or directory at ``path`` exist, ``False`` otherwise

        :param str path: a path within the FileSystem to check for existence.
        """
        pass

    @abc.abstractmethod
    def remove(self, path, recursive=True, skip_trash=True):
        """ Remove file or directory at location ``path``

        :param str path: a path within the FileSystem to remove.
        :param bool recursive: if the path is a directory, recursively remove the directory and all
                               of its descendants. Defaults to ``True``.
        :param bool skip_trash: presumably bypasses any trash/recycle mechanism when the backing
                                implementation supports one -- confirm per implementation.
        """
        pass

    def mkdir(self, path, parents=True, raise_if_exists=False):
        """
        Create directory at location ``path``

        Creates the directory at ``path`` and implicitly create parent
        directories if they do not already exist.

        :param str path: a path within the FileSystem to create as a directory.
        :param bool parents: Create parent directories when necessary. When
                             parents=False and the parent directory doesn't
                             exist, raise luigi.target.MissingParentDirectory
        :param bool raise_if_exists: raise luigi.target.FileAlreadyExists if
                                     the folder already exists.

        *Note*: This method is optional, not all FileSystem subclasses implement it.

        *Note*: parents and raise_if_exists were added in August 2014. Some
        implementations might not support these flags yet.
        """
        raise NotImplementedError("mkdir() not implemented on {0}".format(self.__class__.__name__))

    def isdir(self, path):
        """
        Return ``True`` if the location at ``path`` is a directory. If not, return ``False``.

        :param str path: a path within the FileSystem to check as a directory.

        *Note*: This method is optional, not all FileSystem subclasses implement it.
        """
        raise NotImplementedError("isdir() not implemented on {0}".format(self.__class__.__name__))

    def listdir(self, path):
        """Return a list of files rooted in path.

        This returns an iterable of the files rooted at ``path``. This is intended to be a
        recursive listing.

        :param str path: a path within the FileSystem to list.

        *Note*: This method is optional, not all FileSystem subclasses implement it.
        """
        raise NotImplementedError("listdir() not implemented on {0}".format(self.__class__.__name__))
class FileSystemTarget(Target):
    """
    Base class for FileSystem Targets like :class:`~luigi.file.LocalTarget` and :class:`~luigi.contrib.hdfs.HdfsTarget`.

    A FileSystemTarget has an associated :py:class:`FileSystem` to which certain operations can be
    delegated. By default, :py:meth:`exists` and :py:meth:`remove` are delegated to the
    :py:class:`FileSystem`, which is determined by the :py:meth:`fs` property.

    Methods of FileSystemTarget raise :py:class:`FileSystemException` if there is a problem
    completing the operation.
    """

    def __init__(self, path):
        """
        Initializes a FileSystemTarget instance.

        :param str path: the path associated with this FileSystemTarget.
        """
        self.path = path

    @abc.abstractproperty
    def fs(self):
        """
        The :py:class:`FileSystem` associated with this FileSystemTarget.
        """
        # Bare raise: concrete subclasses must override this property, so
        # this body is never expected to run.
        raise

    @abc.abstractmethod
    def open(self, mode):
        """
        Open the FileSystem target.

        This method returns a file-like object which can either be read from or written to depending
        on the specified mode.

        :param str mode: the mode `r` opens the FileSystemTarget in read-only mode, whereas `w` will
                         open the FileSystemTarget in write mode. Subclasses can implement
                         additional options.
        """

    def exists(self):
        """
        Returns ``True`` if the path for this FileSystemTarget exists; ``False`` otherwise.

        This method is implemented by using :py:meth:`fs`.
        """
        path = self.path
        if any(wildcard in path for wildcard in '*?[{'):
            logger.warning("Using wildcards in path %s might lead to processing of an incomplete dataset; "
                           "override exists() to suppress the warning.", path)
        return self.fs.exists(path)

    def remove(self):
        """
        Remove the resource at the path specified by this FileSystemTarget.

        This method is implemented by using :py:meth:`fs`.
        """
        self.fs.remove(self.path)
class AtomicLocalFile(io.BufferedWriter):
    """Abstract helper Target that writes to a temporary file in the local
    filesystem and moves it to its final destination on close.

    This class is just for the writing part of the Target. See
    :class:`luigi.file.LocalTarget` for example.

    Subclasses must implement :py:meth:`move_to_final_destination`.
    """

    def __init__(self, path):
        """
        :param str path: the final destination path of the file being written.
        """
        self.__tmp_path = self.generate_tmp_path(path)
        self.path = path
        super(AtomicLocalFile, self).__init__(io.FileIO(self.__tmp_path, 'w'))

    def close(self):
        """Flush and close the temporary file, then move it into place."""
        super(AtomicLocalFile, self).close()
        self.move_to_final_destination()

    def generate_tmp_path(self, path):
        """Return a unique temporary path for the write in progress."""
        # BUG FIX: use an integer upper bound -- random.randrange() rejects
        # float arguments on modern Python (TypeError since 3.12), which the
        # previous ``1e10`` literal triggered.
        return os.path.join(tempfile.gettempdir(),
                            'luigi-s3-tmp-%09d' % random.randrange(0, 10 ** 10))

    def move_to_final_destination(self):
        """Move the temporary file to ``self.path``; subclasses implement."""
        raise NotImplementedError()

    def __del__(self):
        # Best-effort cleanup of the temporary file. Guard against partially
        # constructed instances: if __init__ failed before __tmp_path was
        # assigned, there is nothing to clean up (the old code raised
        # AttributeError here in that case).
        try:
            tmp_path = self.__tmp_path
        except AttributeError:
            return
        if os.path.exists(tmp_path):
            os.remove(tmp_path)

    @property
    def tmp_path(self):
        # Location of the temporary file while the write is in flight.
        return self.__tmp_path

    def __exit__(self, exc_type, exc, traceback):
        " Close/commit the file if there are no exception "
        if exc_type:
            return
        return super(AtomicLocalFile, self).__exit__(exc_type, exc, traceback)
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the timezone Windows Registry plugin."""
import unittest
from dfdatetime import filetime as dfdatetime_filetime
from dfwinreg import definitions as dfwinreg_definitions
from dfwinreg import fake as dfwinreg_fake
from plaso.parsers.winreg_plugins import timezone
from tests.parsers.winreg_plugins import test_lib
class WinRegTimezonePluginTest(test_lib.RegistryPluginTestCase):
"""Tests for the timezone Windows Registry plugin."""
def _CreateTestKey(self, key_path, time_string):
"""Creates Registry keys and values for testing.
Args:
key_path (str): Windows Registry key path.
time_string (str): key last written date and time.
Returns:
dfwinreg.WinRegistryKey: a Windows Registry key.
"""
filetime = dfdatetime_filetime.Filetime()
filetime.CopyFromDateTimeString(time_string)
registry_key = dfwinreg_fake.FakeWinRegistryKey(
'TimeZoneInformation', key_path=key_path,
last_written_time=filetime.timestamp, offset=153)
value_data = 'C:\\Downloads\\plaso-static.rar'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'1', data=value_data, data_type=dfwinreg_definitions.REG_SZ,
offset=612)
registry_key.AddValue(registry_value)
value_data = b'\xff\xff\xff\xc4'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'ActiveTimeBias', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = b'\xff\xff\xff\xc4'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'Bias', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = b'\xff\xff\xff\xc4'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'DaylightBias', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = '@tzres.dll,-321'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'DaylightName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = (
b'\x00\x00\x03\x00\x05\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'DaylightStart', data=value_data,
data_type=dfwinreg_definitions.REG_BINARY)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x00'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'DynamicDaylightTimeDisabled', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = b'\x00\x00\x00\x00'
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'StandardBias', data=value_data,
data_type=dfwinreg_definitions.REG_DWORD_BIG_ENDIAN)
registry_key.AddValue(registry_value)
value_data = '@tzres.dll,-322'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'StandardName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
value_data = (
b'\x00\x00\x0A\x00\x05\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'StandardStart', data=value_data,
data_type=dfwinreg_definitions.REG_BINARY)
registry_key.AddValue(registry_value)
value_data = 'W. Europe Standard Time'.encode('utf_16_le')
registry_value = dfwinreg_fake.FakeWinRegistryValue(
'TimeZoneKeyName', data=value_data,
data_type=dfwinreg_definitions.REG_SZ)
registry_key.AddValue(registry_value)
return registry_key
def testFilters(self):
"""Tests the FILTERS class attribute."""
plugin = timezone.WinRegTimezonePlugin()
key_path = (
'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\'
'TimeZoneInformation')
self._AssertFiltersOnKeyPath(plugin, key_path)
self._AssertNotFiltersOnKeyPath(plugin, 'HKEY_LOCAL_MACHINE\\Bogus')
def testProcessMock(self):
"""Tests the Process function on created key."""
key_path = (
'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\'
'TimeZoneInformation')
time_string = '2013-01-30 10:47:57'
registry_key = self._CreateTestKey(key_path, time_string)
plugin = timezone.WinRegTimezonePlugin()
storage_writer = self._ParseKeyWithPlugin(registry_key, plugin)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 1)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_configuration = (
'ActiveTimeBias: -60 '
'Bias: -60 '
'DaylightBias: -60 '
'DaylightName: @tzres.dll,-321 '
'DynamicDaylightTimeDisabled: 0 '
'StandardBias: 0 '
'StandardName: @tzres.dll,-322 '
'TimeZoneKeyName: W. Europe Standard Time')
expected_event_values = {
'configuration': expected_configuration,
'date_time': '2013-01-30 10:47:57.0000000',
'data_type': 'windows:registry:timezone',
'key_path': key_path}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
def testProcessFile(self):
"""Tests the Process function on registry file."""
test_file_entry = self._GetTestFileEntry(['SYSTEM'])
key_path = (
'HKEY_LOCAL_MACHINE\\System\\ControlSet001\\Control\\'
'TimeZoneInformation')
win_registry = self._GetWinRegistryFromFileEntry(test_file_entry)
registry_key = win_registry.GetKeyByPath(key_path)
plugin = timezone.WinRegTimezonePlugin()
storage_writer = self._ParseKeyWithPlugin(
registry_key, plugin, file_entry=test_file_entry)
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, 1)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_configuration = (
'ActiveTimeBias: 240 '
'Bias: 300 '
'DaylightBias: -60 '
'DaylightName: @tzres.dll,-111 '
'DynamicDaylightTimeDisabled: 0 '
'StandardBias: 0 '
'StandardName: @tzres.dll,-112 '
'TimeZoneKeyName: Eastern Standard Time')
expected_event_values = {
'configuration': expected_configuration,
'data_type': 'windows:registry:timezone',
'date_time': '2012-03-11 07:00:00.0006424',
'key_path': key_path}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Run the plugin tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for SSH connections."""
import os
import sys
import warnings
from base64 import decodebytes
from io import StringIO
from select import select
from typing import Any, Dict, Optional, Sequence, Tuple, Type, Union
import paramiko
from paramiko.config import SSH_PORT
from sshtunnel import SSHTunnelForwarder
from tenacity import Retrying, stop_after_attempt, wait_fixed, wait_random
if sys.version_info >= (3, 8):
from functools import cached_property
else:
from cached_property import cached_property
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
try:
from airflow.utils.platform import getuser
except ImportError:
from getpass import getuser # type: ignore[misc]
TIMEOUT_DEFAULT = 10
class SSHHook(BaseHook):
    """
    Hook for ssh remote execution using Paramiko.
    ref: https://github.com/paramiko/paramiko
    This hook also lets you create ssh tunnel and serve as basis for SFTP file transfer

    :param ssh_conn_id: :ref:`ssh connection id<howto/connection:ssh>` from airflow
        Connections from where all the required parameters can be fetched like
        username, password or key_file. Though the priority is given to the
        param passed during init
    :param remote_host: remote host to connect
    :param username: username to connect to the remote_host
    :param password: password of the username to connect to the remote_host
    :param key_file: path to key file to use to connect to the remote_host
    :param port: port of remote host to connect (Default is paramiko SSH_PORT)
    :param conn_timeout: timeout (in seconds) for the attempt to connect to the remote_host.
        The default is 10 seconds. If provided, it will replace the `conn_timeout` which was
        predefined in the connection of `ssh_conn_id`.
    :param timeout: (Deprecated). timeout for the attempt to connect to the remote_host.
        Use conn_timeout instead.
    :param keepalive_interval: send a keepalive packet to remote host every
        keepalive_interval seconds
    :param banner_timeout: timeout to wait for banner from the server in seconds
    """

    # List of classes to try loading private keys as, ordered (roughly) by most common to least common
    _pkey_loaders: Sequence[Type[paramiko.PKey]] = (
        paramiko.RSAKey,
        paramiko.ECDSAKey,
        paramiko.Ed25519Key,
        paramiko.DSSKey,
    )

    # Maps the key-type suffix of an "ssh-<type>" host key entry to its paramiko class.
    _host_key_mappings = {
        'rsa': paramiko.RSAKey,
        'dss': paramiko.DSSKey,
        'ecdsa': paramiko.ECDSAKey,
        'ed25519': paramiko.Ed25519Key,
    }

    conn_name_attr = 'ssh_conn_id'
    default_conn_name = 'ssh_default'
    conn_type = 'ssh'
    hook_name = 'SSH'

    @staticmethod
    def get_ui_field_behaviour() -> Dict[str, Any]:
        """Returns custom field behaviour"""
        return {
            "hidden_fields": ['schema'],
            "relabeling": {
                'login': 'Username',
            },
        }

    def __init__(
        self,
        ssh_conn_id: Optional[str] = None,
        remote_host: str = '',
        username: Optional[str] = None,
        password: Optional[str] = None,
        key_file: Optional[str] = None,
        port: Optional[int] = None,
        timeout: Optional[int] = None,
        conn_timeout: Optional[int] = None,
        keepalive_interval: int = 30,
        banner_timeout: float = 30.0,
    ) -> None:
        super().__init__()
        self.ssh_conn_id = ssh_conn_id
        self.remote_host = remote_host
        self.username = username
        self.password = password
        self.key_file = key_file
        self.pkey = None
        self.port = port
        self.timeout = timeout
        self.conn_timeout = conn_timeout
        self.keepalive_interval = keepalive_interval
        self.banner_timeout = banner_timeout
        self.host_proxy_cmd = None

        # Default values, overridable from Connection
        self.compress = True
        self.no_host_key_check = True
        self.allow_host_key_change = False
        self.host_key = None
        self.look_for_keys = True

        # Placeholder for deprecated __enter__
        self.client: Optional[paramiko.SSHClient] = None

        # Use connection to override defaults.  Constructor arguments always
        # win over values stored on the Connection / its `extra` JSON.
        if self.ssh_conn_id is not None:
            conn = self.get_connection(self.ssh_conn_id)
            if self.username is None:
                self.username = conn.login
            if self.password is None:
                self.password = conn.password
            if not self.remote_host:
                self.remote_host = conn.host
            if self.port is None:
                self.port = conn.port
            if conn.extra is not None:
                extra_options = conn.extra_dejson
                if "key_file" in extra_options and self.key_file is None:
                    self.key_file = extra_options.get("key_file")

                private_key = extra_options.get('private_key')
                private_key_passphrase = extra_options.get('private_key_passphrase')
                if private_key:
                    self.pkey = self._pkey_from_private_key(private_key, passphrase=private_key_passphrase)

                if "timeout" in extra_options:
                    warnings.warn(
                        'Extra option `timeout` is deprecated.'
                        'Please use `conn_timeout` instead.'
                        'The old option `timeout` will be removed in a future version.',
                        DeprecationWarning,
                        stacklevel=2,
                    )
                    self.timeout = int(extra_options['timeout'])

                if "conn_timeout" in extra_options and self.conn_timeout is None:
                    self.conn_timeout = int(extra_options['conn_timeout'])

                if "compress" in extra_options and str(extra_options["compress"]).lower() == 'false':
                    self.compress = False

                host_key = extra_options.get("host_key")
                no_host_key_check = extra_options.get("no_host_key_check")

                if no_host_key_check is not None:
                    no_host_key_check = str(no_host_key_check).lower() == "true"
                    # Supplying an explicit host key while disabling host key
                    # checking is contradictory, so it is rejected outright.
                    if host_key is not None and no_host_key_check:
                        raise ValueError("Must check host key when provided")

                    self.no_host_key_check = no_host_key_check

                if (
                    "allow_host_key_change" in extra_options
                    and str(extra_options["allow_host_key_change"]).lower() == 'true'
                ):
                    self.allow_host_key_change = True

                if (
                    "look_for_keys" in extra_options
                    and str(extra_options["look_for_keys"]).lower() == 'false'
                ):
                    self.look_for_keys = False

                if host_key is not None:
                    if host_key.startswith("ssh-"):
                        # Entry of the form "ssh-<type> <base64 key>"; strip the
                        # "ssh-" prefix to find the loader class.
                        key_type, host_key = host_key.split(None)[:2]
                        key_constructor = self._host_key_mappings[key_type[4:]]
                    else:
                        # Bare base64 data is assumed to be an RSA key.
                        key_constructor = paramiko.RSAKey
                    decoded_host_key = decodebytes(host_key.encode('utf-8'))
                    self.host_key = key_constructor(data=decoded_host_key)
                    self.no_host_key_check = False

        if self.timeout:
            warnings.warn(
                'Parameter `timeout` is deprecated.'
                'Please use `conn_timeout` instead.'
                'The old option `timeout` will be removed in a future version.',
                DeprecationWarning,
                stacklevel=1,
            )

        if self.conn_timeout is None:
            # Deprecated `timeout` is still honored as a fallback.
            self.conn_timeout = self.timeout if self.timeout else TIMEOUT_DEFAULT

        if self.pkey and self.key_file:
            raise AirflowException(
                "Params key_file and private_key both provided. Must provide no more than one."
            )

        if not self.remote_host:
            raise AirflowException("Missing required param: remote_host")

        # Auto detecting username values from system
        if not self.username:
            self.log.debug(
                "username to ssh to host: %s is not specified for connection id"
                " %s. Using system's default provided by getpass.getuser()",
                self.remote_host,
                self.ssh_conn_id,
            )
            self.username = getuser()

        # Honor the user's ~/.ssh/config for ProxyCommand and, when no other
        # credentials were configured, an IdentityFile.
        user_ssh_config_filename = os.path.expanduser('~/.ssh/config')
        if os.path.isfile(user_ssh_config_filename):
            ssh_conf = paramiko.SSHConfig()
            with open(user_ssh_config_filename) as config_fd:
                ssh_conf.parse(config_fd)
            host_info = ssh_conf.lookup(self.remote_host)
            if host_info and host_info.get('proxycommand'):
                self.host_proxy_cmd = host_info['proxycommand']

            if not (self.password or self.key_file):
                if host_info and host_info.get('identityfile'):
                    self.key_file = host_info['identityfile'][0]

        self.port = self.port or SSH_PORT

    @cached_property
    def host_proxy(self) -> Optional[paramiko.ProxyCommand]:
        # Lazily spawn the ProxyCommand process (cached — at most one per hook).
        cmd = self.host_proxy_cmd
        return paramiko.ProxyCommand(cmd) if cmd else None

    def get_conn(self) -> paramiko.SSHClient:
        """
        Opens a ssh connection to the remote host.

        :rtype: paramiko.client.SSHClient
        """
        self.log.debug('Creating SSH client for conn_id: %s', self.ssh_conn_id)
        client = paramiko.SSHClient()

        if not self.allow_host_key_change:
            self.log.warning(
                "Remote Identification Change is not verified. "
                "This won't protect against Man-In-The-Middle attacks"
            )
            client.load_system_host_keys()

        if self.no_host_key_check:
            self.log.warning("No Host Key Verification. This won't protect against Man-In-The-Middle attacks")
            # Default is RejectPolicy
            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        else:
            if self.host_key is not None:
                # Register the configured host key; non-default ports use the
                # "[host]:port" known_hosts format.
                client_host_keys = client.get_host_keys()
                if self.port == SSH_PORT:
                    client_host_keys.add(self.remote_host, self.host_key.get_name(), self.host_key)
                else:
                    client_host_keys.add(
                        f"[{self.remote_host}]:{self.port}", self.host_key.get_name(), self.host_key
                    )
            else:
                pass  # will fallback to system host keys if none explicitly specified in conn extra

        connect_kwargs: Dict[str, Any] = dict(
            hostname=self.remote_host,
            username=self.username,
            timeout=self.conn_timeout,
            compress=self.compress,
            port=self.port,
            sock=self.host_proxy,
            look_for_keys=self.look_for_keys,
            banner_timeout=self.banner_timeout,
        )

        if self.password:
            password = self.password.strip()
            connect_kwargs.update(password=password)

        if self.pkey:
            connect_kwargs.update(pkey=self.pkey)

        if self.key_file:
            connect_kwargs.update(key_filename=self.key_file)

        log_before_sleep = lambda retry_state: self.log.info(
            "Failed to connect. Sleeping before retry attempt %d", retry_state.attempt_number
        )

        # Up to 3 attempts with a 3-5s randomized pause between them; the last
        # failure is re-raised unchanged (reraise=True).
        for attempt in Retrying(
            reraise=True,
            wait=wait_fixed(3) + wait_random(0, 2),
            stop=stop_after_attempt(3),
            before_sleep=log_before_sleep,
        ):
            with attempt:
                client.connect(**connect_kwargs)

        if self.keepalive_interval:
            # MyPy check ignored because "paramiko" isn't well-typed. The `client.get_transport()` returns
            # type "Optional[Transport]" and item "None" has no attribute "set_keepalive".
            client.get_transport().set_keepalive(self.keepalive_interval)  # type: ignore[union-attr]

        self.client = client
        return client

    def __enter__(self) -> 'SSHHook':
        # Deprecated: the hook itself as a context manager does not open a
        # connection; use get_conn() as a contextmanager instead.
        warnings.warn(
            'The contextmanager of SSHHook is deprecated.'
            'Please use get_conn() as a contextmanager instead.'
            'This method will be removed in Airflow 2.0',
            category=DeprecationWarning,
        )
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # Close the client opened by get_conn(), if any.
        if self.client is not None:
            self.client.close()
            self.client = None

    def get_tunnel(
        self, remote_port: int, remote_host: str = "localhost", local_port: Optional[int] = None
    ) -> SSHTunnelForwarder:
        """
        Creates a tunnel between two hosts. Like ssh -L <LOCAL_PORT>:host:<REMOTE_PORT>.

        :param remote_port: The remote port to create a tunnel to
        :param remote_host: The remote host to create a tunnel to (default localhost)
        :param local_port: The local port to attach the tunnel to
        :return: sshtunnel.SSHTunnelForwarder object
        """
        if local_port:
            local_bind_address: Union[Tuple[str, int], Tuple[str]] = ('localhost', local_port)
        else:
            # No local port given: let sshtunnel pick a free one.
            local_bind_address = ('localhost',)

        # NOTE(review): when both are set, key_file takes precedence over pkey
        # here (`self.key_file or self.pkey`) — __init__ forbids setting both.
        tunnel_kwargs = dict(
            ssh_port=self.port,
            ssh_username=self.username,
            ssh_pkey=self.key_file or self.pkey,
            ssh_proxy=self.host_proxy,
            local_bind_address=local_bind_address,
            remote_bind_address=(remote_host, remote_port),
            logger=self.log,
        )

        if self.password:
            password = self.password.strip()
            tunnel_kwargs.update(
                ssh_password=password,
            )
        else:
            tunnel_kwargs.update(
                host_pkey_directories=None,
            )

        client = SSHTunnelForwarder(self.remote_host, **tunnel_kwargs)

        return client

    def create_tunnel(
        self, local_port: int, remote_port: int, remote_host: str = "localhost"
    ) -> SSHTunnelForwarder:
        """
        Creates tunnel for SSH connection [Deprecated].

        :param local_port: local port number
        :param remote_port: remote port number
        :param remote_host: remote host
        :return:
        """
        warnings.warn(
            'SSHHook.create_tunnel is deprecated, Please'
            'use get_tunnel() instead. But please note that the'
            'order of the parameters have changed'
            'This method will be removed in Airflow 2.0',
            category=DeprecationWarning,
        )

        return self.get_tunnel(remote_port, remote_host, local_port)

    def _pkey_from_private_key(self, private_key: str, passphrase: Optional[str] = None) -> paramiko.PKey:
        """
        Creates appropriate paramiko key for given private key

        :param private_key: string containing private key
        :return: ``paramiko.PKey`` appropriate for given key
        :raises AirflowException: if key cannot be read
        """
        # Try each supported key class in turn; the first one that both loads
        # and signs successfully wins.
        for pkey_class in self._pkey_loaders:
            try:
                key = pkey_class.from_private_key(StringIO(private_key), password=passphrase)
                # Test it actually works. If Paramiko loads an openssh generated key, sometimes it will
                # happily load it as the wrong type, only to fail when actually used.
                key.sign_ssh_data(b'')
                return key
            except (paramiko.ssh_exception.SSHException, ValueError):
                continue
        raise AirflowException(
            'Private key provided cannot be read by paramiko.'
            'Ensure key provided is valid for one of the following'
            'key formats: RSA, DSS, ECDSA, or Ed25519'
        )

    def exec_ssh_client_command(
        self,
        ssh_client: paramiko.SSHClient,
        command: str,
        get_pty: bool,
        environment: Optional[dict],
        timeout: Optional[int],
    ) -> Tuple[int, bytes, bytes]:
        """Execute ``command`` on an open SSH client, streaming output to the log.

        :param ssh_client: an already-connected ``paramiko.SSHClient``
        :param command: the remote command line to run
        :param get_pty: request a pseudo-terminal for the command
        :param environment: extra environment variables for the remote shell
        :param timeout: per-read timeout in seconds; ``None`` blocks
            indefinitely in ``select``
        :return: tuple of (exit status, aggregated stdout, aggregated stderr)
        """
        self.log.info("Running command: %s", command)

        # set timeout taken as params
        stdin, stdout, stderr = ssh_client.exec_command(
            command=command,
            get_pty=get_pty,
            timeout=timeout,
            environment=environment,
        )
        # get channels
        channel = stdout.channel

        # closing stdin
        stdin.close()
        channel.shutdown_write()

        agg_stdout = b''
        agg_stderr = b''

        # capture any initial output in case channel is closed already
        stdout_buffer_length = len(stdout.channel.in_buffer)

        if stdout_buffer_length > 0:
            agg_stdout += stdout.channel.recv(stdout_buffer_length)

        # read from both stdout and stderr
        while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready():
            readq, _, _ = select([channel], [], [], timeout)
            for recv in readq:
                if recv.recv_ready():
                    line = stdout.channel.recv(len(recv.in_buffer))
                    agg_stdout += line
                    self.log.info(line.decode('utf-8', 'replace').strip('\n'))
                if recv.recv_stderr_ready():
                    line = stderr.channel.recv_stderr(len(recv.in_stderr_buffer))
                    agg_stderr += line
                    self.log.warning(line.decode('utf-8', 'replace').strip('\n'))
            # Stop once the command has exited and both buffers are drained.
            if (
                stdout.channel.exit_status_ready()
                and not stderr.channel.recv_stderr_ready()
                and not stdout.channel.recv_ready()
            ):
                stdout.channel.shutdown_read()
                try:
                    stdout.channel.close()
                except Exception:
                    # there is a race that when shutdown_read has been called and when
                    # you try to close the connection, the socket is already closed
                    # We should ignore such errors (but we should log them with warning)
                    self.log.warning("Ignoring exception on close", exc_info=True)
                break

        stdout.close()
        stderr.close()

        exit_status = stdout.channel.recv_exit_status()

        return exit_status, agg_stdout, agg_stderr
|
|
# -*- coding: utf-8 -*-
"""
Sahana Eden Vulnerability Controller
"""
# web2py controller prologue: `request`, `settings`, etc. are globals
# injected by the framework's execution environment.
module = request.controller
resourcename = request.function

if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# @ToDo: deployment_setting
#countries = ["TL", "VN"]
# ISO2 codes of the countries this module serves.
countries = ["VN"]
# -----------------------------------------------------------------------------
def index():
    """ Module Home Page: Map

        Assembles the client-side scripts, i18n strings, initial
        hierarchy/vulnerability data and the reports datatable, then
        renders the vulnerability map view.
    """

    # This module uses it's own Theme
    settings.base.theme = "Vulnerability"

    # Additional scripts
    append = s3.scripts.append
    append("/%s/static/scripts/yepnope.1.5.4-min.js" % appname)
    if s3.debug:
        # Unminified versions for debugging
        append("/%s/static/scripts/jquery.ui.selectmenu.js" % appname)
        append("/%s/static/scripts/jquery.ui.progressbar.js" % appname)
        append("/%s/static/scripts/TypeHelpers.js" % appname)
        append("/%s/static/scripts/S3/s3.vulnerability.js" % appname)
        append("/%s/static/scripts/S3/s3.dataTables.js" % appname)
        append("/%s/static/scripts/jquery.dataTables.js" % appname)
        append("/%s/static/scripts/jquery.dataTables.fnSetFilteringDelay.js" % appname)
        append("/%s/static/scripts/flot/jquery.flot.js" % appname)
        append("/%s/static/scripts/flot/jquery.flot.fillbetween.js" % appname)
        append("/%s/static/scripts/flot/jquery.flot.crosshair.js" % appname)
    else:
        append("/%s/static/scripts/S3/s3.vulnerability.min.js" % appname)
        append("/%s/static/scripts/S3/s3.dataTables.min.js" % appname)
        append("/%s/static/scripts/flot/jquery.flot.min.js" % appname)
        append("/%s/static/scripts/flot/jquery.flot.crosshair.min.js" % appname)

    # `append` is rebound here: from now on it collects inline JS globals,
    # not script URLs.
    js_global = []
    append = js_global.append

    # i18n
    i18n = "\n".join((
        "S3.i18n.gis_requires_login='%s'" % T("Requires Login"),
        "S3.i18n.no_matching_result='%s'" % T("No matching result"),
        "S3.i18n.no_entries_found='%s'" % T("No Entries Found"),
        "S3.i18n.loading_report_details='%s'" % T("Loading report details"),
        "S3.i18n.choose='%s'" % T("Choose"),
        "S3.i18n.population='%s'" % T("Population"),
        "S3.i18n.reported='%s'" % T("Reported"),
        "S3.i18n.country='%s'" % COUNTRY,
        "S3.i18n.country_in='%s'" % T("Country in"),
        "S3.i18n.show_more='%s'" % T("Show more"),
        "S3.i18n.show_less='%s'" % T("Show less"),
        "S3.i18n.submit_data='%s'" % T("Submit Data"),
        "S3.i18n.analysis='%s'" % T("Analysis"),
        "S3.i18n.reports='%s'" % T("Reports"),
        "S3.i18n.all_reports='%s'" % T("All reports"),
        "S3.i18n.my_reports='%s'" % T("My reports"),
        "S3.i18n.approval_request_submitted='%s'" % T("Approval request submitted"),
        "S3.i18n.thankyou_for_your_approval='%s'" % T("Thank you for your approval"),
        "S3.i18n.reject_request_submitted='%s'" % T("Reject request submitted"),
        "S3.i18n.submission_has_been_declined='%s'" % T("Thank you, the submission%(br)shas been declined") % dict(br="<br />"),
        "S3.i18n.last_data_collected_on='%s'" % T("Last Data Collected on"),
        "S3.i18n.by='%s'" % T("by"),
        "S3.i18n.in_='%s'" % T("in"),
        "S3.i18n.in_this='%s'" % T("in this"),
        "S3.i18n.of='%s'" % T("of"),
        "S3.i18n.out_of='%s'" % T("out of"),
        "S3.i18n.review='%s'" % T("Review"),
        "S3.i18n.go_to_the='%s'" % T("Go to the"),
        "S3.i18n.select_data_type='%s'" % T("Select data type"),
        "S3.i18n.about_to_submit_indicator_ratings='%s'" % T("You are about to submit indicator ratings for"),
        "S3.i18n.poor='%s'" % T("poor"),
        "S3.i18n.fair='%s'" % T("fair"),
        "S3.i18n.moderate='%s'" % T("moderate"),
        "S3.i18n.strong='%s'" % T("strong"),
        "S3.i18n.data_quality='%s'" % T("Data Quality"),
        "S3.i18n.of_total_data_reported='%s'" % T("of total data reported"),
        "S3.i18n.uploading_report_details='%s'" % T("Uploading report details"),
        "S3.i18n.upload_successful='%s'" % T("Upload successful"),
        ))
    append(i18n)

    # Get the L0 hdata & summary vdata
    hdata, vdata = l0()

    # Get the default location to open the map
    bounds = None
    root_org = auth.root_org()
    start = False
    if root_org:
        otable = s3db.org_organisation
        ttable = s3db.gis_location_tag
        gtable = s3db.gis_location
        query = (otable.id == root_org) & \
                (ttable.tag == "ISO2") & \
                (ttable.value == otable.country)
        r = db(query).select(ttable.location_id,
                             limitby=(0, 1)).first()
        # NOTE(review): `r.location_id` is a gis_location record id while
        # `countries` holds ISO2 codes ("VN") — verify this membership test
        # ever matches; it may rely on data not visible here.
        if r and r.location_id in countries:
            start = True
            append('''\nstart=%s''' % r.location_id)
            # Add the child L1 summary vdata
            l1(r.location_id, vdata)
    if not start:
        append('''\nstart=""''')

    dumps = json.dumps
    script = '''
hdata=%s
vdata=%s
''' % (dumps(hdata), dumps(vdata))
    append(script)

    # Get the list of indicators
    itable = s3db.vulnerability_indicator
    query = (itable.deleted == False)
    rows = db(query).select(itable.name,
                            itable.description,
                            itable.parameter_id,
                            orderby=itable.posn)
    # Keyed by 1-based display position (ordered by `posn`).
    indicators = OrderedDict()
    count = 1
    for row in rows:
        indicators[count] = dict(i=row.parameter_id,
                                 n=row.name,
                                 d=row.description)
        count += 1
    append('''idata=%s''' % json.dumps(indicators))

    s3.js_global.append("".join(js_global))

    # Reports
    from s3.s3utils import S3DataTable
    resource = s3db.resource("stats_group")
    list_fields = ["id",
                   "date",
                   "location_id",
                   "location_id$L2",
                   "group",
                   "group_type_id",
                   "created_by",
                   "approved_by",
                   ]
    rfields = resource.resolve_selectors(list_fields)[0]
    filteredrows = resource.count()
    dt = S3DataTable(rfields, [])
    level_1_titles = [["Approval pending", T("Approval pending")],
                      ["VCA Report", T("VCA Report")],
                      ["Report", T("Report")],
                      ]
    # Server-side paginated datatable; actual rows fetched via the aadata URL.
    report = dt.html(filteredrows,
                     filteredrows,
                     "report",
                     dt_pagination = "false",
                     dt_bFilter = "false",
                     dt_sDom = "t",
                     dt_group = [4, 3],
                     dt_group_totals = [level_1_titles],
                     dt_ajax_url = URL(c="vulnerability",
                                       f="report",
                                       extension="aadata",
                                       vars={"id": "report"},
                                       ),
                     dt_action_col = -1,
                     dt_group_space = "true",
                     dt_shrink_groups = "accordion",
                     dt_group_types = ["text", "none"],
                     )
    s3.report = report

    user = auth.user
    if user:
        user_name = "%s %s" % (user.first_name, user.last_name)
    else:
        user_name = ""
    today = request.utcnow.strftime("%d-%b-%y")
    response.view = "vulnerability/map.html"
    return dict(indicators=indicators,
                user_name = user_name,
                today = today,
                COUNTRY = COUNTRY.upper(),
                CHOOSE_COUNTRY = T("Choose Country"))
# -----------------------------------------------------------------------------
def init():
    """
    Create the static GeoJSONs that the app needs

    Exports admin-area GeoJSON for the configured countries; returns a
    simple status string for the caller.
    """
    gis.export_admin_areas(countries)
    return "complete"
# -----------------------------------------------------------------------------
def l0():
    """
    Return hdata (Hierarchy Labels) & summary vdata (Resilience) for all Countries
    - used only by the initial map load

    Returns a tuple (hdata, vdata), both dicts keyed by gis_location id:
    hdata holds the L1-L3 hierarchy labels, vdata holds the most recent
    resilience score (0 when no aggregate exists), name and level.
    """
    gtable = s3db.gis_location
    ttable = s3db.gis_location_tag
    htable = s3db.gis_hierarchy
    query = (gtable.id == ttable.location_id) & \
            (ttable.tag == "ISO2") & \
            (ttable.value.belongs(countries)) & \
            (gtable.id == htable.location_id)
    stable = s3db.stats_aggregate
    # Left join so countries without any resilience aggregate still appear.
    lquery = (stable.parameter_id == s3db.vulnerability_resilience_id()) & \
             (stable.agg_type == 4) & \
             (stable.location_id == gtable.id)
    left = stable.on(lquery)
    hdata = {}
    vdata = {}
    ids = []
    append = ids.append
    # Newest-first ordering + the seen-ids check below keeps only the most
    # recent aggregate per location.
    rows = db(query).select(gtable.id,
                            gtable.name,
                            htable.L1,
                            htable.L2,
                            htable.L3,
                            #htable.L4,
                            stable.date,
                            stable.mean,
                            orderby=~stable.date,
                            left=left)
    for row in rows:
        id = row[gtable].id
        if id in ids:
            # We're only interested in the most recent data per location
            continue
        append(id)
        _grow = row[gtable]
        _hrow = row[htable]
        hdata[id] = dict(l1 = _hrow.L1,
                         l2 = _hrow.L2,
                         l3 = _hrow.L3,
                         #l4 = _hrow.L4,
                         )
        mean = row[stable].mean
        if mean is None:
            # No aggregate joined: treat as zero resilience
            resilience = 0
        else:
            resilience = int(round(mean, 0))
        vdata[id] = dict(r = resilience,
                         n = _grow.name,
                         l = 0,
                         )
    return hdata, vdata
# -----------------------------------------------------------------------------
def l1(id, vdata):
    """
    Update summary vdata (Resilience) for all child L1s of the start country
    - used only by the initial map load
    """
    ltable = db.gis_location
    itable = db.vulnerability_aggregated_indicator
    stable = db.stats_aggregate

    # Common filter for "Resilience" aggregates; the per-location
    # constraint is added inside the loop.
    resilience_query = (itable.name == "Resilience") & \
                       (stable.parameter_id == itable.parameter_id) & \
                       (stable.agg_type == 4)

    children = db((ltable.parent == id) & \
                  (ltable.level == "L1")).select(ltable.id,
                                                 ltable.name,
                                                 )
    for child in children:
        latest = db(resilience_query & \
                    (stable.location_id == child.id)).select(stable.date,
                                                             stable.mean,
                                                             orderby=~stable.date).first()
        if latest and latest.mean is not None:
            resilience = int(round(latest.mean, 0))
        else:
            resilience = None
        vdata[child.id] = {"r": resilience,
                           "n": child.name,
                           "l": 1,
                           "f": id,
                           }
    return
# -----------------------------------------------------------------------------
def vdata():
    """
    Return JSON of the Vulnerability data for a location
    - for display in Map Popups and the Drawer

    vdata = { id : {
        'n' : name,
        'l' : level,
        'f' : parent,
        'r' : resilience,
        'i' : indicator data,
        'c' : count (how many L3s reported in this region),
        't' : count (how many L3s total in this region),
        'q' : quality,
        'p' : population,
        's' : source (for population),
        'b' : population breakdown (for L3s),
        'd' : date last collected (for L3s),
        'w' : collected by (for L3s),
        }
    }

    Fixes applied (review):
    - the demographics breakdown loop no longer rebinds `id`, which
      previously caused `vdata[id] = data` to be keyed on the last
      demographic id instead of the requested location id
    - `doctable.name` is now included in the breakdown select, which the
      loop body reads via row[doctable].name
    - the quality percentage now multiplies before dividing so it is not
      floored to 0 under Python 2 integer division
    """
    try:
        # Location id is the first URL argument
        id = request.args[0]
    except IndexError:
        raise HTTP(400)

    gtable = s3db.gis_location
    query = (gtable.id == id)
    row = db(query).select(gtable.name,
                           gtable.level,
                           gtable.parent,
                           gtable.L0,
                           gtable.L1,
                           gtable.L2,
                           #gtable.L3,
                           limitby=(0, 1)).first()
    if not row or not row.level:
        # Unknown location or a location without a hierarchy level
        return ""

    script = ""
    data = dict(n = row.name,
                l = int(row.level[1]),
                f = row.parent,
                )
    vdata = {}
    ids = []
    append = ids.append
    level = row.level
    l0_name = row.L0
    l1_name = row.L1
    l2_name = row.L2
    #l3_name = row.L3

    stable = s3db.stats_aggregate
    vtable = s3db.vulnerability_data
    srctable = s3db.stats_group
    resilience_id = s3db.vulnerability_resilience_id()

    if level != "L3":
        # We need to read the names & resilience of the next level down for the popup dropdown selector
        _level = int(level[1]) + 1
        query = (gtable.parent == id) & \
                (gtable.level == "L%s" % _level) & \
                (gtable.deleted != True)
        # Left join: children without aggregates still get an entry (r=0)
        lquery = (stable.parameter_id == resilience_id) & \
                 (stable.agg_type == 4) & \
                 (stable.end_date == None) & \
                 (stable.location_id == gtable.id)
        left = stable.on(lquery)
        rows = db(query).select(gtable.id,
                                gtable.name,
                                stable.date,
                                stable.mean,
                                stable.ward_count,
                                stable.reported_count,
                                left=left)
        for row in rows:
            _id = row[gtable].id
            append(_id)
            mean = row[stable].mean
            if mean is None:
                resilience = 0
            else:
                resilience = int(round(mean, 0))
            vdata[_id] = dict(r = resilience,
                              n = row[gtable].name,
                              l = _level,
                              f = id,
                              )
    else:
        # We are an L3 already
        # Last Data Collected on t by c
        # @ToDo: This probably won't be the correct person & if it is, it will need formatting using s3_fullname
        query = (vtable.location_id == id)
        row = db(query).select(vtable.date,
                               srctable.created_by,
                               orderby=~srctable.date,
                               limitby=(0, 1)).first()
        if row:
            data["d"] = row[vtable].date.isoformat()
            data["w"] = row[srctable].created_by
        else:
            data["d"] = None
            data["w"] = None

    # Get the Resilience
    query = (stable.parameter_id == resilience_id) & \
            (stable.agg_type == 4) & \
            (stable.end_date == None) & \
            (stable.location_id == id) & \
            (stable.deleted != True)
    r = db(query).select(stable.date,
                         stable.mean,
                         stable.ward_count,
                         stable.reported_count,
                         orderby=~stable.date,
                         limitby=(0, 1)).first()
    if not r or r.mean is None:
        data["r"] = 0
        if level != "L3":
            data["c"] = 0
            data["q"] = "p"
            # Total number of L3s in this region
            # @ToDo: Below L0 we cannot guarantee uniqueness of Lx names
            query = (gtable.level == "L3") & \
                    (gtable.deleted != True) & \
                    (gtable.L0 == l0_name)
            if level == "L1":
                query = query & (gtable.L1 == l1_name)
            elif level == "L2":
                query = query & (gtable.L1 == l1_name) & \
                                (gtable.L2 == l2_name)
            #elif level == "L3":
            #    query = query & (gtable.L1 == l1_name) & \
            #                    (gtable.L2 == l2_name) & \
            #                    (gtable.L3 == l3_name)
            ward_count = db(query).count()
            data["t"] = ward_count
    else:
        data["r"] = int(round(r.mean, 0))
        # How many L3s have reported?
        reported_count = r.reported_count
        data["c"] = reported_count
        # Total number of L3s in this region
        ward_count = r.ward_count
        data["t"] = ward_count
        if level != "L3":
            # Calculate Quality
            if reported_count == 0 or ward_count == 0:
                q = "p"
            else:
                # Multiply before dividing: under Python 2 integer division
                # `reported / ward * 100` would floor to 0 for any
                # partial coverage. Using 100.0 keeps this correct on
                # both Python 2 and 3.
                q = reported_count * 100.0 / ward_count
                if q < 25:
                    q = "p"
                elif q < 50:
                    q = "f"
                elif q < 75:
                    q = "m"
                else:
                    q = "s"
            data["q"] = q

    # Get the list of indicators
    indicator_pids = s3db.vulnerability_ids()

    # Get the aggregated data for this location for all indicators
    query = (stable.location_id == id) & \
            (stable.parameter_id.belongs(indicator_pids))
    rows = db(query).select(stable.parameter_id,
                            stable.min,
                            stable.max,
                            stable.median)
    indicator_data = {}
    for row in rows:
        indicator_data[row.parameter_id] = dict(min = row.min,
                                                max = row.max,
                                                med = row.median,
                                                )
    data["i"] = indicator_data

    # Get the Demographic data for the location
    dtable = s3db.stats_demographic
    ddtable = s3db.stats_demographic_data
    doctable = s3db.doc_document
    query = (dtable.name == "Population") & \
            (ddtable.location_id == id) & \
            (ddtable.parameter_id == dtable.parameter_id) & \
            (ddtable.group_id == srctable.id) & \
            (doctable.source_id == srctable.source_id)
    row = db(query).select(ddtable.value,
                           doctable.name,
                           orderby=~ddtable.date,
                           limitby=(0, 1)).first()
    if row:
        p = row[ddtable].value
        if p:
            p = int(p)
        data["p"] = p
        data["s"] = row[doctable].name
    else:
        data["p"] = ""
        data["s"] = ""

    if level == "L3":
        # Add breakdowns
        query = (dtable.name != "Population") & \
                (ddtable.location_id == id) & \
                (ddtable.parameter_id == dtable.parameter_id) & \
                (ddtable.group_id == srctable.id) & \
                (doctable.source_id == srctable.source_id)
        # doctable.name is selected because the loop below reads it for
        # the breakdown's source label (it was previously missing).
        rows = db(query).select(dtable.id,
                                dtable.name,
                                ddtable.value,
                                doctable.name,
                                #ddtable.date,
                                orderby=~ddtable.date
                                )
        b = {}
        seen_demographics = []
        for row in rows:
            # Use a distinct name: rebinding `id` here used to corrupt the
            # vdata key written after this loop.
            demographic_id = row[dtable].id
            if demographic_id in seen_demographics:
                # We're only interested in the most recent data per demographic
                continue
            seen_demographics.append(demographic_id)
            b[demographic_id] = dict(n = str(T(row[dtable].name)),
                                     v = row[ddtable].value,
                                     s = row[doctable].name,
                                     )
        data["b"] = b

    vdata[id] = data

    script = '''n=%s\n''' % json.dumps(vdata)
    response.headers["Content-Type"] = "application/json"
    return script
# -----------------------------------------------------------------------------
def rdata():
    """
    Controller to extract data for resilience analysis line graph
    returns a JavaScript like:
    r={"location_id":
        {"year":
            {"indicator_index": [value, deviation]}
        }
       }
    where indicator_index is 0 for the overall resilience (mean), or
    1-10 for the individual indicators (=index in the list + 1).
    Any data which are not available from the db will be omitted (to
    save bandwidth) - the client-side script must detect any missing
    keys itself.
    @todo: this controller must make sure that there is always a mean
           (overall resilience) in each set => calculate if not present.
    """
    response.headers["Content-Type"] = "application/json"
    if not len(request.args):
        return '''n={}'''
    else:
        # Numeric args are the location IDs to report on
        locations = list(set([a for a in request.args if a.isdigit()]))
        get_vars = request.get_vars
        fyear = None
        lyear = None
        if "after" in get_vars:
            try:
                fyear = int(get_vars["after"])
            except ValueError:
                pass
        if "before" in get_vars:
            try:
                lyear = int(get_vars["before"])
            except ValueError:
                pass
        # Normalize so that fyear (start) <= lyear (end).
        # Fix: the original swapped when lyear > fyear, which inverted
        # every *valid* after/before range and yielded empty results.
        if lyear and fyear and lyear < fyear:
            fyear, lyear = lyear, fyear
        if fyear:
            fdate = datetime.datetime(fyear, 1, 1)
        else:
            fdate = None
        if lyear:
            # Exclusive upper bound: first day of the following year
            ldate = datetime.datetime(lyear + 1, 1, 1)
        else:
            ldate = request.utcnow
        resilience_id = s3db.vulnerability_resilience_id()
        indicator_pids = s3db.vulnerability_ids()
        # Map parameter_id -> indicator index (1-based), resilience -> 0
        pos = Storage([(indicator_pids[i], i + 1)
                       for i in xrange(len(indicator_pids))])
        pos[resilience_id] = 0
        stable = s3db.stats_aggregate
        query = (stable.deleted != True) & \
                (((stable.parameter_id == resilience_id) & \
                  (stable.agg_type == 4)) |
                 (stable.parameter_id.belongs(indicator_pids)))
        if len(locations) == 1:
            query &= (stable.location_id == locations[0])
        else:
            query &= (stable.location_id.belongs(locations))
        if fyear:
            query &= (stable.date >= fdate)
        if lyear is None or lyear == request.utcnow.year:
            # Open-ended aggregates (end_date NULL) count as current
            query &= ((stable.end_date < ldate) | (stable.end_date == None))
        else:
            query &= (stable.end_date < ldate)
        rows = db(query).select(stable.location_id,
                                stable.parameter_id,
                                stable.date,
                                stable.mean,
                                stable.median,
                                stable.mad,
                                orderby=~stable.date)
        keys = []
        seen = keys.append
        data = dict()
        for row in rows:
            l = row.location_id
            y = row.date.year
            p = pos[row.parameter_id]
            # Rows are ordered newest-first: keep only the most recent
            # value per (location, year, indicator)
            if (l, y, p) in keys:
                continue
            seen((l, y, p))
            if p == pos[resilience_id]:
                # Overall resilience: rounded mean
                val = int(round(row.mean, 0))
            else:
                val = row.median
            dev = row.mad
            if l not in data:
                ldata = data[l] = dict()
            else:
                ldata = data[l]
            if y not in ldata:
                ydata = ldata[y] = dict()
            else:
                ydata = ldata[y]
            ydata[p] = (val, dev)
        script = '''r=%s\n''' % json.dumps(data)
        return script
# -----------------------------------------------------------------------------
def reportFilter(filter_request):
    """
    Helper function to extract the selections from the side panel
    and generate a resource filter

    @param filter_request: dict of filter options (typically request.post_vars)
    @return: a web2py DAL query filtering stats_group records
    @todo: add filter for MY REPORTS
    """
    sgtable = s3db.stats_group
    sgtype = s3db.stats_group_type
    gistable = s3db.gis_location
    prtable = s3db.pr_person
    query = (sgtable.deleted != True) & \
            (sgtable.group_type_id == sgtype.id)
    # Fix: narrowed the bare except to the failures int() can actually
    # produce for a missing/non-numeric location_id
    try:
        loc_id = int(filter_request["location_id"])
    except (KeyError, ValueError, TypeError):
        loc_id = -1
    if loc_id != -1:
        # Filter to the selected location, or all of its children if any
        child_locations = current.gis.get_children(loc_id)
        if len(child_locations) == 0:
            query &= (sgtable.location_id == loc_id)
        else:
            child_ids = [row.id for row in child_locations]
            query &= (sgtable.location_id.belongs(child_ids))
    if filter_request["from_date"]:
        query &= (sgtable.date >= filter_request["from_date"])
    if filter_request["to_date"]:
        query &= (sgtable.date <= filter_request["to_date"])
    # VCA reports are always included; other types only when selected
    indicator = (sgtype.name == "stats_vca")
    if "indicator" in filter_request:
        indicator |= (sgtype.name == "vulnerability_indicator")
    if "demographics" in filter_request:
        indicator |= (sgtype.name == "stats_demographic")
    if "map" in filter_request:
        indicator |= (sgtype.name == "stats_map")
    if "images" in filter_request:
        indicator |= (sgtype.name == "stats_image")
    if "reports" in filter_request:
        indicator |= (sgtype.name == "stats_other")
    query &= indicator
    if "myReports" in filter_request:
        # Restrict to reports this user created or approved
        user = auth.s3_logged_in_person()
        query &= ((sgtable.approved_by == user) | (sgtable.created_by == user))
    if "text" in filter_request and filter_request["text"] != "":
        # Free-text search over location name and submitter name
        text = "%%%s%%" % filter_request["text"].lower()
        query &= (sgtable.location_id == gistable.id)
        query &= (sgtable.created_by == prtable.id)
        query &= ((gistable.name.lower().like(text))
                  | (prtable.first_name.lower().like(text))
                  | (prtable.last_name.lower().like(text)))
    return query
# End of reportFilter ------------------------------------------------
# -----------------------------------------------------------------------------
def reportDataTable(request):
    """
    Helper function to return the dataTable that uses the selected
    filter options

    @param request: the current request; post_vars carry the filter
                    options, extension selects html vs aadata output
    @return: the rendered dataTable (or its JSON) as a str
    """
    from s3.s3utils import S3DataTable
    sgtable = s3db.stats_group

    # -------------------------------------------------------------------------
    # Custom representations for the dataTable columns
    # -------------------------------------------------------------------------
    def location_repr(id):
        """ Return the location name (commune) wrapped in a span """
        if not id:
            repr_text = current.messages.NONE
        else:
            table = db.gis_location
            row = db(table.id == id).select(table.name,
                                            limitby=(0, 1)).first()
            if not row:
                # Fix: previously this fell through and crashed on row.name
                repr_text = current.messages.UNKNOWN_OPT
            else:
                repr_text = row.name
        return SPAN(repr_text, _class="communeCell")

    # -------------------------------------------------------------------------
    def submitted_repr(id):
        """ Return the initial of the first name and the complete last name """
        if not id:
            repr_text = T("Imported data")
        else:
            table = db.pr_person
            row = db(table.id == id).select(table.first_name,
                                            table.last_name,
                                            limitby=(0, 1)).first()
            if row:
                repr_text = "%s. %s" % (row.first_name[0], row.last_name)
            else:
                repr_text = current.messages.UNKNOWN_OPT
        return repr_text

    # -------------------------------------------------------------------------
    def approved_repr(id):
        """ Return the initials of the first and the last name """
        if id is None:
            repr_text = T("Approval pending")
        elif id == 0:
            # 0 = approved by the system (no specific person)
            repr_text = T("Approved")
        else:
            table = db.pr_person
            row = db(table.id == id).select(table.first_name,
                                            table.last_name,
                                            limitby=(0, 1)).first()
            if row:
                repr_text = T("Approved by %(first_initial)s.%(last_initial)s") % \
                            dict(first_initial = row.first_name[0],
                                 last_initial = row.last_name[0])
            else:
                repr_text = current.messages.UNKNOWN_OPT
        return repr_text

    # -------------------------------------------------------------------------
    def action_repr(id):
        """ Return the action button(s) for this row """
        if id is None:
            repr_text = current.messages.NONE
        else:
            row = s3db.stats_group[id]
            if row.approved_by != None:
                repr_text = A(T("View"),
                              _id = id,
                              _class = "viewButton",
                              _href = "javascript:viewReportDetails(%s);" % id
                              )
            else:
                repr_text = A(T("Review"),
                              _id = id,
                              _class = "reviewButton",
                              _href = "javascript:showReportDetails(%s);" % id
                              )
            # NOTE(review): the Close link is nested inside the first
            # anchor - looks odd but preserved as-is
            repr_text.append(A(T("Close"),
                               _class = "closeReviewButton",
                               _href = "javascript:hideReportDetails(%s);" % id
                               ))
        return repr_text

    sgtable.location_id.represent = location_repr
    sgtable.created_by.represent = submitted_repr
    sgtable.approved_by.represent = approved_repr
    sgtable.id.represent = action_repr
    # Ensure that we also get the records awaiting approval
    resource = s3db.resource("stats_group", unapproved=True)
    filter_request = request.post_vars
    if filter_request:
        report_filter = reportFilter(filter_request)
        resource.add_filter(report_filter)
    filteredrows = resource.count()
    #############################################################
    # Note if list_fields is changed here then it also needs
    # to be changed in index, where the table is initialised
    #############################################################
    list_fields = [(T("Action"), "id"),
                   (T("Date"), "date"),
                   (T("Commune Name"), "location_id"),
                   "location_id$L2",
                   "group",
                   (T("Type"), "group_type_id"),
                   (T("Submitted by"), "created_by"),
                   (T("Status"), "approved_by"),
                   ]
    if filteredrows > 0:
        rows = resource.select(list_fields,
                               orderby=~sgtable.date,
                               start=0,
                               limit=filteredrows,
                               )
        data = resource.extract(rows,
                                list_fields,
                                represent=True,
                                )
    else:
        # Fix: data was left unbound when there were no matching rows
        data = []
    # The types are fixed and will always be displayed (even if empty)
    type_totals = {"Approval pending" : 0,
                   "VCA Report" : 0,
                   "Report" : 0
                   }
    # Calculate the report group totals
    location_totals = {}
    for item in data:
        # Collect the type totals
        group = item["stats_group.group"]
        if not group:
            group = "Report"
        # .get(): don't crash on an unexpected group value
        type_totals[group] = type_totals.get(group, 0) + 1
        # Collect the L2 sub totals
        loc_code = "%s_%s" % (group, item["gis_location.L2"])
        if loc_code in location_totals:
            location_totals[loc_code] += 1
        else:
            location_totals[loc_code] = 1
    rfields = resource.resolve_selectors(list_fields)[0]
    dt = S3DataTable(rfields, data)
    dt.defaultActionButtons(resource)
    if request.extension == "html":
        # (merged the two identical extension checks of the original)
        level_1_titles = [["Approval pending", T("Approval pending")],
                          ["VCA Report", T("VCA Report")],
                          ["Report", T("Report")],
                          ]
        report = dt.html(filteredrows,
                         filteredrows,
                         "report",
                         dt_displayLength = filteredrows,
                         dt_pagination = "false",
                         dt_bFilter = "false",
                         dt_sDom = "t",
                         dt_group = [3, 4],
                         dt_group_totals = [type_totals, location_totals],
                         dt_group_titles = [level_1_titles],
                         dt_ajax_url = URL(c="vulnerability",
                                           f="report",
                                           extension="aadata",
                                           vars={"id": "report"},
                                           ),
                         dt_action_col = -1,
                         dt_group_space = "true",
                         dt_shrink_groups = "accordion",
                         dt_group_types = ["text", "none"],
                         )
        reportCount = T("%(count)s Entries Found") % dict(count=filteredrows)
        report.append(INPUT(_type="hidden",
                            _id="reportCount",
                            _name="config",
                            _value=reportCount))
    else:
        report = ""
        if filteredrows > 0:
            report = dt.json("report",
                             int(request.vars.sEcho),
                             filteredrows,
                             filteredrows,
                             dt_group_totals=[type_totals],
                             )
    return str(report)
# -----------------------------------------------------------------------------
def getReportDetails(id, buttonsRequired):
    """
    Method to get the details of a report from the stats_group id
    It will build the custom display, which is essentially a form
    wrapped around a table, if buttons are required then they will be added
    allowing for the report to be approved or rejected.

    @param id: the stats_group record id
    @param buttonsRequired: add Approve/Decline buttons (review mode)
    @return: the form serialized as a str
    """
    sgtable = s3db.stats_group
    sgt_table = s3db.stats_group_type
    ss_table = s3db.stats_source
    # Look up the report type and its source
    query = (sgtable.id == id) & \
            (sgtable.group_type_id == sgt_table.id)
    rows = db(query).select(sgt_table.name,
                            sgtable.source_id,
                            limitby=(0, 1)).first()
    reportType = rows.stats_group_type.name
    reportSource_id = rows.stats_group.source_id
    valid = True
    if reportType == "vulnerability_indicator":
        # Get the data for this report
        vdtable = s3db.vulnerability_data
        vitable = s3db.vulnerability_indicator
        query = (vdtable.deleted == False) & \
                (vdtable.group_id == id) & \
                (vitable.parameter_id == vdtable.parameter_id)
        rows = db(query).select(vdtable.value,
                                vitable.name,
                                orderby=vitable.posn)
        # Build the custom table: header row with the 1..5 scale
        table = TABLE(_class="indicatorsTable")
        tr = TR()
        th = TH(_class="indicatorLabels")
        tr.append(th)
        th = TH(DIV(1), _class="indicator1")
        tr.append(th)
        th = TH(DIV(2), _class="indicator2")
        tr.append(th)
        th = TH(DIV(3), _class="indicator3")
        tr.append(th)
        th = TH(DIV(4), _class="indicator4")
        tr.append(th)
        th = TH(DIV(5), _class="indicator5")
        tr.append(th)
        table.append(tr)
        # Second header row: LOW/HIGH resilience direction arrows
        tr = TR()
        th = TH()
        tr.append(th)
        th = TH(SPAN(XML("←"), _class="arrow"), _colspan=2)
        th.append(T(" LOW RESILIENCE"))
        tr.append(th)
        th = TH(T(" HIGH RESILIENCE"),
                _class="highResilienceLabel",
                _colspan=3)
        th.append(SPAN(XML("→"), _class="arrow"))
        tr.append(th)
        table.append(tr)
        mark = XML("<mark>*</mark>")
        tr_class = "white"
        # One row per indicator, alternating row colours
        for row in rows:
            tr_class = "gray" if tr_class == "white" else "white"
            tr = TR(_class=tr_class)
            name = row.vulnerability_indicator.name
            td = TD(mark, _class="indicatorLabels")
            td.append(name)
            tr.append(td)
            value = int(row.vulnerability_data.value)
            # Five disabled radio buttons; the stored value is pre-selected
            for i in range(5):
                option = INPUT(_type = "radio",
                               _class = "indicator%d" % (i + 1),
                               _name = name,
                               _value = i + 1,
                               value = value,
                               _disabled = "disabled",
                               )
                tr.append(option)
            table.append(tr)
    elif reportType == "stats_demographic":
        # Get the data for this report
        ddtable = s3db.stats_demographic_data
        sdtable = s3db.stats_demographic
        query = (ddtable.deleted == False) & \
                (ddtable.group_id == id) & \
                (sdtable.parameter_id == ddtable.parameter_id)
        rows = db(query).select(ddtable.value,
                                ddtable.location_id,
                                sdtable.name,
                                orderby = sdtable.name)
        # NOTE(review): crashes if reportSource_id is None - confirm a
        # source is always attached to demographic reports
        reportSource = ss_table[reportSource_id].name
        # Build the custom table
        table = TABLE(_class = "demographicsTable")
        table.append(TR(TD(reportSource, _colspan=3)))
        tr_class = "grey"
        for row in rows:
            tr_class = "grey" if tr_class == "white" else "white"
            tr = TR(_class = tr_class)
            name = row.stats_demographic.name
            tr.append(TD(name, _class = "demoLabel"))
            value = IS_INT_AMOUNT().represent(row.stats_demographic_data.value)
            tr.append(TD(value, _class = "demoStatistic"))
            location = s3db.gis_location_represent(row.stats_demographic_data.location_id,
                                                   show_link = False)
            tr.append(TD(location, _class = "demoSource"))
            table.append(tr)
    elif reportType == "stats_map" or reportType == "stats_image":
        # Image/map reports: show a thumbnail plus download/view links
        ditable = s3db.doc_image
        query = (ditable.source_id == reportSource_id)
        record = db(query).select(limitby=(0, 1)).first()
        if record:
            size = (250, 250)
            image = s3db.pr_image_represent(record.file, size=size)
            size = s3db.pr_image_size(image, size)
            desc = DIV(record.comments, _class="imageDesc")
            filename = record.name
            url_small = URL(c="default", f="download", args=image)
            alt = record.comments if record.comments else filename
            thumb = IMG(_src=url_small,
                        _alt=alt,
                        _width=size[0],
                        _height=size[1]
                        )
            url_full = URL(c="default", f="download", args=record.file)
            download = A(T("Download"), _class="download", _href=url_full)
            view = A(T("View full size"),
                     _class="download",
                     _href=URL(c="vulnerability", f="view_image", args=record.id),
                     _target="blank")
            table = TABLE(_class = "imageTable")
            table.append(TR(TD(thumb, _colspan=4)))
            table.append(TR(TD(desc),
                            TD(download),
                            TD(DIV(" | ", _class="divider")),
                            TD(view),
                            _class="mapRow"))
        else:
            valid = False
    elif reportType == "stats_other" or reportType == "stats_vca":
        # Document reports: just a description plus a download link
        doctable = s3db.doc_document
        query = (doctable.source_id == reportSource_id)
        record = db(query).select(limitby=(0, 1)).first()
        if record:
            desc = DIV(record.name, _class="imageDesc")
            url = URL(c="default", f="download", args=record.file)
            download = A(T("Download"), _class="download", _href=url)
            table = TABLE(_class="imageTable")
            table.append(TR(TD(desc),
                            TD(download),
                            _class="mapRow"))
        else:
            valid = False
    else:
        valid = False
    # Place the table in a form and attach the buttons (if required)
    form = FORM(_id="form%s" % id)
    if valid:
        form.append(table)
    else:
        form.append(DIV(T("No data available"), _class="mapRow"))
    if buttonsRequired:
        # Approve only makes sense when data could be rendered;
        # Decline is offered either way
        if valid:
            form.append(INPUT(_type="button", _name="Approve%s" % id,
                              _value="Approve", _class="approveButton"))
        form.append(INPUT(_type="button", _name="Decline%s" % id,
                          _value="Decline", _class="declineButton"))
    return str(form)
# -----------------------------------------------------------------------------
def approveReport(id):
    """
    Function to approve a report

    @param id: the stats_group record id
    @return: True if the report (and its dependent records) was approved,
             False if the report type/instance was not recognized

    Approval order matters: the stats_group record is approved first,
    then the dependent data records.
    """
    # Approve the doc source entity record
    sgtable = s3db.stats_group
    sgt_table = s3db.stats_group_type
    resource = s3db.resource("stats_group", id=id, unapproved=True)
    resource.approve()
    # find the type of report that we have
    query = (sgtable.id == id) & \
            (sgtable.group_type_id == sgt_table.id)
    record = db(query).select(sgt_table.name,
                              sgt_table.stats_group_instance,
                              limitby=(0, 1)).first()
    rec_type = record.name
    if rec_type == "vulnerability_indicator" or rec_type == "stats_demographic":
        # Find the type of stats source record that we have
        if rec_type == "vulnerability_indicator":
            query = (s3db.vulnerability_data.group_id == id)
            resource = s3db.resource("vulnerability_data", filter=query, unapproved=True)
            resource.approve()
        if rec_type == "stats_demographic":
            query = (s3db.stats_demographic_data.group_id == id)
            resource = s3db.resource("stats_demographic_data", filter=query, unapproved=True)
            resource.approve()
        # Approve the stats_data records
        query = (s3db.stats_data.group_id == id)
        resource = s3db.resource("stats_data", filter=query, unapproved=True)
        resource.approve()
        # Rebuild the aggregates asynchronously
        # NOTE(review): "async" is a reserved word from Python 3.7 - this
        # call only works on Python 2
        s3task.async("stats_group_clean")
        return True
    rec_instance = record.stats_group_instance
    if rec_instance == "doc_image":
        query = (sgtable.id == id) &\
                (s3db.doc_image.source_id == sgtable.source_id)
        resource = s3db.resource("doc_image", filter=query, unapproved=True)
        resource.approve()
        return True
    elif rec_instance == "doc_document":
        query = (sgtable.id == id) &\
                (s3db.doc_document.source_id == sgtable.source_id)
        resource = s3db.resource("doc_document", filter=query, unapproved=True)
        resource.approve()
        return True
    return False
# -----------------------------------------------------------------------------
def declineReport(id):
    """
    Reject (decline to approve) a report.

    @param id: the stats_group record id
    @return: True if dependent data records were rejected as well,
             False for report types without dependent stats records
    """
    sgtable = s3db.stats_group
    sgt_table = s3db.stats_group_type
    # Determine the report type before touching any records
    record = db((sgtable.id == id) &
                (sgtable.group_type_id == sgt_table.id)).select(
                    sgt_table.name,
                    limitby=(0, 1)).first()
    rec_type = record.name
    # Reject the stats_group record itself first
    s3db.resource("stats_group", id=id, unapproved=True).reject()
    if rec_type in ("vulnerability_indicator", "stats_demographic"):
        # Reject the generic stats_data records for this group
        s3db.resource("stats_data",
                      filter=(s3db.stats_data.group_id == id),
                      unapproved=True).reject()
        # Then reject the type-specific data records
        if rec_type == "vulnerability_indicator":
            s3db.resource("vulnerability_data",
                          filter=(s3db.vulnerability_data.group_id == id),
                          unapproved=True).reject()
        if rec_type == "stats_demographic":
            s3db.resource("stats_demographic_data",
                          filter=(s3db.stats_demographic_data.group_id == id),
                          unapproved=True).reject()
        return True
    return False
# -----------------------------------------------------------------------------
def report():
    """
    Not a REST Controller

    Dispatches on request.args(0):
    - "filter":  return the filtered dataTable
    - "review"/"view": return the report-detail form
    - "approve"/"decline": act on a report, then return the refreshed table
    - default: return the table plus the date-filter widgets
    All responses are JSON.
    """
    s3.no_formats = True
    if request.args(0) == "filter":
        report = reportDataTable(request)
        data = json.dumps(report)
    elif request.args(0) == "review" or request.args(0) == "view":
        id = request.get_vars.id
        # Buttons only in review mode
        buttonsRequired = request.args(0) == "review"
        reportDetails = getReportDetails(id, buttonsRequired=buttonsRequired)
        data = json.dumps(reportDetails)
    elif request.args(0) == "approve":
        # Check authorization
        permitted = current.auth.s3_has_permission
        authorised = permitted("approve", "stats_group")
        if not authorised:
            data = json.dumps(str(T("You are not permitted to approve documents")))
        else:
            id = request.post_vars.id
            if approveReport(id):
                report = reportDataTable(request)
                data = json.dumps(report)
            else:
                data = json.dumps(str(T("Failed to approve")))
    elif request.args(0) == "decline":
        # NOTE(review): unlike "approve", this branch performs no
        # permission check - confirm whether that is intentional
        id = request.post_vars.id
        if declineReport(id):
            report = reportDataTable(request)
            data = json.dumps(report)
        else:
            data = json.dumps(str(T("Decline failed")))
    else:
        # Initial page load: table plus serialized date-filter widgets
        filter = {}
        date_widget = S3DateWidget(format="yy-mm-dd", future=0)
        to_date = Field("to_date")
        to_date._tablename = ""
        from_date = Field("from_date")
        from_date._tablename = ""
        filter["to_date"] = str(date_widget(to_date, None))
        filter["from_date"] = str(date_widget(from_date, None))
        report = reportDataTable(request)
        data_dict = {"filter" : filter,
                     "report" : report
                     }
        data = json.dumps(data_dict)
    response.headers["Content-Type"] = "application/json"
    return data
# -----------------------------------------------------------------------------
def submitData():
    """
    Controller to manage the ajax-import of vulnerability data

    Dispatches request.vars.action onto the matching import routine.
    Returns None (empty response) for an unknown action.
    """
    # Get the action to be performed
    action = request.vars.action
    if action == "vulnerability":
        return import_vul_create()
    elif action == "vulnerability_part1":
        return import_vul_part1()
    elif action == "vulnerability_part2":
        return import_vul_part2()
    elif action in ("map", "image", "other", "vca"):
        # All document/image report uploads share one import routine
        # (collapsed from four identical branches)
        return import_image(action)
    elif action == "demographics":
        return import_demo_create()
# -----------------------------------------------------------------------------
def import_vul_create():
    """
    Controller to add a new set of vulnerability indicators.

    Creates one stats_group for the submitted location, then one
    vulnerability_data record per indicator; the form value for each
    indicator is keyed by its position number.
    """
    sgtable = s3db.stats_group
    sgt_table = s3db.stats_group_type
    vd_table = s3db.vulnerability_data
    now = request.utcnow
    person_id = auth.s3_logged_in_person()
    location_id = request.vars.location
    # Resolve the group type, then create the stats_group first
    group_type = db(sgt_table.name == "vulnerability_indicator").select(
                        sgt_table.id,
                        limitby=(0, 1)).first().id
    sg_id = sgtable.insert(date=now,
                           location_id=location_id,
                           group_type_id=group_type,
                           created_by=person_id)
    # One data record per indicator, in position order
    itable = s3db.vulnerability_indicator
    indicators = db(itable.deleted == False).select(itable.posn,
                                                    itable.parameter_id,
                                                    orderby=itable.posn)
    update_super = s3db.update_super
    for indicator in indicators:
        vd_id = vd_table.insert(parameter_id=indicator.parameter_id,
                                location_id=location_id,
                                value=request.vars[str(indicator.posn)],
                                date=now,
                                group_id=sg_id,
                                created_by=person_id)
        # Maintain the stats_data super-entity link
        update_super(vd_table, dict(id=vd_id))
# -----------------------------------------------------------------------------
def import_vul_part1():
    """
    Controller to manage the first phase of the import of vulnerability
    indicators: stage the uploaded CSV, then return the parsed items
    grouped per stats_group tuid so the client can review them.

    @return: JSON with upload_id, the staged item ids, and the grouped data
    """
    from gluon.serializers import json
    try:
        # Presence check only - the upload itself is consumed by the
        # REST controller below
        upload = request.vars.file.file
    except Exception:
        response.headers["Content-Type"] = "application/json"
        return json({"Error": str(T("Error File missing"))})
    # Check authorization
    permitted = current.auth.s3_has_permission
    authorised = permitted("create", "vulnerability_data")
    if not authorised:
        response.headers["Content-Type"] = "application/json"
        return json({"Error": str(T("You are not permitted to upload files"))})
    from lxml import etree
    from datetime import datetime
    creator = auth.s3_logged_in_person()
    output = s3_rest_controller("vulnerability", "data",
                                csv_stylesheet="data.xsl")
    upload_id = output[0]
    item_ids = output[1]
    # resource.extract() results for all staged import items
    items = output[2]
    ele_dict = {}
    # Collect all the data and group the vulnerability indicators
    for value in items:
        group_tuid = "Group"
        ele = value["s3_import_item.element"]
        ele = s3xml.xml_decode(ele)
        try:
            element = etree.fromstring(ele)
        except Exception:
            return T("No valid data in the file")
        # Get all the components
        # (fix: the inner loop previously rebound the name "data",
        # shadowing the list being iterated)
        data_dict = {}
        data_elements = element.findall("data")
        for item in data_elements:
            f = item.get("field", None)
            v = item.get("value", None)
            data_dict[f] = v
        references = element.findall("reference")
        for reference in references:
            f = reference.get("field", None)
            r = reference.get("resource", None)
            t = reference.get("tuid", None)
            data_dict[f] = (r, t)
            if f == "group_id":
                group_tuid = t
        if group_tuid in ele_dict:
            ele_dict[group_tuid].append(data_dict)
        else:
            ele_dict[group_tuid] = [data_dict]
    # Now condense the data down to just what is required
    # keyed on the group_tuid
    # date, created_by, location, and a dict of indicators [param and value]
    loc_label = gis.get_location_hierarchy("L4")
    data_list = []
    for (key, group) in ele_dict.items():
        row = group[0]
        group_dict = {}
        group_dict["group"] = key
        group_dict["date"] = datetime.strptime(row["date"], "%Y-%m-%d").strftime("%d-%b-%y")
        group_dict["created_by"] = creator
        loc = row["location_id"][1][12:] # strip location L#: from the tuid
        loc = "%s %s" % (loc, loc_label)
        group_dict["location"] = loc
        indicator_dict = {}
        param_len = len(row["parameter_id"][0]) + 1 # include the separator
        for record in group:
            param = record["parameter_id"][1][param_len:]
            indicator_dict[param] = record["value"]
        group_dict["data"] = indicator_dict
        data_list.append(group_dict)
    response.headers["Content-Type"] = "application/json"
    return json({"upload_id" : upload_id,
                 "items" : item_ids,
                 "data" : data_list
                 })
# -----------------------------------------------------------------------------
def import_vul_part2():
    """
    Controller to manage the second phase of the import of vulnerability
    indicators: commit the previously-staged job and report the totals.
    """
    job_id = request.vars.job
    if not job_id:
        return T("Error No Job ID's provided")
    # Commit the staged import; the controller returns
    # (records, errors, ignored)
    totals = s3_rest_controller("vulnerability", "data",
                                csv_stylesheet="data.xsl")
    from gluon.serializers import json
    response.headers["Content-Type"] = "application/json"
    return json({"totalRecords" : totals[0],
                 "totalErrors" : totals[1],
                 "totalIgnored" : totals[2]
                 })
# -----------------------------------------------------------------------------
def import_image(action):
    """
    Controller to import a report (map/image upload or plain document)
    and register it as a stats_group.

    @param action: one of "map", "image", "other", "vca"
    """
    if action in ("map", "image"):
        doc_table = s3db.doc_image
    else:
        doc_table = s3db.doc_document
    sgtable = s3db.stats_group
    sgt_table = s3db.stats_group_type
    di_file = doc_table.file
    # (renamed from "file" to avoid shadowing the builtin)
    upload = request.vars.file
    real_filename = upload.filename
    new_filename = di_file.store(upload, real_filename)
    date = request.utcnow
    creator = auth.s3_logged_in_person()
    location_id = request.vars.location
    desc = request.vars.desc
    doc_id = doc_table.insert(file = new_filename,
                              name = real_filename,
                              date = date,
                              comments = desc,
                              location_id = location_id,
                              created_by = creator
                              )
    # Maintain the doc source super-entity link
    s3db.update_super(doc_table, dict(id=doc_id))
    source_id = doc_table[doc_id].source_id
    # Map the action onto the stats_group_type name.
    # Fix: an unsupported action previously left "group" unbound and
    # crashed later with a NameError; now it fails fast with a KeyError.
    group = {"map": "stats_map",
             "image": "stats_image",
             "other": "stats_other",
             "vca": "stats_vca",
             }[action]
    if action in ("map", "image"):
        # Create a thumbnail of the image
        s3db.pr_image_resize(upload.file,
                             new_filename,
                             real_filename,
                             (250, 250),
                             )
    group_type = db(sgt_table.name == group).select(sgt_table.id,
                                                    limitby=(0, 1)).first().id
    sgtable.insert(source_id = source_id,
                   group_type_id = group_type,
                   date = date,
                   location_id = location_id,
                   created_by = creator
                   )
# -----------------------------------------------------------------------------
def import_demo_create():
    """
    Controller to import demographic data

    The form posts seven (demoFieldN, sourceFieldN) pairs. A blank
    source inherits the previous row's source. One stats_group is
    created per distinct source, then one stats_demographic_data record
    per submitted value.
    """
    sgtable = s3db.stats_group
    sgt_table = s3db.stats_group_type
    ss_table = s3db.stats_source
    sd_table = s3db.stats_demographic
    sdd_table = s3db.stats_demographic_data
    dd_table = s3db.doc_document
    update_super = s3db.update_super
    # first get the demographic data and source
    last_source = ""
    source_list = {} # the source_id for this source
    group_list = {} # the group_id for this source
    demo_string_list = ["Population",
                        "Male",
                        "Female",
                        "Over 60",
                        "Under 5",
                        "Households",
                        "Households below poverty line"
                        ]
    demographics_list = []
    data = []
    for x in range(7):
        source = request.vars["sourceField%s" % x]
        if source == "":
            # Inherit the previous row's source
            source = last_source
        else:
            last_source = source
        data.append((request.vars["demoField%s" % x],
                     source))
        if source != "":
            if source not in source_list:
                record = db(ss_table.name == source).select(ss_table.id,
                                                            limitby=(0, 1)
                                                            ).first()
                if record == None:
                    # Save the source details & SE
                    doc_id = dd_table.insert(name = source)
                    update_super(dd_table, dict(id=doc_id))
                    source_id = dd_table[doc_id].source_id
                else:
                    # NOTE(review): only ss_table.id was selected above,
                    # yet source_id is read here - confirm this branch
                    # actually works for pre-existing sources
                    source_id = record.source_id
                source_list[source] = source_id
        else:
            source_list[""] = None # added so that a group with no source will be created
    # Now get the parameter_id in demo_string_list order
    sd_rows = db().select(sd_table.name,
                          sd_table.parameter_id)
    demo_recs = {}
    for record in sd_rows:
        demo_recs[record.name] = record.parameter_id
    for demo_string in demo_string_list:
        if demo_string in demo_recs:
            demographics_list.append(demo_recs[demo_string])
        else:
            demographics_list.append(None) # Should never have this
    # Now get the stats_group
    date = request.utcnow
    creator = auth.s3_logged_in_person()
    location_id = request.vars.location
    group_type = db(sgt_table.name == "stats_demographic").select(sgt_table.id,
                                                                  limitby=(0, 1)
                                                                  ).first().id
    # One stats_group per distinct source
    for (source, id) in source_list.items():
        sg_id = sgtable.insert(date = date,
                               location_id = location_id,
                               group_type_id = group_type,
                               source_id = id,
                               created_by = creator
                               )
        group_list[source] = sg_id
    # Now save the demographic data
    for x in range(7):
        sdd_id = sdd_table.insert(parameter_id = demographics_list[x],
                                  location_id = location_id,
                                  value = data[x][0],
                                  date = date,
                                  group_id = group_list[data[x][1]],
                                  created_by = creator
                                  )
        update_super(sdd_table, dict(id=sdd_id))
# -----------------------------------------------------------------------------
def indicator():
    """ REST Controller for the vulnerability indicator resource """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def aggregated_indicator():
    """ REST Controller for the aggregated indicator resource """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def data():
    """ REST Controller for the vulnerability data resource """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def view_image():
    """
    Not a REST Controller

    Render a full-size doc_image (identified by the first request arg)
    with its description, for the "View full size" link in the report
    details.
    """
    di_table = s3db.doc_image
    # NOTE(review): shadows the builtin id; raises IndexError if no arg given
    id = request.args[0]
    record = db(di_table.id == id).select(limitby=(0, 1)).first()
    # NOTE(review): record may be None for an unknown id, which would
    # raise AttributeError below - confirm callers always pass valid ids
    desc = DIV(record.comments, _class="imageDesc")
    filename = record.name
    url = URL(c="default", f="download", args=record.file)
    alt = record.comments if record.comments else filename
    image = IMG(_src=url,
                _alt=alt,
                )
    output = Storage()
    output.image = image
    output.desc = desc
    return output
# END =========================================================================
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
def fake_get_vnc_console(self, _context, _instance, _console_type):
    """Stub for compute_api.API.get_vnc_console: always a fixed URL."""
    console = {'url': 'http://fake'}
    return console
def fake_get_spice_console(self, _context, _instance, _console_type):
    """Stub for compute_api.API.get_spice_console: always a fixed URL."""
    console = {'url': 'http://fake'}
    return console
def fake_get_vnc_console_invalid_type(self, _context,
                                      _instance, _console_type):
    # Stub: always reports the requested console type as invalid (-> HTTP 400)
    raise exception.ConsoleTypeInvalid(console_type=_console_type)
def fake_get_spice_console_invalid_type(self, _context,
                                        _instance, _console_type):
    # Stub: always reports the requested console type as invalid (-> HTTP 400)
    raise exception.ConsoleTypeInvalid(console_type=_console_type)
def fake_get_vnc_console_not_ready(self, _context, instance, _console_type):
    # Stub: instance exists but is not ready yet (-> HTTP 409)
    raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_get_spice_console_not_ready(self, _context, instance, _console_type):
    # Stub: instance exists but is not ready yet (-> HTTP 409)
    raise exception.InstanceNotReady(instance_id=instance["uuid"])
def fake_get_vnc_console_not_found(self, _context, instance, _console_type):
    # Stub: instance disappears between lookup and console get (-> HTTP 404)
    raise exception.InstanceNotFound(instance_id=instance["uuid"])
def fake_get_spice_console_not_found(self, _context, instance, _console_type):
    # Stub: instance disappears between lookup and console get (-> HTTP 404)
    raise exception.InstanceNotFound(instance_id=instance["uuid"])
def fake_get(self, context, instance_uuid):
    """Stub for compute_api.API.get: echo the uuid back as an instance."""
    instance = {'uuid': instance_uuid}
    return instance
def fake_get_not_found(self, context, instance_uuid):
    # Stub: instance lookup fails (-> HTTP 404)
    raise exception.InstanceNotFound(instance_id=instance_uuid)
class ConsolesExtensionTest(test.TestCase):
    def setUp(self):
        """Stub the compute API console/lookup calls and build the test app."""
        super(ConsolesExtensionTest, self).setUp()
        self.stubs.Set(compute_api.API, 'get_vnc_console',
                       fake_get_vnc_console)
        self.stubs.Set(compute_api.API, 'get_spice_console',
                       fake_get_spice_console)
        self.stubs.Set(compute_api.API, 'get', fake_get)
        # Load only the extensions this test exercises
        self.app = fakes.wsgi_app_v3(init_only=('servers',
                                                'os-remote-consoles'))
def test_get_vnc_console(self):
body = {'os-getVNCConsole': {'type': 'novnc'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
output = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(output,
{u'console': {u'url': u'http://fake', u'type': u'novnc'}})
def test_get_vnc_console_not_ready(self):
self.stubs.Set(compute_api.API, 'get_vnc_console',
fake_get_vnc_console_not_ready)
body = {'os-getVNCConsole': {'type': 'novnc'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
output = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 409)
def test_get_vnc_console_no_type(self):
self.stubs.Set(compute_api.API, 'get_vnc_console',
fake_get_vnc_console_invalid_type)
body = {'os-getVNCConsole': {}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_get_vnc_console_no_instance(self):
self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
body = {'os-getVNCConsole': {'type': 'novnc'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_get_vnc_console_no_instance_on_console_get(self):
self.stubs.Set(compute_api.API, 'get_vnc_console',
fake_get_vnc_console_not_found)
body = {'os-getVNCConsole': {'type': 'novnc'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_get_vnc_console_invalid_type(self):
body = {'os-getVNCConsole': {'type': 'invalid'}}
self.stubs.Set(compute_api.API, 'get_vnc_console',
fake_get_vnc_console_invalid_type)
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_get_spice_console(self):
body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
output = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(output,
{u'console': {u'url': u'http://fake', u'type': u'spice-html5'}})
def test_get_spice_console_not_ready(self):
self.stubs.Set(compute_api.API, 'get_spice_console',
fake_get_spice_console_not_ready)
body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
output = jsonutils.loads(res.body)
self.assertEqual(res.status_int, 409)
def test_get_spice_console_no_type(self):
self.stubs.Set(compute_api.API, 'get_spice_console',
fake_get_spice_console_invalid_type)
body = {'os-getSPICEConsole': {}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
def test_get_spice_console_no_instance(self):
self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_get_spice_console_no_instance_on_console_get(self):
self.stubs.Set(compute_api.API, 'get_spice_console',
fake_get_spice_console_not_found)
body = {'os-getSPICEConsole': {'type': 'spice-html5'}}
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 404)
def test_get_spice_console_invalid_type(self):
body = {'os-getSPICEConsole': {'type': 'invalid'}}
self.stubs.Set(compute_api.API, 'get_spice_console',
fake_get_spice_console_invalid_type)
req = webob.Request.blank('/v3/servers/1/action')
req.method = "POST"
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = req.get_response(self.app)
self.assertEqual(res.status_int, 400)
|
|
#!/usr/bin/env python
"""
Where you at?
"""
__author__ = "Alex Drlica-Wagner"
__email__ = "kadrlica@fnal.gov"
__version__ = "0.0.0"
"""
TODO:
- by default, show all images from current night
- clip bad data for previous nights
- 5-frame, 1 per band plots, including survey history
- (reach) upcoming exposures
- (reach) color sky brightness background
"""
import sys,os
import logging
from collections import OrderedDict as odict
from datetime import datetime,timedelta,tzinfo
import dateutil.parser
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Ellipse
#from matplotlib.patheffects import f
import numpy as np
import pylab as plt
import ephem
# For coloring filters
FILTERS = ['u','g','r','i','z','Y','VR']
BANDS = FILTERS + ['all']
# Plot color for each filter; 'none' is the fallback when no filter matches
COLORS = odict([
    ('none','black'),
    ('u','blue'),
    ('g','green'),
    ('r','red'),
    ('i','gold'),
    ('z','magenta'),
    ('Y','black'),
    ('VR','gray'),
])
# For accessing footprints
# Environment variable that overrides the default footprint data directory
FOOTPATH = 'SISPI_FOOTPRINT'
# Mapping from footprint name to the data file that defines its perimeter
FOOTPRINT = odict([
    ('des', 'round13-poly.txt'),
    ('none',None),
])
# Supernova field centers (ra, dec) in degrees
# Derived from telra,teldec of 10000 exposures
SN = odict([
    ('E1',(7.874, -43.010)),
    ('E2',(9.500, -43.999)),
    ('X1',(34.476, -4.931)),
    ('X2',(35.664,-6.413)),
    ('X3',(36.449, -4.601)),
    ('S1',(42.818, 0.000)),
    ('S2',(41.193, -0.991)),
    ('C1',(54.274, -27.113)),
    ('C2',(54.274, -29.090)),
    ('C3',(52.647, -28.101)),
])
# Label anchor positions (ra, dec) for each supernova field group
SN_LABELS = odict([
    ('SN-E',(8,-41)),
    ('SN-X',(35,-12)),
    ('SN-S',(45,1)),
    ('SN-C',(55,-35)),
])
# CTIO site coordinates:
#http://www.ctio.noao.edu/noao/content/Coordinates-Observatories-Cerro-Tololo-and-Cerro-Pachon
#http://arxiv.org/pdf/1210.1616v3.pdf
#(-30 10 10.73, -70 48 23.52, 2213m)
TEL_LON = -70.80653
TEL_LAT = -30.169647
TEL_HEIGHT = 2213
# pyephem observer for CTIO; ephem expects lon/lat as sexagesimal strings
CTIO = ephem.Observer()
CTIO.lon,CTIO.lat = str(TEL_LON),str(TEL_LAT)
CTIO.elevation = TEL_HEIGHT
# Default maximum number of exposures to grab from DB
NMAX = 50000
# Fixed-offset timezone building blocks (pre-datetime.timezone idiom)
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
class UTC(tzinfo):
    """Minimal fixed-offset tzinfo representing UTC (zero offset, no DST)."""

    def utcoffset(self, dt):
        # UTC is, by definition, zero offset from itself.
        return timedelta(0)

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        # UTC never observes daylight saving time.
        return timedelta(0)
def safe_proj(proj, lon, lat):
    """Project lon/lat arrays, masking points that fall off the map.

    Basemap flags clipped points with huge (~1e30) coordinate values;
    those entries are replaced with NaN so they are not drawn.
    """
    coords = list(proj(np.asarray(lon), np.asarray(lat)))
    for arr in coords:
        arr[arr > 1e29] = np.nan
    return coords[0], coords[1]
def airmass_angle(x=1.4):
    """Return the zenith angle (degrees) corresponding to an airmass limit."""
    # Plane-parallel atmosphere: airmass = sec(zenith angle)
    altitude = np.degrees(np.arcsin(1.0 / x))
    return 90.0 - altitude
def load_data(filename=None):
    """Load exposure data, either from the SISPI DB or from a text file.

    Parameters
    ----------
    filename : str or None
        If None, query the exposure table directly (uses the module-level
        ``opts`` and ``NMAX``); otherwise load a whitespace-delimited file
        with columns expnum, telra, teldec, filter.

    Returns
    -------
    Record array with fields (expnum, telra, teldec, filter).
    """
    dtype=[('expnum',int),('telra',float),('teldec',float),('filter',object)]
    if filename is None:
        ### INSERT KLAUS' CODE HERE ###
        from database import Database
        db = Database()
        db.connect()
        # NOTE(review): string-interpolated SQL; opts.flavor/opts.propid come
        # from the operator's command line, so this is acceptable for a local
        # tool but must not be exposed to untrusted input.
        query = "SELECT id,telra,teldec,filter FROM exposure WHERE exposed = TRUE AND flavor LIKE '%s' AND propid LIKE '%s' ORDER BY id DESC LIMIT %i"%(opts.flavor,opts.propid,NMAX)
        # Fix: was a bare Python-2 `print query` statement (a syntax error on
        # Python 3); route through logging.debug like every other diagnostic.
        logging.debug(query)
        return np.rec.array(db.execute(query),dtype=dtype)
    else:
        return np.loadtxt(filename,dtype=dtype)
def load_footprint(footprint='des'):
    """Load a footprint perimeter as a (ra, dec) record array."""
    dtype = [('ra',float),('dec',float)]
    # Empty footprint: return a structured array with zero points
    if footprint in (None, 'none'):
        return np.array(len(dtype)*[[]],dtype=dtype)
    # Footprint data directory can be overridden through the environment
    basedir = os.path.dirname(os.path.abspath(__file__))
    dirname = os.environ.get(FOOTPATH, os.path.join(basedir,'..','data'))
    filename = os.path.join(dirname, FOOTPRINT[footprint])
    if not os.path.exists(filename):
        raise IOError("Footprint file not found: %s" % filename)
    return np.loadtxt(filename, dtype=dtype)
def lmst(datetime):
    """ Calculate Local Mean Sidereal Time (LMST) in degrees.

    NOTE(review): the ``datetime`` argument is unused (and shadows the
    imported ``datetime`` class); the time actually used is whatever has
    been assigned to the module-level ``CTIO.date`` beforehand -- confirm
    callers set ``CTIO.date`` before calling.
    """
    lmst = np.degrees(CTIO.sidereal_time())
    logging.debug('Using pyephem for LMST: %.3f'%lmst)
    return lmst
def moon(datetime):
    """ Return ((ra, dec) in degrees, illumination percentage) for the Moon.

    NOTE(review): the ``datetime`` argument is unused (and shadows the
    imported ``datetime`` class); the ephemeris is evaluated at whatever
    ``CTIO.date`` is currently set to -- confirm callers set it first.
    """
    moon = ephem.Moon()
    moon.compute(CTIO)
    # moon_phase is the illuminated fraction [0,1]; scale to percent
    moon_phase = moon.moon_phase * 100
    moon_ra,moon_dec = np.degrees([moon.ra,moon.dec])
    return (moon_ra, moon_dec),moon_phase
def boolean(string):
    """Convert strings to booleans for argparse.

    Accepts common true/false spellings case-insensitively; raises
    ValueError (with a message, unlike the original bare raise) for
    anything unrecognized so argparse reports a useful error.
    """
    string = string.lower()
    if string in ['0', 'f', 'false', 'no', 'off']:
        return False
    elif string in ['1', 't', 'true', 'yes', 'on']:
        return True
    else:
        raise ValueError("unrecognized boolean string: %r" % string)
# Command-line entry point: plot recent exposures on an orthographic
# projection of the sky centered on the current LMST at CTIO.
if __name__ == "__main__":
    import argparse
    description = __doc__
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('expnum',nargs='?',type=int,default=None,
                        help="exposure number to plot")
    parser.add_argument('-a','--airmass',default=1.4,type=float,
                        help='plot airmass limit')
    #parser.add_argument('--after',default=None,
    #                    help='plot exposures after a given UTC/ExpNum')
    parser.add_argument('-b','--band',default='all',choices=BANDS,
                        help='plot exposures in specific band')
    #parser.add_argument('--before',default=None,
    #                    help='plot exposures before a given UTC/ExpNum')
    parser.add_argument('-c','--color',default=True,type=boolean,
                        help='plot color corresponding to filter')
    # NOTE(review): default='des' combined with action='append' is unusual --
    # a user-supplied -f produces a list while the default stays a string,
    # and the code below compares opts.footprint == 'des' as a scalar; verify.
    parser.add_argument('-f','--footprint',default='des',choices=FOOTPRINT.keys(),
                        action='append',help='footprint to plot')
    parser.add_argument('--flavor',default='object',type=str,
                        help='exposure flavor [object,flat,etc.]')
    parser.add_argument('-i','--infile',default=None,
                        help='list of exposures to plot')
    parser.add_argument('-o','--outfile',default=None,
                        help='output file for saving figure')
    parser.add_argument('-m','--moon',default=True,type=boolean,
                        help='plot moon location and phase')
    parser.add_argument('-n','--numexp',default=10,type=float,
                        help='number of exposures to plot')
    parser.add_argument('--propid',default='%',
                        help='propid to filter exposures')
    parser.add_argument('--utc',default=None,
                        help="UTC for plot (defaults to now)")
    parser.add_argument('-v','--verbose',action='store_true',
                        help='verbosity')
    parser.add_argument('--version',action='version',version='%(prog)s '+__version__)
    parser.add_argument('-z','--zenith',default=True,type=boolean,
                        help="plot zenith position")
    opts = parser.parse_args()
    # Set logging level
    logging.basicConfig(level=logging.DEBUG if opts.verbose else logging.INFO,
                        format='%(message)s',stream=sys.stdout)
    # Parse UTC
    if opts.utc is None:
        utc = datetime.now(tz=UTC())
    else:
        utc = dateutil.parser.parse(opts.utc,tzinfos={'UTC':UTC})
    logging.debug("UTC: %s"%utc.strftime('%Y-%m-%d %H:%M:%S'))
    # All subsequent lmst()/moon() calls read this observer date
    CTIO.date = utc
    # Grab the data
    data = load_data(opts.infile)
    # Subselect the data
    select = np.in1d(data['filter'],FILTERS)
    if opts.band in FILTERS:
        select &= (data['filter'] == opts.band)
    # Keep only the first opts.numexp rows (query is ordered newest-first)
    select &= (np.arange(len(data)) < opts.numexp)
    expnum,telra,teldec,band = data['expnum'],data['telra'],data['teldec'],data['filter']
    # Select the exposure of interest
    if opts.expnum:
        # Suffix match so a short expnum can identify a full exposure id
        match = np.char.array(expnum).endswith(str(opts.expnum))
        if not match.any():
            msg = "Exposure matching %s not found"%opts.expnum
            raise ValueError(msg)
        idx = np.nonzero(match)[0][0]
    else:
        idx = 0
    # Set the colors
    if opts.color:
        nexp = len(expnum)
        ncolors = len(COLORS)
        # NOTE(review): COLORS.keys()/.values() used as sequences is
        # Python 2 behavior; Python 3 would need list(...) wrappers here.
        color_repeat = np.repeat(COLORS.keys(),nexp).reshape(ncolors,nexp)
        color_idx = np.argmax(band==color_repeat,axis=0)
        color = np.array(COLORS.values())[color_idx]
    else:
        color = COLORS['none']
    # Create the figure
    fig,ax = plt.subplots(figsize=(12,8))
    #fig,ax = plt.subplots()
    # Create the Basemap
    lon_0 = lmst(utc); lat_0 = TEL_LAT
    m = Basemap(projection='ortho',lon_0=lon_0,lat_0=lat_0)
    parallels = np.arange(-90.,120.,30.)
    m.drawparallels(parallels)
    meridians = np.arange(0.,420.,60.)
    m.drawmeridians(meridians)
    for mer in meridians[:-1]:
        plt.annotate(r'$%i^{\circ}$'%mer,m(mer,5),ha='center')
    plt.annotate('East',xy=(1.02,0.5),ha='left',xycoords='axes fraction')
    plt.annotate('West',xy=(-.02,0.5),ha='right',xycoords='axes fraction')
    exp_zorder = 10
    exp_kwargs = dict(s=40,marker='H',zorder=exp_zorder,edgecolor='k',lw=1)
    # Projected exposure locations
    x,y = safe_proj(m,telra,teldec)
    # Plot exposure of interest
    logging.debug("Plotting exposure: %i (%3.2f,%3.2f)"%(expnum[idx],telra[idx],teldec[idx]))
    # NOTE(review): when opts.color is set, `color` is the full per-exposure
    # array rather than color[idx] -- presumably intended; confirm.
    m.scatter(x[idx],y[idx],color=color,**exp_kwargs)
    # Plot previous exposures
    nexp_kwargs = dict(exp_kwargs)
    nexp_kwargs.update(zorder=exp_zorder-1,alpha=0.2,edgecolor='none')#,lw=0)
    logging.debug("Plotting last %i exposures"%opts.numexp)
    # NOTE(review): color[select] assumes `color` is an array; with
    # --color false it is a plain string and this indexing would fail.
    m.scatter(x[select],y[select],color=color[select],**nexp_kwargs)
    # Plot zenith position & focal plane scale
    zen_x,zen_y = m(lon_0,lat_0)
    zen_kwargs = dict(color='green',alpha=0.75,lw=1,zorder=0)
    if opts.zenith:
        logging.debug("Plotting zenith: (%.2f,%.2f)"%(lon_0,lat_0))
        m.plot(zen_x,zen_y,'+',ms=10,**zen_kwargs)
        logging.debug("Plotting focal plane scale.")
        m.tissot(lon_0, lat_0, 1.0, 100, fc='none', **zen_kwargs)
    # Plot airmass circle
    if not np.isnan(opts.airmass):
        logging.debug("Plotting airmass: %s"%opts.airmass)
        angle = airmass_angle(opts.airmass)
        m.tissot(lon_0, lat_0, angle, 100, fc='none',**zen_kwargs)
    # Moon location and phase
    if opts.moon:
        (moon_ra,moon_dec),moon_phase = moon(utc)
        logging.debug("Plotting moon: %i%%,(%.1f,%.1f)"%(moon_phase,moon_ra,moon_dec))
        moon_txt = '%i%%'%moon_phase
        moon_kwargs = dict(zorder=exp_zorder-1,fontsize=10,va='center',ha='center',
                           bbox=dict(boxstyle='circle,pad=0.4',fc='k',ec='k',alpha=0.25,lw=2))
        ax.annotate(moon_txt,m(moon_ra,moon_dec),**moon_kwargs)
    # Plot footprint(s) (should eventually be a loop over all footprints)
    ft_kwargs = dict(marker='o',mew=0,mfc='none',color='b',lw=2,zorder=exp_zorder-3)
    perim = load_footprint(opts.footprint)
    logging.debug("Plotting footprint: %s"%opts.footprint)
    proj = safe_proj(m,perim['ra'],perim['dec'])
    m.plot(*proj,**ft_kwargs)
    if opts.footprint == 'des':
        # Plot the SN fields
        logging.debug("Plotting supernova fields.")
        # This does the projection correctly, but fails at boundary
        sn_kwargs = dict(facecolor='none',edgecolor=ft_kwargs['color'],zorder=exp_zorder-1)
        # Check that point inside boundary
        fact = 0.99
        boundary = Ellipse((m.rmajor,m.rminor),2*(fact*m.rmajor),2*(fact*m.rminor))
        for v in SN.values():
            if not boundary.contains_point(m(*v)): continue
            m.tissot(v[0],v[1],1.0,100,**sn_kwargs)
        # The SN labels
        sntxt_kwargs = dict(zorder=exp_zorder-1,fontsize=12,
                            bbox=dict(boxstyle='round,pad=0',fc='w',ec='none',
                                      alpha=0.25))
        for k,v in SN_LABELS.items():
            ax.annotate(k,m(*v),**sntxt_kwargs)
    # Annotate with some information
    logging.debug("Adding info text.")
    bbox_props = dict(boxstyle='round', facecolor='white')
    textstr= "%s %s\n"%("UTC:",utc.strftime('%Y-%m-%d %H:%M:%S'))
    textstr+="%s %i (%s)\n"%("Exposure:",expnum[idx],band[idx])
    textstr+="%s %i\n"%("NExp:",opts.numexp)
    textstr+="%s (%.1f$^{\circ}$,%.1f$^{\circ}$)\n"%("Zenith:",lon_0,lat_0)
    textstr+="%s %s\n"%("Airmass:",opts.airmass)
    # NOTE(review): moon_phase/moon_ra/moon_dec are only defined when
    # opts.moon is true; running with `--moon false` raises NameError here.
    textstr+="%s %i%% (%.1f$^{\circ}$,%.1f$^{\circ}$)\n"%("Moon:",moon_phase,moon_ra,moon_dec)
    textstr+="%s %s"%("Footprint:",opts.footprint)
    ax.annotate(textstr, xy=(0.98,0.98), xycoords='axes fraction',
                fontsize=10,ha='left',va='top', bbox=bbox_props)
    # Plot filter legend
    if opts.color:
        logging.debug("Adding filter legend.")
        leg_kwargs = dict(scatterpoints=1,fontsize=10,bbox_to_anchor=(0.08,0.20))
        handles, labels = [],[]
        for k in FILTERS:
            if k == 'VR':
                # Only show VR in the legend when it appears in the data
                if not (band[select]=='VR').any() and not band[idx]=='VR':
                    continue
            labels.append(k)
            handles.append(plt.scatter(None,None,color=COLORS[k],**exp_kwargs))
        plt.legend(handles,labels,**leg_kwargs)
    # Save the figure
    if opts.outfile:
        logging.debug("Saving figure to: %s"%opts.outfile)
        plt.savefig(opts.outfile)#,bbox_inches='tight')
    plt.show()
|
|
import os
import unittest
from unittest.mock import MagicMock, patch
import dbt.clients.system
import dbt.compilation
import dbt.exceptions
import dbt.flags
import dbt.linker
import dbt.parser
import dbt.config
import dbt.utils
import dbt.parser.manifest
from dbt.contracts.graph.manifest import FilePath, SourceFile, FileHash, Manifest
from dbt.contracts.graph.parsed import ParsedMacro
from dbt.parser.results import ParseResult
from dbt.parser.base import BaseParser
from dbt.node_types import NodeType
try:
from queue import Empty
except ImportError:
from Queue import Empty
from dbt.logger import GLOBAL_LOGGER as logger # noqa
from .utils import config_from_parts_or_dicts, generate_name_macros, MockMacro
class GraphTest(unittest.TestCase):
    """Tests for dbt manifest loading and graph compilation/linking.

    setUp patches out dbt's filesystem searcher, hook parser, adapter
    factory, project loader, parse-result cache, and file loader so that
    models can be injected as in-memory SourceFile objects.
    """

    def tearDown(self):
        """Stop every patcher started in setUp.

        Fix: the original called ``.stop()`` on the MagicMocks returned by
        ``patcher.start()`` (a silent no-op) and never stopped
        ``self.hook_patcher``, leaking the HookParser.__new__ patch into
        subsequent tests.
        """
        self.write_gpickle_patcher.stop()
        self.load_projects_patcher.stop()
        self.file_system_patcher.stop()
        self.hook_patcher.stop()
        self.get_adapter_patcher.stop()
        self.get_adapter_patcher_parser.stop()
        self.load_patch.stop()
        self.load_source_file_patcher.stop()

    def setUp(self):
        """Install all patches and build the in-memory test fixtures."""
        dbt.flags.STRICT_MODE = True
        self.graph_result = None

        self.write_gpickle_patcher = patch('networkx.write_gpickle')
        self.load_projects_patcher = patch('dbt.parser.manifest._load_projects')
        self.file_system_patcher = patch.object(
            dbt.parser.search.FilesystemSearcher, '__new__'
        )
        self.hook_patcher = patch.object(
            dbt.parser.hooks.HookParser, '__new__'
        )
        self.get_adapter_patcher = patch('dbt.context.providers.get_adapter')
        self.factory = self.get_adapter_patcher.start()
        # also patch the parser-side adapter lookup
        self.get_adapter_patcher_parser = patch('dbt.parser.base.get_adapter')
        self.factory_cmn = self.get_adapter_patcher_parser.start()

        # Capture the graph instead of pickling it to disk.
        def mock_write_gpickle(graph, outfile):
            self.graph_result = graph
        self.mock_write_gpickle = self.write_gpickle_patcher.start()
        self.mock_write_gpickle.side_effect = mock_write_gpickle

        self.profile = {
            'outputs': {
                'test': {
                    'type': 'postgres',
                    'threads': 4,
                    'host': 'thishostshouldnotexist',
                    'port': 5432,
                    'user': 'root',
                    'pass': 'password',
                    'dbname': 'dbt',
                    'schema': 'dbt_test'
                }
            },
            'target': 'test'
        }

        self.mock_load_projects = self.load_projects_patcher.start()
        def _load_projects(config, paths):
            yield config.project_name, config
        self.mock_load_projects.side_effect = _load_projects

        # Models injected by use_models(); consumed by the patched loaders.
        self.mock_models = []

        def _mock_parse_result(config, all_projects):
            return ParseResult(
                vars_hash=FileHash.from_contents('vars'),
                project_hashes={name: FileHash.from_contents(name) for name in all_projects},
                profile_hash=FileHash.from_contents('profile'),
            )
        self.load_patch = patch('dbt.parser.manifest.make_parse_result')
        self.mock_parse_result = self.load_patch.start()
        self.mock_parse_result.side_effect = _mock_parse_result

        # Serve file contents from self.mock_models instead of disk.
        self.load_source_file_patcher = patch.object(BaseParser, 'load_file')
        self.mock_source_file = self.load_source_file_patcher.start()
        self.mock_source_file.side_effect = lambda path: [n for n in self.mock_models if n.path == path][0]

        self.internal_manifest = Manifest.from_macros(macros={
            n.unique_id: n for n in generate_name_macros('test_models_compile')
        })

        def filesystem_iter(iter_self):
            if 'sql' not in iter_self.extension:
                return []
            if 'models' not in iter_self.relative_dirs:
                return []
            return [model.path for model in self.mock_models]

        def create_filesystem_searcher(cls, project, relative_dirs, extension):
            result = MagicMock(project=project, relative_dirs=relative_dirs, extension=extension)
            result.__iter__.side_effect = lambda: iter(filesystem_iter(result))
            return result

        def create_hook_patcher(cls, results, project, relative_dirs, extension):
            result = MagicMock(results=results, project=project, relative_dirs=relative_dirs, extension=extension)
            result.__iter__.side_effect = lambda: iter([])
            return result

        self.mock_filesystem_constructor = self.file_system_patcher.start()
        self.mock_filesystem_constructor.side_effect = create_filesystem_searcher
        self.mock_hook_constructor = self.hook_patcher.start()
        self.mock_hook_constructor.side_effect = create_hook_patcher

    def get_config(self, extra_cfg=None):
        """Build a RuntimeConfig for the test project, merged with extra_cfg."""
        if extra_cfg is None:
            extra_cfg = {}
        cfg = {
            'name': 'test_models_compile',
            'version': '0.1',
            'profile': 'test',
            'project-root': os.path.abspath('.'),
        }
        cfg.update(extra_cfg)
        return config_from_parts_or_dicts(project=cfg, profile=self.profile)

    def get_compiler(self, project):
        return dbt.compilation.Compiler(project)

    def use_models(self, models):
        """Register {name: sql} models as in-memory SourceFiles."""
        for k, v in models.items():
            path = FilePath(
                searched_path='models',
                project_root=os.path.normcase(os.getcwd()),
                relative_path='{}.sql'.format(k),
            )
            source_file = SourceFile(path=path, checksum=FileHash.empty())
            source_file.contents = v
            self.mock_models.append(source_file)

    def load_manifest(self, config):
        """Run the (patched) manifest loader and return the manifest."""
        loader = dbt.parser.manifest.ManifestLoader(config, {config.project_name: config})
        loader.load(internal_manifest=self.internal_manifest)
        return loader.create_manifest()

    def test__single_model(self):
        self.use_models({
            'model_one': 'select * from events',
        })
        config = self.get_config()
        manifest = self.load_manifest(config)
        compiler = self.get_compiler(config)
        linker = compiler.compile(manifest)
        self.assertEqual(
            list(linker.nodes()),
            ['model.test_models_compile.model_one'])
        self.assertEqual(
            list(linker.edges()),
            [])

    def test__two_models_simple_ref(self):
        self.use_models({
            'model_one': 'select * from events',
            'model_two': "select * from {{ref('model_one')}}",
        })
        config = self.get_config()
        manifest = self.load_manifest(config)
        compiler = self.get_compiler(config)
        linker = compiler.compile(manifest)
        self.assertCountEqual(
            linker.nodes(),
            [
                'model.test_models_compile.model_one',
                'model.test_models_compile.model_two',
            ]
        )
        self.assertCountEqual(
            linker.edges(),
            [('model.test_models_compile.model_one', 'model.test_models_compile.model_two',)]
        )

    def test__model_materializations(self):
        self.use_models({
            'model_one': 'select * from events',
            'model_two': "select * from {{ref('model_one')}}",
            'model_three': "select * from events",
            'model_four': "select * from events",
        })
        cfg = {
            "models": {
                "materialized": "table",
                "test_models_compile": {
                    "model_one": {"materialized": "table"},
                    "model_two": {"materialized": "view"},
                    "model_three": {"materialized": "ephemeral"}
                }
            }
        }
        config = self.get_config(cfg)
        manifest = self.load_manifest(config)
        compiler = self.get_compiler(config)
        linker = compiler.compile(manifest)
        # model_four falls back to the project-level "table" default
        expected_materialization = {
            "model_one": "table",
            "model_two": "view",
            "model_three": "ephemeral",
            "model_four": "table"
        }
        for model, expected in expected_materialization.items():
            key = 'model.test_models_compile.{}'.format(model)
            actual = manifest.nodes[key].config.materialized
            self.assertEqual(actual, expected)

    def test__model_incremental(self):
        self.use_models({
            'model_one': 'select * from events'
        })
        cfg = {
            "models": {
                "test_models_compile": {
                    "model_one": {
                        "materialized": "incremental",
                        "unique_key": "id"
                    },
                }
            }
        }
        config = self.get_config(cfg)
        manifest = self.load_manifest(config)
        compiler = self.get_compiler(config)
        linker = compiler.compile(manifest)
        node = 'model.test_models_compile.model_one'
        self.assertEqual(list(linker.nodes()), [node])
        self.assertEqual(list(linker.edges()), [])
        self.assertEqual(manifest.nodes[node].config.materialized, 'incremental')

    def test__dependency_list(self):
        self.use_models({
            'model_1': 'select * from events',
            'model_2': 'select * from {{ ref("model_1") }}',
            'model_3': '''
                select * from {{ ref("model_1") }}
                union all
                select * from {{ ref("model_2") }}
            ''',
            'model_4': 'select * from {{ ref("model_3") }}'
        })
        config = self.get_config()
        manifest = self.load_manifest(config)
        compiler = self.get_compiler(config)
        linker = compiler.compile(manifest)
        models = ('model_1', 'model_2', 'model_3', 'model_4')
        model_ids = ['model.test_models_compile.{}'.format(m) for m in models]
        manifest = MagicMock(nodes={
            n: MagicMock(unique_id=n)
            for n in model_ids
        })
        manifest.expect.side_effect = lambda n: MagicMock(unique_id=n)
        queue = linker.as_graph_queue(manifest)
        # Nodes must come off the queue in dependency order, one at a time:
        # each model only becomes available after its predecessor is done.
        for model_id in model_ids:
            self.assertFalse(queue.empty())
            got = queue.get(block=False)
            self.assertEqual(got.unique_id, model_id)
            with self.assertRaises(Empty):
                queue.get(block=False)
            queue.mark_done(got.unique_id)
        self.assertTrue(queue.empty())

    def test__partial_parse(self):
        config = self.get_config()
        loader = dbt.parser.manifest.ManifestLoader(config, {config.project_name: config})
        loader.load(internal_manifest=self.internal_manifest)
        loader.create_manifest()
        results = loader.results
        # Results from the same dbt version should be reusable...
        self.assertTrue(loader.matching_parse_results(results))
        # ...but results from a different dbt version must be rejected.
        too_low = results.replace(dbt_version='0.0.1a1')
        self.assertFalse(loader.matching_parse_results(too_low))
        too_high = results.replace(dbt_version='99999.99.99')
        self.assertFalse(loader.matching_parse_results(too_high))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2009 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
import math
import numpy
import os.path
from rmgpy.cantherm.common import checkConformerEnergy
import rmgpy.constants as constants
from rmgpy.statmech import IdealGasTranslation, NonlinearRotor, LinearRotor, HarmonicOscillator, Conformer
################################################################################
class GaussianLog:
"""
Represent a log file from Gaussian. The attribute `path` refers to the
location on disk of the Gaussian log file of interest. Methods are provided
to extract a variety of information into CanTherm classes and/or NumPy
arrays.
"""
    def __init__(self, path):
        # Location on disk of the Gaussian log file to parse.
        self.path = path
def getNumberOfAtoms(self):
"""
Return the number of atoms in the molecular configuration used in
the Gaussian log file.
"""
Natoms = 0
# Open Gaussian log file for parsing
f = open(self.path, 'r')
line = f.readline()
while line != '' and Natoms == 0:
# Automatically determine the number of atoms
if 'Input orientation:' in line and Natoms == 0:
for i in range(5): line = f.readline()
while '---------------------------------------------------------------------' not in line:
Natoms += 1
line = f.readline()
line = f.readline()
# Close file when finished
f.close()
# Return the result
return Natoms
    def loadForceConstantMatrix(self):
        """
        Return the force constant matrix from the Gaussian log file. The job
        that generated the log file must have the option ``iop(7/33=1)`` in
        order for the proper force constant matrix (in Cartesian coordinates)
        to be printed in the log file. If multiple such matrices are identified,
        only the last is returned. The units of the returned force constants
        are J/m^2. If no force constant matrix can be found in the log file,
        ``None`` is returned.
        """
        F = None
        Natoms = self.getNumberOfAtoms()
        # Three Cartesian coordinates per atom
        Nrows = Natoms * 3
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            # Read force constant matrix
            if 'Force constants in Cartesian coordinates:' in line:
                F = numpy.zeros((Nrows,Nrows), numpy.float64)
                # Gaussian prints the lower triangle in blocks of 5 columns
                for i in range(int(math.ceil(Nrows / 5.0))):
                    # Header row
                    line = f.readline()
                    # Matrix element rows
                    for j in range(i*5, Nrows):
                        data = f.readline().split()
                        for k in range(len(data)-1):
                            # Fortran-style 'D' exponents; symmetrize as we go
                            F[j,i*5+k] = float(data[k+1].replace('D', 'E'))
                            F[i*5+k,j] = F[j,i*5+k]
                # Convert from atomic units (Hartree/Bohr_radius^2) to J/m^2
                F *= 4.35974417e-18 / 5.291772108e-11**2
            line = f.readline()
        # Close file when finished
        f.close()
        return F
def loadGeometry(self):
"""
Return the optimum geometry of the molecular configuration from the
Gaussian log file. If multiple such geometries are identified, only the
last is returned.
"""
number = []; coord = []
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Automatically determine the number of atoms
if 'Input orientation:' in line:
number = []; coord = []
for i in range(5): line = f.readline()
while '---------------------------------------------------------------------' not in line:
data = line.split()
number.append(int(data[1]))
coord.append([float(data[3]), float(data[4]), float(data[5])])
line = f.readline()
line = f.readline()
# Close file when finished
f.close()
coord = numpy.array(coord, numpy.float64)
number = numpy.array(number, numpy.int)
mass = numpy.zeros(len(number), numpy.float64)
# Use the atomic mass of the most common isotope rather than the
# average atomic mass
# These values were taken from "Atomic Weights and Isotopic Compositions" v3.0 (July 2010) from NIST
for i in range(len(number)):
if number[i] == 1:
mass[i] = 1.00782503207
elif number[i] == 6:
mass[i] = 12.0
elif number[i] == 7:
mass[i] = 14.0030740048
elif number[i] == 8:
mass[i] = 15.99491461956
elif number[i] == 15:
mass[i] = 30.97376163
elif number[i] == 16:
mass[i] = 31.97207100
elif number[i] == 17:
mass[i] = 35.4527
else:
print 'Atomic number {0:d} not yet supported in loadGeometry().'.format(number[i])
return coord, number, mass
    def loadConformer(self, symmetry=None, spinMultiplicity=None, opticalIsomers=1):
        """
        Load the molecular degree of freedom data from a log file created as
        the result of a Gaussian "Freq" quantum chemistry calculation. As
        Gaussian's guess of the external symmetry number is not always correct,
        you can use the `symmetry` parameter to substitute your own value; if
        not provided, the value in the Gaussian log file will be adopted. In a
        log file with multiple Thermochemistry sections, only the last one will
        be kept.

        Returns a :class:`Conformer` with translational, rotational, and
        vibrational modes plus the zero-point-corrected ground-state energy.
        """
        modes = []
        E0 = 0.0
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            # The data we want is in the Thermochemistry section of the output
            if '- Thermochemistry -' in line:
                # Reset so only the last Thermochemistry section is kept
                modes = []
                inPartitionFunctions = False
                line = f.readline()
                while line != '':
                    # This marks the end of the thermochemistry section
                    if '-------------------------------------------------------------------' in line:
                        break
                    # Read molecular mass for external translational modes
                    elif 'Molecular mass:' in line:
                        mass = float(line.split()[2])
                        translation = IdealGasTranslation(mass=(mass,"amu"))
                        modes.append(translation)
                    # Read Gaussian's estimate of the external symmetry number
                    elif 'Rotational symmetry number' in line and symmetry is None:
                        symmetry = int(float(line.split()[3]))
                    # Read moments of inertia for external rotational modes
                    elif 'Rotational constants (GHZ):' in line:
                        inertia = [float(d) for d in line.split()[-3:]]
                        for i in range(3):
                            # Convert rotational constant (GHz) to moment of
                            # inertia in amu*angstrom^2
                            inertia[i] = constants.h / (8 * constants.pi * constants.pi * inertia[i] * 1e9) *constants.Na*1e23
                        rotation = NonlinearRotor(inertia=(inertia,"amu*angstrom^2"), symmetry=symmetry)
                        modes.append(rotation)
                    # Linear molecules print a single rotational constant
                    elif 'Rotational constant (GHZ):' in line:
                        inertia = [float(line.split()[3])]
                        inertia[0] = constants.h / (8 * constants.pi * constants.pi * inertia[0] * 1e9) *constants.Na*1e23
                        rotation = LinearRotor(inertia=(inertia[0],"amu*angstrom^2"), symmetry=symmetry)
                        modes.append(rotation)
                    # Read vibrational modes
                    elif 'Vibrational temperatures:' in line:
                        frequencies = []
                        frequencies.extend([float(d) for d in line.split()[2:]])
                        line = f.readline()
                        frequencies.extend([float(d) for d in line.split()[1:]])
                        line = f.readline()
                        while line.strip() != '':
                            frequencies.extend([float(d) for d in line.split()])
                            line = f.readline()
                        # Convert from K to cm^-1
                        if len(frequencies) > 0:
                            frequencies = [freq * 0.695039 for freq in frequencies]        # kB = 0.695039 cm^-1/K
                            vibration = HarmonicOscillator(frequencies=(frequencies,"cm^-1"))
                            modes.append(vibration)
                    # Read ground-state energy (Hartree -> J/mol)
                    elif 'Sum of electronic and zero-point Energies=' in line:
                        E0 = float(line.split()[6]) * 4.35974394e-18 * constants.Na
                    # Read spin multiplicity if not explicitly given
                    elif 'Electronic' in line and inPartitionFunctions and spinMultiplicity is None:
                        spinMultiplicity = int(float(line.split()[1].replace('D', 'E')))
                    # The partition-function table follows the 'Log10(Q)' header
                    elif 'Log10(Q)' in line:
                        inPartitionFunctions = True
                    # Read the next line in the file
                    line = f.readline()
            # Read the next line in the file
            line = f.readline()
        # Close file when finished
        f.close()
        return Conformer(E0=(E0*0.001,"kJ/mol"), modes=modes, spinMultiplicity=spinMultiplicity, opticalIsomers=opticalIsomers)
def loadEnergy(self,frequencyScaleFactor=1.):
"""
Load the energy in J/mol from a Gaussian log file. The file is checked
for a complete basis set extrapolation; if found, that value is
returned. Only the last energy in the file is returned. The zero-point
energy is *not* included in the returned value; it is removed from the
CBS-QB3 value.
"""
modes = []
E0 = None; E0_cbs = None; scaledZPE = None
spinMultiplicity = 1
f = open(self.path, 'r')
line = f.readline()
while line != '':
if 'SCF Done:' in line:
E0 = float(line.split()[4]) * constants.E_h * constants.Na
elif 'CBS-QB3 (0 K)' in line:
E0_cbs = float(line.split()[3]) * constants.E_h * constants.Na
elif 'G3(0 K)' in line:
E0_cbs = float(line.split()[2]) * constants.E_h * constants.Na
# Read the ZPE from the "E(ZPE)=" line, as this is the scaled version.
# Gaussian defines the following as
# E (0 K) = Elec + E(ZPE),
# The ZPE is the scaled ZPE given by E(ZPE) in the log file,
# hence to get the correct Elec from E (0 K) we need to subtract the scaled ZPE
elif 'E(ZPE)' in line:
scaledZPE = float(line.split()[1]) * constants.E_h * constants.Na
elif '\\ZeroPoint=' in line:
line = line.strip() + f.readline().strip()
start = line.find('\\ZeroPoint=') + 11
end = line.find('\\', start)
scaledZPE = float(line[start:end]) * constants.E_h * constants.Na * frequencyScaleFactor
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
if E0_cbs is not None:
if scaledZPE is None:
raise Exception('Unable to find zero-point energy in Gaussian log file.')
return E0_cbs - scaledZPE
elif E0 is not None:
return E0
else: raise Exception('Unable to find energy in Gaussian log file.')
def loadZeroPointEnergy(self):
"""
Load the unscaled zero-point energy in J/mol from a Gaussian log file.
"""
modes = []
ZPE = None
spinMultiplicity = 1
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Do NOT read the ZPE from the "E(ZPE)=" line, as this is the scaled version!
# We will read in the unscaled ZPE and later multiply the scaling factor
# from the input file
if 'Zero-point correction=' in line:
ZPE = float(line.split()[2]) * constants.E_h * constants.Na
elif '\\ZeroPoint=' in line:
line = line.strip() + f.readline().strip()
start = line.find('\\ZeroPoint=') + 11
end = line.find('\\', start)
ZPE = float(line[start:end]) * constants.E_h * constants.Na
# Read the next line in the file
line = f.readline()
# Close file when finished
f.close()
if ZPE is not None:
return ZPE
else:
raise Exception('Unable to find zero-point energy in Gaussian log file.')
    def loadScanEnergies(self):
        """
        Extract the optimized energies in J/mol from a log file, e.g. the
        result of a Gaussian "Scan" quantum chemistry calculation.

        Returns a tuple ``(Vlist, angle)``: the potentials relative to the
        minimum-energy conformer (J/mol) and the assumed dihedral angles
        (radians, evenly spaced over [0, 2*pi]).
        """
        optfreq = False
        rigidScan=False
        # The array of potentials at each scan angle
        Vlist = []
        # Parse the Gaussian log file, extracting the energies of each
        # optimized conformer in the scan
        f = open(self.path, 'r')
        line = f.readline()
        while line != '':
            # If the job contains a "freq" then we want to ignore the last energy
            if ' freq ' in line:
                optfreq = True
            #if # scan is keyword instead of # opt, then this is a rigid scan job
            #and parsing the energies is done a little differently
            if '# scan' in line:
                rigidScan=True
            # The lines containing "SCF Done" give the energy at each
            # iteration (even the intermediate ones)
            if 'SCF Done:' in line:
                E = float(line.split()[4])
                #rigid scans will only not optimize, so just append every time it finds an energy.
                if rigidScan:
                    Vlist.append(E)
            # We want to keep the values of E that come most recently before
            # the line containing "Optimization completed", since it refers
            # to the optimized geometry
            # NOTE(review): assumes at least one "SCF Done" precedes every
            # "Optimization completed" line, otherwise E is unbound -- confirm
            if 'Optimization completed' in line:
                Vlist.append(E)
            line = f.readline()
        # Close file when finished
        f.close()
        #give warning in case this assumption is not true
        if rigidScan==True:
            print ' Assuming', os.path.basename(self.path), 'is the output from a rigid scan...'
        Vlist = numpy.array(Vlist, numpy.float64)
        # check to see if the scanlog indicates that a one of your reacting species may not be the lowest energy conformer
        checkConformerEnergy(Vlist, self.path)
        # Adjust energies to be relative to minimum energy conformer
        # Also convert units from Hartree/particle to J/mol (E_h * Na)
        Vlist -= numpy.min(Vlist)
        Vlist *= constants.E_h * constants.Na
        if optfreq: Vlist = Vlist[:-1]
        # Determine the set of dihedral angles corresponding to the loaded energies
        # This assumes that you start at 0.0, finish at 360.0, and take
        # constant step sizes in between
        # NOTE(review): fewer than two energies would divide by zero here -- confirm
        angle = numpy.arange(0.0, 2*math.pi+0.00001, 2*math.pi/(len(Vlist)-1), numpy.float64)
        return Vlist, angle
def loadNegativeFrequency(self):
"""
Return the negative frequency from a transition state frequency
calculation in cm^-1.
"""
frequencies = []
f = open(self.path, 'r')
line = f.readline()
while line != '':
# Read vibrational frequencies
if 'Frequencies --' in line:
frequencies.extend(line.split()[2:])
line = f.readline()
# Close file when finished
f.close()
frequencies = [float(freq) for freq in frequencies]
frequencies.sort()
frequency = [freq for freq in frequencies if freq < 0][0]
return frequency
|
|
import cassiopeia.type.dto.common
import cassiopeia.type.core.common
if cassiopeia.type.dto.common.sqlalchemy_imported:
import sqlalchemy
import sqlalchemy.orm
@cassiopeia.type.core.common.inheritdocs
class RawStats(cassiopeia.type.dto.common.CassiopeiaDto):
    """
    assists                            int     number of assists
    barracksKilled                     int     number of enemy inhibitors killed
    championsKilled                    int     number of champions killed
    combatPlayerScore                  int     the combat player score
    consumablesPurchased               int     number of consumables purchased
    damageDealtPlayer                  int     total damage dealt
    doubleKills                        int     number of double kills
    firstBlood                         int     first blood
    gold                               int     amount of gold
    goldEarned                         int     total gold earned
    goldSpent                          int     total gold spent
    item0                              int     ID of item 0
    item1                              int     ID of item 1
    item2                              int     ID of item 2
    item3                              int     ID of item 3
    item4                              int     ID of item 4
    item5                              int     ID of item 5
    item6                              int     ID of item 6
    itemsPurchased                     int     number of items purchased
    killingSprees                      int     number of killing sprees
    largestCriticalStrike              int     largest critical strike
    largestKillingSpree                int     largest killing spree
    largestMultiKill                   int     largest multi kill
    legendaryItemsCreated              int     number of tier 3 items built
    level                              int     level
    magicDamageDealtPlayer             int     total magic damage dealt
    magicDamageDealtToChampions        int     total magic damage dealt to champions
    magicDamageTaken                   int     total magic damage taken
    minionsDenied                      int     total minions denied
    minionsKilled                      int     total minions killed
    neutralMinionsKilled               int     total neutral minions killed
    neutralMinionsKilledEnemyJungle    int     neutral minions killed in enemy jungle
    neutralMinionsKilledYourJungle     int     neutral minions killed in own jungle
    nexusKilled                        bool    flag specifying if the summoner got the killing blow on the nexus
    nodeCapture                        int     number of nodes captured
    nodeCaptureAssist                  int     number of node capture assists
    nodeNeutralize                     int     number of nodes neutralized
    nodeNeutralizeAssist               int     number of node neutralization assists
    numDeaths                          int     number of deaths
    numItemsBought                     int     number of items bought
    objectivePlayerScore               int     objective player score
    pentaKills                         int     number of penta kills
    physicalDamageDealtPlayer          int     total physical damage dealt
    physicalDamageDealtToChampions     int     total physical damage dealt to champions
    physicalDamageTaken                int     total physical damage taken
    playerPosition                     int     player position
    playerRole                         int     player role
    quadraKills                        int     number of quadra kills
    sightWardsBought                   int     number of sight wards bought
    spell1Cast                         int     number of times first champion spell was cast
    spell2Cast                         int     number of times second champion spell was cast
    spell3Cast                         int     number of times third champion spell was cast
    spell4Cast                         int     number of times fourth champion spell was cast
    summonSpell1Cast                   int     number of times summoner spell 1 was cast
    summonSpell2Cast                   int     number of times summoner spell 2 was cast
    superMonsterKilled                 int     number of super monsters killed
    team                               int     team
    teamObjective                      int     team objectives
    timePlayed                         int     time played
    totalDamageDealt                   int     total damage dealt
    totalDamageDealtToChampions        int     total damage dealt to champions
    totalDamageTaken                   int     total damage taken
    totalHeal                          int     total healing done
    totalPlayerScore                   int     total player score
    totalScoreRank                     int     total score rank
    totalTimeCrowdControlDealt         int     total crowd control time dealt
    totalUnitsHealed                   int     number of units healed
    tripleKills                        int     number of triple kills
    trueDamageDealtPlayer              int     total true damage dealt
    trueDamageDealtToChampions         int     total true damage dealt to champions
    trueDamageTaken                    int     total true damage taken
    turretsKilled                      int     number of turrets killed
    unrealKills                        int     number of unreal kills
    victoryPointTotal                  int     total victory points
    visionWardsBought                  int     number of vision wards bought
    wardKilled                         int     number of wards killed
    wardPlaced                         int     number of wards placed
    win                                bool    flag specifying whether or not this game was won
    """
    # Attributes that default to 0 when absent from the API response
    _INTEGER_KEYS = (
        "assists", "barracksKilled", "championsKilled", "combatPlayerScore",
        "consumablesPurchased", "damageDealtPlayer", "doubleKills", "firstBlood",
        "gold", "goldEarned", "goldSpent",
        "item0", "item1", "item2", "item3", "item4", "item5", "item6",
        "itemsPurchased", "killingSprees", "largestCriticalStrike",
        "largestKillingSpree", "largestMultiKill", "legendaryItemsCreated",
        "level", "magicDamageDealtPlayer", "magicDamageDealtToChampions",
        "magicDamageTaken", "minionsDenied", "minionsKilled",
        "neutralMinionsKilled", "neutralMinionsKilledEnemyJungle",
        "neutralMinionsKilledYourJungle", "nodeCapture", "nodeCaptureAssist",
        "nodeNeutralize", "nodeNeutralizeAssist", "numDeaths", "numItemsBought",
        "objectivePlayerScore", "pentaKills", "physicalDamageDealtPlayer",
        "physicalDamageDealtToChampions", "physicalDamageTaken",
        "playerPosition", "playerRole", "quadraKills", "sightWardsBought",
        "spell1Cast", "spell2Cast", "spell3Cast", "spell4Cast",
        "summonSpell1Cast", "summonSpell2Cast", "superMonsterKilled",
        "team", "teamObjective", "timePlayed", "totalDamageDealt",
        "totalDamageDealtToChampions", "totalDamageTaken", "totalHeal",
        "totalPlayerScore", "totalScoreRank", "totalTimeCrowdControlDealt",
        "totalUnitsHealed", "tripleKills", "trueDamageDealtPlayer",
        "trueDamageDealtToChampions", "trueDamageTaken", "turretsKilled",
        "unrealKills", "victoryPointTotal", "visionWardsBought",
        "wardKilled", "wardPlaced",
    )
    # Attributes that default to False when absent from the API response
    _BOOLEAN_KEYS = ("nexusKilled", "win")

    def __init__(self, dictionary):
        # Copy every documented field out of the raw API dict, falling back to
        # the appropriate zero value when a key is missing.
        for key in RawStats._INTEGER_KEYS:
            setattr(self, key, dictionary.get(key, 0))
        for key in RawStats._BOOLEAN_KEYS:
            setattr(self, key, dictionary.get(key, False))
@cassiopeia.type.core.common.inheritdocs
class Player(cassiopeia.type.dto.common.CassiopeiaDto):
    """
    championId    int    champion id associated with player
    summonerId    int    summoner id associated with player
    teamId        int    team id associated with player
    """
    def __init__(self, dictionary):
        # All three fields are plain integers defaulting to 0 when missing
        for key in ("championId", "summonerId", "teamId"):
            setattr(self, key, dictionary.get(key, 0))
@cassiopeia.type.core.common.inheritdocs
class Game(cassiopeia.type.dto.common.CassiopeiaDto):
    """
    championId       int             champion ID associated with game
    createDate       int             date that end game data was recorded, specified as epoch milliseconds
    fellowPlayers    list<Player>    other players associated with the game
    gameId           int             game ID
    gameMode         str             game mode (Legal values: CLASSIC, ODIN, ARAM, TUTORIAL, ONEFORALL, ASCENSION, FIRSTBLOOD, KINGPORO)
    gameType         str             game type (Legal values: CUSTOM_GAME, MATCHED_GAME, TUTORIAL_GAME)
    invalid          bool            invalid flag
    ipEarned         int             IP Earned
    level            int             level
    mapId            int             map ID
    spell1           int             ID of first summoner spell
    spell2           int             ID of second summoner spell
    stats            RawStats        statistics associated with the game for this summoner
    subType          str             game sub-type (Legal values: NONE, NORMAL, BOT, RANKED_SOLO_5x5, RANKED_PREMADE_3x3, RANKED_PREMADE_5x5, ODIN_UNRANKED, RANKED_TEAM_3x3, RANKED_TEAM_5x5, NORMAL_3x3, BOT_3x3, CAP_5x5, ARAM_UNRANKED_5x5, ONEFORALL_5x5, FIRSTBLOOD_1x1, FIRSTBLOOD_2x2, SR_6x6, URF, URF_BOT, NIGHTMARE_BOT, ASCENSION, HEXAKILL, KING_PORO, COUNTER_PICK)
    teamId           int             team ID associated with game. Team ID 100 is blue team. Team ID 300 is purple team.
    """
    def __init__(self, dictionary):
        self.championId = dictionary.get("championId", 0)
        self.createDate = dictionary.get("createDate", 0)
        # Promote raw dicts to Player instances; skip falsy entries
        raw_players = dictionary.get("fellowPlayers", [])
        self.fellowPlayers = [p if isinstance(p, Player) else Player(p)
                              for p in raw_players if p]
        self.gameId = dictionary.get("gameId", 0)
        self.gameMode = dictionary.get("gameMode", "")
        self.gameType = dictionary.get("gameType", "")
        self.invalid = dictionary.get("invalid", False)
        self.ipEarned = dictionary.get("ipEarned", 0)
        self.level = dictionary.get("level", 0)
        self.mapId = dictionary.get("mapId", 0)
        self.spell1 = dictionary.get("spell1", 0)
        self.spell2 = dictionary.get("spell2", 0)
        # Promote the stats dict to RawStats; a falsy value is kept as-is
        stats = dictionary.get("stats", None)
        if stats and not isinstance(stats, RawStats):
            stats = RawStats(stats)
        self.stats = stats
        self.subType = dictionary.get("subType", "")
        self.teamId = dictionary.get("teamId", 0)

    @property
    def champion_ids(self):
        """Gets all champion IDs contained in this object"""
        ids = {self.championId}
        for player in self.fellowPlayers:
            ids.add(player.championId)
        return ids

    @property
    def summoner_ids(self):
        """Gets all summoner IDs contained in this object"""
        # Zero/absent summoner IDs are excluded
        return {player.summonerId for player in self.fellowPlayers if player.summonerId}

    @property
    def summoner_spell_ids(self):
        """Gets all summoner spell IDs contained in this object"""
        return {self.spell1, self.spell2}

    @property
    def item_ids(self):
        """Gets all item IDs contained in this object"""
        ids = set()
        stats = self.stats
        # Slots item0 through item6; empty slots (0) are excluded
        for slot in range(7):
            item = getattr(stats, "item%d" % slot)
            if item:
                ids.add(item)
        return ids
@cassiopeia.type.core.common.inheritdocs
class RecentGames(cassiopeia.type.dto.common.CassiopeiaDto):
    """
    games         list<Game>    collection of recent games played (max 10)
    summonerId    int           summoner ID
    """
    def __init__(self, dictionary):
        # Promote raw dicts to Game instances; skip falsy entries
        raw_games = dictionary.get("games", [])
        self.games = [g if isinstance(g, Game) else Game(g)
                      for g in raw_games if g]
        self.summonerId = dictionary.get("summonerId", 0)

    @property
    def champion_ids(self):
        """Gets all champion IDs contained in this object"""
        ids = set()
        for game in self.games:
            ids.update(game.champion_ids)
        return ids

    @property
    def summoner_ids(self):
        """Gets all summoner IDs contained in this object"""
        ids = {self.summonerId}
        for game in self.games:
            ids.update(game.summoner_ids)
        return ids

    @property
    def summoner_spell_ids(self):
        """Gets all summoner spell IDs contained in this object"""
        ids = set()
        for game in self.games:
            ids.update(game.summoner_spell_ids)
        return ids

    @property
    def item_ids(self):
        """Gets all item IDs contained in this object"""
        ids = set()
        for game in self.games:
            ids.update(game.item_ids)
        return ids
###############################
# Dynamic SQLAlchemy bindings #
###############################
def _sa_bind_raw_stats():
    """Rebind the module-level RawStats to a SQLAlchemy-mapped subclass.

    Replaces RawStats with a version that also inherits BaseDB so instances
    can be persisted to the "GameRawStats" table. Only called when sqlalchemy
    is importable (see the guard at the top of this module).
    """
    global RawStats
    @cassiopeia.type.core.common.inheritdocs
    class RawStats(RawStats, cassiopeia.type.dto.common.BaseDB):
        __tablename__ = "GameRawStats"
        assists = sqlalchemy.Column(sqlalchemy.Integer)
        barracksKilled = sqlalchemy.Column(sqlalchemy.Integer)
        championsKilled = sqlalchemy.Column(sqlalchemy.Integer)
        combatPlayerScore = sqlalchemy.Column(sqlalchemy.Integer)
        consumablesPurchased = sqlalchemy.Column(sqlalchemy.Integer)
        damageDealtPlayer = sqlalchemy.Column(sqlalchemy.Integer)
        doubleKills = sqlalchemy.Column(sqlalchemy.Integer)
        firstBlood = sqlalchemy.Column(sqlalchemy.Integer)
        gold = sqlalchemy.Column(sqlalchemy.Integer)
        goldEarned = sqlalchemy.Column(sqlalchemy.Integer)
        goldSpent = sqlalchemy.Column(sqlalchemy.Integer)
        item0 = sqlalchemy.Column(sqlalchemy.Integer)
        item1 = sqlalchemy.Column(sqlalchemy.Integer)
        item2 = sqlalchemy.Column(sqlalchemy.Integer)
        item3 = sqlalchemy.Column(sqlalchemy.Integer)
        item4 = sqlalchemy.Column(sqlalchemy.Integer)
        item5 = sqlalchemy.Column(sqlalchemy.Integer)
        item6 = sqlalchemy.Column(sqlalchemy.Integer)
        itemsPurchased = sqlalchemy.Column(sqlalchemy.Integer)
        killingSprees = sqlalchemy.Column(sqlalchemy.Integer)
        largestCriticalStrike = sqlalchemy.Column(sqlalchemy.Integer)
        largestKillingSpree = sqlalchemy.Column(sqlalchemy.Integer)
        largestMultiKill = sqlalchemy.Column(sqlalchemy.Integer)
        legendaryItemsCreated = sqlalchemy.Column(sqlalchemy.Integer)
        level = sqlalchemy.Column(sqlalchemy.Integer)
        magicDamageDealtPlayer = sqlalchemy.Column(sqlalchemy.Integer)
        magicDamageDealtToChampions = sqlalchemy.Column(sqlalchemy.Integer)
        magicDamageTaken = sqlalchemy.Column(sqlalchemy.Integer)
        minionsDenied = sqlalchemy.Column(sqlalchemy.Integer)
        minionsKilled = sqlalchemy.Column(sqlalchemy.Integer)
        neutralMinionsKilled = sqlalchemy.Column(sqlalchemy.Integer)
        neutralMinionsKilledEnemyJungle = sqlalchemy.Column(sqlalchemy.Integer)
        neutralMinionsKilledYourJungle = sqlalchemy.Column(sqlalchemy.Integer)
        nexusKilled = sqlalchemy.Column(sqlalchemy.Boolean)
        nodeCapture = sqlalchemy.Column(sqlalchemy.Integer)
        nodeCaptureAssist = sqlalchemy.Column(sqlalchemy.Integer)
        nodeNeutralize = sqlalchemy.Column(sqlalchemy.Integer)
        nodeNeutralizeAssist = sqlalchemy.Column(sqlalchemy.Integer)
        numDeaths = sqlalchemy.Column(sqlalchemy.Integer)
        numItemsBought = sqlalchemy.Column(sqlalchemy.Integer)
        objectivePlayerScore = sqlalchemy.Column(sqlalchemy.Integer)
        pentaKills = sqlalchemy.Column(sqlalchemy.Integer)
        physicalDamageDealtPlayer = sqlalchemy.Column(sqlalchemy.Integer)
        physicalDamageDealtToChampions = sqlalchemy.Column(sqlalchemy.Integer)
        physicalDamageTaken = sqlalchemy.Column(sqlalchemy.Integer)
        playerPosition = sqlalchemy.Column(sqlalchemy.Integer)
        playerRole = sqlalchemy.Column(sqlalchemy.Integer)
        quadraKills = sqlalchemy.Column(sqlalchemy.Integer)
        sightWardsBought = sqlalchemy.Column(sqlalchemy.Integer)
        spell1Cast = sqlalchemy.Column(sqlalchemy.Integer)
        spell2Cast = sqlalchemy.Column(sqlalchemy.Integer)
        spell3Cast = sqlalchemy.Column(sqlalchemy.Integer)
        spell4Cast = sqlalchemy.Column(sqlalchemy.Integer)
        summonSpell1Cast = sqlalchemy.Column(sqlalchemy.Integer)
        summonSpell2Cast = sqlalchemy.Column(sqlalchemy.Integer)
        superMonsterKilled = sqlalchemy.Column(sqlalchemy.Integer)
        team = sqlalchemy.Column(sqlalchemy.Integer)
        teamObjective = sqlalchemy.Column(sqlalchemy.Integer)
        timePlayed = sqlalchemy.Column(sqlalchemy.Integer)
        totalDamageDealt = sqlalchemy.Column(sqlalchemy.Integer)
        totalDamageDealtToChampions = sqlalchemy.Column(sqlalchemy.Integer)
        totalDamageTaken = sqlalchemy.Column(sqlalchemy.Integer)
        totalHeal = sqlalchemy.Column(sqlalchemy.Integer)
        totalPlayerScore = sqlalchemy.Column(sqlalchemy.Integer)
        totalScoreRank = sqlalchemy.Column(sqlalchemy.Integer)
        totalTimeCrowdControlDealt = sqlalchemy.Column(sqlalchemy.Integer)
        totalUnitsHealed = sqlalchemy.Column(sqlalchemy.Integer)
        tripleKills = sqlalchemy.Column(sqlalchemy.Integer)
        trueDamageDealtPlayer = sqlalchemy.Column(sqlalchemy.Integer)
        trueDamageDealtToChampions = sqlalchemy.Column(sqlalchemy.Integer)
        trueDamageTaken = sqlalchemy.Column(sqlalchemy.Integer)
        turretsKilled = sqlalchemy.Column(sqlalchemy.Integer)
        unrealKills = sqlalchemy.Column(sqlalchemy.Integer)
        victoryPointTotal = sqlalchemy.Column(sqlalchemy.Integer)
        visionWardsBought = sqlalchemy.Column(sqlalchemy.Integer)
        wardKilled = sqlalchemy.Column(sqlalchemy.Integer)
        wardPlaced = sqlalchemy.Column(sqlalchemy.Integer)
        win = sqlalchemy.Column(sqlalchemy.Boolean)
        # Surrogate primary key; not part of the Riot API payload
        _id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
        # Owning Game row; deleting the Game cascades to this stats row
        _game_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey("Game.gameId", ondelete="CASCADE"))
def _sa_bind_player():
    """Rebind the module-level Player to a SQLAlchemy-mapped subclass.

    Replaces Player with a version that also inherits BaseDB so instances
    can be persisted to the "GamePlayer" table.
    """
    global Player
    @cassiopeia.type.core.common.inheritdocs
    class Player(Player, cassiopeia.type.dto.common.BaseDB):
        __tablename__ = "GamePlayer"
        championId = sqlalchemy.Column(sqlalchemy.Integer)
        summonerId = sqlalchemy.Column(sqlalchemy.Integer)
        teamId = sqlalchemy.Column(sqlalchemy.Integer)
        # Surrogate primary key; not part of the Riot API payload
        _id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
        # Owning Game row; deleting the Game cascades to its players
        _game_id = sqlalchemy.Column(sqlalchemy.Integer, sqlalchemy.ForeignKey("Game.gameId", ondelete="CASCADE"))
def _sa_bind_game():
    """Rebind the module-level Game to a SQLAlchemy-mapped subclass.

    Replaces Game with a version that also inherits BaseDB so instances can
    be persisted to the "Game" table, with fellowPlayers and stats stored in
    child tables that are deleted along with the game row.
    """
    global Game
    @cassiopeia.type.core.common.inheritdocs
    class Game(Game, cassiopeia.type.dto.common.BaseDB):
        __tablename__ = "Game"
        championId = sqlalchemy.Column(sqlalchemy.Integer)
        # Epoch milliseconds; needs BigInteger range
        createDate = sqlalchemy.Column(sqlalchemy.BigInteger)
        # One-to-many child rows in GamePlayer, removed when the game is deleted
        fellowPlayers = sqlalchemy.orm.relationship("cassiopeia.type.dto.game.Player", cascade="all, delete-orphan", passive_deletes=True)
        gameId = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
        gameMode = sqlalchemy.Column(sqlalchemy.String(30))
        gameType = sqlalchemy.Column(sqlalchemy.String(30))
        invalid = sqlalchemy.Column(sqlalchemy.Boolean)
        ipEarned = sqlalchemy.Column(sqlalchemy.Integer)
        level = sqlalchemy.Column(sqlalchemy.Integer)
        mapId = sqlalchemy.Column(sqlalchemy.Integer)
        spell1 = sqlalchemy.Column(sqlalchemy.Integer)
        spell2 = sqlalchemy.Column(sqlalchemy.Integer)
        # One-to-one child row in GameRawStats, removed when the game is deleted
        stats = sqlalchemy.orm.relationship("cassiopeia.type.dto.game.RawStats", uselist=False, cascade="all, delete-orphan", passive_deletes=True)
        subType = sqlalchemy.Column(sqlalchemy.String(30))
        teamId = sqlalchemy.Column(sqlalchemy.Integer)
def _sa_bind_all():
    """Rebind every DTO in this module to its SQLAlchemy-mapped variant."""
    for binder in (_sa_bind_raw_stats, _sa_bind_player, _sa_bind_game):
        binder()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from decimal import Decimal
import mock
from django.forms import ValidationError
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from vcr import VCR
from paypal.pro.exceptions import PayPalFailure
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import VERSION, PayPalError, PayPalWPP
from paypal.pro.signals import payment_was_successful
from paypal.pro.views import PayPalPro
from .settings import TEMPLATE_DIRS, TEMPLATES
# Shared request fixtures used by the WPP tests below.
RF = RequestFactory()
# NOTE(review): REMOTE_ADDR normally carries a bare IP; the ":8000" port
# suffix looks unintentional -- confirm nothing depends on this exact value.
REQUEST = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
# VCR records/replays PayPal HTTP traffic; cassettes are saved as .yaml files.
vcr = VCR(path_transformer=VCR.ensure_suffix('.yaml'))
class DummyPayPalWPP(PayPalWPP):
    """Stand-in for PayPalWPP used by the tests below.

    Currently identical to PayPalWPP: network interactions are supplied by
    VCR cassettes and mock patches rather than by overriding ``_request``
    here. (A stale commented-out canned-response implementation was removed.)
    """
    pass
class CreditCardFieldTest(TestCase):
    """Unit tests for paypal.pro.fields.CreditCardField."""

    def test_CreditCardField(self):
        field = CreditCardField()
        field.clean('4797503429879309')
        # A valid number is classified by issuer
        self.assertEqual(field.card_type, "Visa")
        # A number failing the checksum is rejected
        with self.assertRaises(ValidationError):
            CreditCardField().clean('1234567890123455')

    def test_invalidCreditCards(self):
        # Dashes are stripped during cleaning
        cleaned = CreditCardField().clean('4797-5034-2987-9309')
        self.assertEqual(cleaned, '4797503429879309')
def ppp_wrapper(request, handler=None):
    """Run *request* through a minimally configured PayPalPro view.

    handler -- optional NVP callback invoked on success; defaults to a no-op.
    Returns the HttpResponse produced by the view.
    """
    item = {"paymentrequest_0_amt": "10.00",
            "inv": "inventory",
            "custom": "tracking",
            "cancelurl": "http://foo.com/cancel",
            "returnurl": "http://foo.com/return"}
    if handler is None:
        # PEP 8 (E731): use a def rather than an assigned lambda
        def handler(nvp):
            return nvp  # NOP
    ppp = PayPalPro(
        item=item,                             # what you're selling
        payment_template="payment.html",       # template name for payment
        confirm_template="confirmation.html",  # template name for confirmation
        success_url="/success/",               # redirect location after success
        nvp_handler=handler
    )
    return ppp(request)
@override_settings(TEMPLATE_DIRS=TEMPLATE_DIRS,
                   TEMPLATES=TEMPLATES)
class PayPalProTest(TestCase):
    """Tests for the PayPalPro view wrapper (see ppp_wrapper)."""

    @vcr.use_cassette()
    def test_get(self):
        """A plain GET renders the payment page."""
        resp = ppp_wrapper(RF.get('/'))
        self.assertContains(resp, 'Show me the money')
        self.assertEqual(resp.status_code, 200)

    @vcr.use_cassette()
    def test_get_redirect(self):
        """GET with express=1 redirects to PayPal."""
        resp = ppp_wrapper(RF.get('/', {'express': '1'}))
        self.assertEqual(resp.status_code, 302)

    @vcr.use_cassette()
    def test_validate_confirm_form_error(self):
        """A failing confirmation re-renders with the processing error."""
        resp = ppp_wrapper(RF.post('/', {'token': '123', 'PayerID': '456'}))
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.context_data.get('errors', ''),
                         PayPalPro.errors['processing'])

    @vcr.use_cassette()
    @mock.patch.object(PayPalWPP, 'doExpressCheckoutPayment', autospec=True)
    def test_validate_confirm_form_ok(self, doExpressCheckoutPayment):
        """A successful confirmation redirects and invokes the NVP handler once."""
        nvp = {'mock': True}
        doExpressCheckoutPayment.return_value = nvp
        received = []

        def handler(nvp):
            received.append(nvp)

        resp = ppp_wrapper(RF.post('/', {'token': '123', 'PayerID': '456'}),
                           handler=handler)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp['Location'], '/success/')
        self.assertEqual(len(received), 1)
class PayPalWPPTest(TestCase):
    def setUp(self):
        """Build the common payment dicts and a WPP client for each test."""
        # DoDirectPayment-style parameters (single "amt" field)
        self.item = {
            'amt': '9.95',
            'inv': 'inv',
            'custom': 'custom',
            'next': 'http://www.example.com/next/',
            'returnurl': 'http://www.example.com/pay/',
            'cancelurl': 'http://www.example.com/cancel/'
        }
        # Handle different parameters for Express Checkout
        self.ec_item = {
            'paymentrequest_0_amt': '9.95',
            'inv': 'inv',
            'custom': 'custom',
            'next': 'http://www.example.com/next/',
            'returnurl': 'http://www.example.com/pay/',
            'cancelurl': 'http://www.example.com/cancel/'
        }
        self.wpp = DummyPayPalWPP(REQUEST)
@vcr.use_cassette()
def test_doDirectPayment_missing_params(self):
data = {'firstname': 'Chewbacca'}
self.assertRaises(PayPalError, self.wpp.doDirectPayment, data)
    @vcr.use_cassette()
    def test_doDirectPayment_valid(self):
        """A complete, valid card payment succeeds (recorded sandbox response)."""
        # Sandbox test card details
        data = {
            'firstname': 'Brave',
            'lastname': 'Star',
            'street': '1 Main St',
            'city': u'San Jos\xe9',
            'state': 'CA',
            'countrycode': 'US',
            'zip': '95131',
            'acct': '4032039938039650',
            'expdate': '112021',
            'cvv2': '',
            'creditcardtype': 'visa',
            'ipaddress': '10.0.1.199', }
        data.update(self.item)
        self.assertTrue(self.wpp.doDirectPayment(data))
    @vcr.use_cassette()
    def test_doDirectPayment_invalid(self):
        """A bad card number makes PayPal reject the payment with PayPalFailure."""
        # Deliberately invalid card number ('acct')
        data = {
            'firstname': 'Epic',
            'lastname': 'Fail',
            'street': '100 Georgia St',
            'city': 'Vancouver',
            'state': 'BC',
            'countrycode': 'CA',
            'zip': 'V6V 1V1',
            'expdate': '012019',
            'cvv2': '999',
            'acct': '1234567890',
            'creditcardtype': 'visa',
            'ipaddress': '10.0.1.199', }
        data.update(self.item)
        self.assertRaises(PayPalFailure, self.wpp.doDirectPayment, data)
@vcr.use_cassette()
def test_doDirectPayment_valid_with_signal(self):
data = {
'firstname': 'Brave',
'lastname': 'Star',
'street': '1 Main St',
'city': u'San Jos\xe9',
'state': 'CA',
'countrycode': 'US',
'zip': '95131',
'acct': '4032039938039650',
'expdate': '112021',
'cvv2': '',
'creditcardtype': 'visa',
'ipaddress': '10.0.1.199', }
data.update(self.item)
self.got_signal = False
self.signal_obj = None
def handle_signal(sender, **kwargs):
self.got_signal = True
self.signal_obj = sender
payment_was_successful.connect(handle_signal)
self.assertTrue(self.wpp.doDirectPayment(data))
self.assertTrue(self.got_signal)
@vcr.use_cassette()
def test_setExpressCheckout(self):
nvp_obj = self.wpp.setExpressCheckout(self.ec_item)
self.assertEqual(nvp_obj.ack, "Success")
    @vcr.use_cassette()
    @mock.patch.object(PayPalWPP, '_request', autospec=True)
    def test_setExpressCheckout_deprecation(self, mock_request_object):
        """Passing the legacy 'amt' key warns but is translated to
        PAYMENTREQUEST_0_AMT and still processed."""
        # NOTE(review): the '%s' in this canned response is never %-formatted;
        # the assertions below don't read the version field, but confirm this
        # literal is intentional.
        mock_request_object.return_value = 'ack=Success&token=EC-XXXX&version=%s'
        # Replace the modern key with the deprecated 'amt' key
        item = self.ec_item.copy()
        item.update({'amt': item['paymentrequest_0_amt']})
        del item['paymentrequest_0_amt']
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter("always")
            nvp_obj = self.wpp.setExpressCheckout(item)
        # Make sure our warning was given
        self.assertTrue(any(warned.category == DeprecationWarning
                            for warned in warning_list))
        # Make sure the method still went through
        call_args = mock_request_object.call_args
        self.assertIn('PAYMENTREQUEST_0_AMT=%s' % item['amt'],
                      call_args[0][1])
        self.assertEqual(nvp_obj.ack, "Success")
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doExpressCheckoutPayment(self, mock_request_object):
ec_token = 'EC-1234567890'
payerid = 'LXYZABC1234'
item = self.ec_item.copy()
item.update({'token': ec_token, 'payerid': payerid})
mock_request_object.return_value = 'ack=Success&token=%s&version=%spaymentinfo_0_amt=%s' % \
(ec_token, VERSION, self.ec_item['paymentrequest_0_amt'])
wpp = PayPalWPP(REQUEST)
wpp.doExpressCheckoutPayment(item)
call_args = mock_request_object.call_args
self.assertIn('VERSION=%s' % VERSION, call_args[0][1])
self.assertIn('METHOD=DoExpressCheckoutPayment', call_args[0][1])
self.assertIn('TOKEN=%s' % ec_token, call_args[0][1])
self.assertIn('PAYMENTREQUEST_0_AMT=%s' % item['paymentrequest_0_amt'],
call_args[0][1])
self.assertIn('PAYERID=%s' % payerid, call_args[0][1])
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doExpressCheckoutPayment_invalid(self, mock_request_object):
ec_token = 'EC-1234567890'
payerid = 'LXYZABC1234'
item = self.ec_item.copy()
item.update({'token': ec_token, 'payerid': payerid})
mock_request_object.return_value = 'ack=Failure&l_errorcode=42&l_longmessage0=Broken'
wpp = PayPalWPP(REQUEST)
with self.assertRaises(PayPalFailure):
wpp.doExpressCheckoutPayment(item)
    @vcr.use_cassette()
    @mock.patch.object(PayPalWPP, '_request', autospec=True)
    def test_doExpressCheckoutPayment_deprecation(self, mock_request_object):
        """Passing the legacy 'amt' key warns but is translated to
        PAYMENTREQUEST_0_AMT and still processed."""
        # NOTE(review): the '%s' in this canned response is never %-formatted;
        # the assertions below don't read the version field, but confirm this
        # literal is intentional.
        mock_request_object.return_value = 'ack=Success&token=EC-XXXX&version=%s'
        ec_token = 'EC-1234567890'
        payerid = 'LXYZABC1234'
        # Replace the modern key with the deprecated 'amt' key
        item = self.ec_item.copy()
        item.update({'amt': item['paymentrequest_0_amt'],
                     'token': ec_token,
                     'payerid': payerid})
        del item['paymentrequest_0_amt']
        with warnings.catch_warnings(record=True) as warning_list:
            warnings.simplefilter("always")
            nvp_obj = self.wpp.doExpressCheckoutPayment(item)
        # Make sure our warning was given
        self.assertTrue(any(warned.category == DeprecationWarning
                            for warned in warning_list))
        # Make sure the method still went through
        call_args = mock_request_object.call_args
        self.assertIn('PAYMENTREQUEST_0_AMT=%s' % item['amt'],
                      call_args[0][1])
        self.assertEqual(nvp_obj.ack, "Success")
    @vcr.use_cassette()
    @mock.patch.object(PayPalWPP, '_request', autospec=True)
    def test_createBillingAgreement(self, mock_request_object):
        """CreateBillingAgreement succeeds, and a Failure ack raises PayPalFailure."""
        mock_request_object.return_value = 'ack=Success&billingagreementid=B-XXXXX&version=%s' % VERSION
        wpp = PayPalWPP(REQUEST)
        nvp = wpp.createBillingAgreement({'token': 'dummy token'})
        call_args = mock_request_object.call_args
        self.assertIn('VERSION=%s' % VERSION, call_args[0][1])
        self.assertIn('METHOD=CreateBillingAgreement', call_args[0][1])
        # The token is URL-encoded into the NVP body, hence '+' for the space.
        self.assertIn('TOKEN=dummy+token', call_args[0][1])
        self.assertEqual(nvp.method, 'CreateBillingAgreement')
        self.assertEqual(nvp.ack, 'Success')
        # Second call: a Failure ack must surface as PayPalFailure.
        mock_request_object.return_value = 'ack=Failure&l_errorcode=42&l_longmessage0=Broken'
        with self.assertRaises(PayPalFailure):
            nvp = wpp.createBillingAgreement({'token': 'dummy token'})
    @vcr.use_cassette()
    @mock.patch.object(PayPalWPP, '_request', autospec=True)
    def test_doReferenceTransaction_valid(self, mock_request_object):
        """DoReferenceTransaction sends the reference id and amount to PayPal."""
        reference_id = 'B-1234'
        amount = Decimal('10.50')
        mock_request_object.return_value = (
            'ack=Success&paymentstatus=Completed&amt=%s&version=%s&billingagreementid=%s' %
            (amount, VERSION, reference_id))
        wpp = PayPalWPP(REQUEST)
        nvp = wpp.doReferenceTransaction({'referenceid': reference_id,
                                          'amt': amount})
        # Inspect the raw NVP request body that was handed to _request.
        call_args = mock_request_object.call_args
        self.assertIn('VERSION=%s' % VERSION, call_args[0][1])
        self.assertIn('METHOD=DoReferenceTransaction', call_args[0][1])
        self.assertIn('REFERENCEID=%s' % reference_id, call_args[0][1])
        self.assertIn('AMT=%s' % amount, call_args[0][1])
        self.assertEqual(nvp.method, 'DoReferenceTransaction')
        self.assertEqual(nvp.ack, 'Success')
@vcr.use_cassette()
@mock.patch.object(PayPalWPP, '_request', autospec=True)
def test_doReferenceTransaction_invalid(self, mock_request_object):
reference_id = 'B-1234'
amount = Decimal('10.50')
mock_request_object.return_value = 'ack=Failure&l_errorcode=42&l_longmessage0=Broken'
wpp = PayPalWPP(REQUEST)
with self.assertRaises(PayPalFailure):
wpp.doReferenceTransaction({'referenceid': reference_id,
'amt': amount})
# -- DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'EC-6HW17184NE0084127'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': 'EC-6HW17184NE0084127',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'}
|
|
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import Mock, patch
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.constants import EventTypes
from synapse.rest.client import (
directory,
login,
profile,
room,
room_upgrade_rest_servlet,
)
from synapse.server import HomeServer
from synapse.types import UserID
from synapse.util import Clock
from tests import unittest
class _ShadowBannedBase(unittest.HomeserverTestCase):
    """Common fixture: one shadow-banned user and one regular user."""
    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        # Create two users, one of which is shadow-banned.
        self.banned_user_id = self.register_user("banned", "test")
        self.banned_access_token = self.login("banned", "test")
        self.store = self.hs.get_datastores().main
        # Flag the first user as shadow-banned directly in the datastore.
        self.get_success(
            self.store.set_shadow_banned(UserID.from_string(self.banned_user_id), True)
        )
        self.other_user_id = self.register_user("otheruser", "pass")
        self.other_access_token = self.login("otheruser", "pass")
# To avoid the tests timing out don't add a delay to "annoy the requester".
@patch("random.randint", new=lambda a, b: 0)
class RoomTestCase(_ShadowBannedBase):
    """Room actions by a shadow-banned user appear to succeed but have no effect."""
    servlets = [
        synapse.rest.admin.register_servlets_for_client_rest_resource,
        directory.register_servlets,
        login.register_servlets,
        room.register_servlets,
        room_upgrade_rest_servlet.register_servlets,
    ]
    def test_invite(self) -> None:
        """Invites from shadow-banned users don't actually get sent."""
        # The create works fine.
        room_id = self.helper.create_room_as(
            self.banned_user_id, tok=self.banned_access_token
        )
        # Inviting the user completes successfully.
        self.helper.invite(
            room=room_id,
            src=self.banned_user_id,
            tok=self.banned_access_token,
            targ=self.other_user_id,
        )
        # But the user wasn't actually invited.
        invited_rooms = self.get_success(
            self.store.get_invited_rooms_for_local_user(self.other_user_id)
        )
        self.assertEqual(invited_rooms, [])
    def test_invite_3pid(self) -> None:
        """Ensure that a 3PID invite does not attempt to contact the identity server."""
        identity_handler = self.hs.get_identity_handler()
        # Fail loudly if the identity server is ever consulted.
        identity_handler.lookup_3pid = Mock(
            side_effect=AssertionError("This should not get called")
        )
        # The create works fine.
        room_id = self.helper.create_room_as(
            self.banned_user_id, tok=self.banned_access_token
        )
        # Inviting the user completes successfully.
        channel = self.make_request(
            "POST",
            "/rooms/%s/invite" % (room_id,),
            {"id_server": "test", "medium": "email", "address": "test@test.test"},
            access_token=self.banned_access_token,
        )
        self.assertEqual(200, channel.code, channel.result)
        # This should have raised an error earlier, but double check this wasn't called.
        identity_handler.lookup_3pid.assert_not_called()
    def test_create_room(self) -> None:
        """Invitations during a room creation should be discarded, but the room still gets created."""
        # The room creation is successful.
        channel = self.make_request(
            "POST",
            "/_matrix/client/r0/createRoom",
            {"visibility": "public", "invite": [self.other_user_id]},
            access_token=self.banned_access_token,
        )
        self.assertEqual(200, channel.code, channel.result)
        room_id = channel.json_body["room_id"]
        # But the user wasn't actually invited.
        invited_rooms = self.get_success(
            self.store.get_invited_rooms_for_local_user(self.other_user_id)
        )
        self.assertEqual(invited_rooms, [])
        # Since a real room was created, the other user should be able to join it.
        self.helper.join(room_id, self.other_user_id, tok=self.other_access_token)
        # Both users should be in the room.
        users = self.get_success(self.store.get_users_in_room(room_id))
        self.assertCountEqual(users, ["@banned:test", "@otheruser:test"])
    def test_message(self) -> None:
        """Messages from shadow-banned users don't actually get sent."""
        room_id = self.helper.create_room_as(
            self.other_user_id, tok=self.other_access_token
        )
        # The user should be in the room.
        self.helper.join(room_id, self.banned_user_id, tok=self.banned_access_token)
        # Sending a message should complete successfully.
        result = self.helper.send_event(
            room_id=room_id,
            type=EventTypes.Message,
            content={"msgtype": "m.text", "body": "with right label"},
            tok=self.banned_access_token,
        )
        self.assertIn("event_id", result)
        event_id = result["event_id"]
        # The event was accepted but must not show up among the room's
        # latest (forward-extremity) events.
        latest_events = self.get_success(
            self.store.get_latest_event_ids_in_room(room_id)
        )
        self.assertNotIn(event_id, latest_events)
    def test_upgrade(self) -> None:
        """A room upgrade should fail, but look like it succeeded."""
        # The create works fine.
        room_id = self.helper.create_room_as(
            self.banned_user_id, tok=self.banned_access_token
        )
        channel = self.make_request(
            "POST",
            "/_matrix/client/r0/rooms/%s/upgrade" % (room_id,),
            {"new_version": "6"},
            access_token=self.banned_access_token,
        )
        self.assertEqual(200, channel.code, channel.result)
        # A new room_id should be returned.
        self.assertIn("replacement_room", channel.json_body)
        new_room_id = channel.json_body["replacement_room"]
        # It doesn't really matter what API we use here, we just want to assert
        # that the room doesn't exist.
        summary = self.get_success(self.store.get_room_summary(new_room_id))
        # The summary should be empty since the room doesn't exist.
        self.assertEqual(summary, {})
    def test_typing(self) -> None:
        """Typing notifications should not be propagated into the room."""
        # The create works fine.
        room_id = self.helper.create_room_as(
            self.banned_user_id, tok=self.banned_access_token
        )
        channel = self.make_request(
            "PUT",
            "/rooms/%s/typing/%s" % (room_id, self.banned_user_id),
            {"typing": True, "timeout": 30000},
            access_token=self.banned_access_token,
        )
        self.assertEqual(200, channel.code)
        # There should be no typing events.
        event_source = self.hs.get_event_sources().sources.typing
        self.assertEqual(event_source.get_current_key(), 0)
        # The other user can join and send typing events.
        self.helper.join(room_id, self.other_user_id, tok=self.other_access_token)
        channel = self.make_request(
            "PUT",
            "/rooms/%s/typing/%s" % (room_id, self.other_user_id),
            {"typing": True, "timeout": 30000},
            access_token=self.other_access_token,
        )
        self.assertEqual(200, channel.code)
        # These appear in the room.
        self.assertEqual(event_source.get_current_key(), 1)
        events = self.get_success(
            event_source.get_new_events(
                user=UserID.from_string(self.other_user_id),
                from_key=0,
                limit=None,
                room_ids=[room_id],
                is_guest=False,
            )
        )
        self.assertEqual(
            events[0],
            [
                {
                    "type": "m.typing",
                    "room_id": room_id,
                    "content": {"user_ids": [self.other_user_id]},
                }
            ],
        )
# To avoid the tests timing out don't add a delay to "annoy the requester".
@patch("random.randint", new=lambda a, b: 0)
class ProfileTestCase(_ShadowBannedBase):
    """Profile updates by a shadow-banned user succeed but never reach rooms."""
    servlets = [
        synapse.rest.admin.register_servlets_for_client_rest_resource,
        login.register_servlets,
        profile.register_servlets,
        room.register_servlets,
    ]
    def test_displayname(self) -> None:
        """Profile changes should succeed, but don't end up in a room."""
        original_display_name = "banned"
        new_display_name = "new name"
        # Join a room.
        room_id = self.helper.create_room_as(
            self.banned_user_id, tok=self.banned_access_token
        )
        # The update should succeed.
        channel = self.make_request(
            "PUT",
            "/_matrix/client/r0/profile/%s/displayname" % (self.banned_user_id,),
            {"displayname": new_display_name},
            access_token=self.banned_access_token,
        )
        self.assertEqual(200, channel.code, channel.result)
        self.assertEqual(channel.json_body, {})
        # The user's display name should be updated.
        channel = self.make_request(
            "GET", "/profile/%s/displayname" % (self.banned_user_id,)
        )
        self.assertEqual(channel.code, 200, channel.result)
        self.assertEqual(channel.json_body["displayname"], new_display_name)
        # But the display name in the room should not be.
        message_handler = self.hs.get_message_handler()
        event = self.get_success(
            message_handler.get_room_data(
                self.banned_user_id,
                room_id,
                "m.room.member",
                self.banned_user_id,
            )
        )
        # The member event still carries the original display name.
        self.assertEqual(
            event.content, {"membership": "join", "displayname": original_display_name}
        )
    def test_room_displayname(self) -> None:
        """Changes to state events for a room should be processed, but not end up in the room."""
        original_display_name = "banned"
        new_display_name = "new name"
        # Join a room.
        room_id = self.helper.create_room_as(
            self.banned_user_id, tok=self.banned_access_token
        )
        # The update should succeed.
        channel = self.make_request(
            "PUT",
            "/_matrix/client/r0/rooms/%s/state/m.room.member/%s"
            % (room_id, self.banned_user_id),
            {"membership": "join", "displayname": new_display_name},
            access_token=self.banned_access_token,
        )
        self.assertEqual(200, channel.code, channel.result)
        self.assertIn("event_id", channel.json_body)
        # The display name in the room should not be changed.
        message_handler = self.hs.get_message_handler()
        event = self.get_success(
            message_handler.get_room_data(
                self.banned_user_id,
                room_id,
                "m.room.member",
                self.banned_user_id,
            )
        )
        self.assertEqual(
            event.content, {"membership": "join", "displayname": original_display_name}
        )
|
|
import datetime as dt
import pytest
import pytz
from stix2.base import _STIXBase
from stix2.exceptions import (
CustomContentError, ExtraPropertiesError, STIXError,
)
from stix2.properties import (
BinaryProperty, BooleanProperty, EmbeddedObjectProperty, EnumProperty,
FloatProperty, HashesProperty, HexProperty, IntegerProperty, ListProperty,
OpenVocabProperty, Property, StringProperty, TimestampProperty,
TypeProperty,
)
def test_property():
    """A bare Property is optional and passes values through unchanged."""
    prop = Property()
    assert prop.required is False
    assert prop.clean('foo') == ('foo', False)
    assert prop.clean(3) == (3, False)
def test_basic_clean():
    """A custom clean() both validates and returns the value."""
    class FortyTwoProp(Property):
        def clean(self, value):
            # Guard-clause form: reject everything except 42.
            if value != 42:
                raise ValueError("Must be 42")
            return value
    prop = FortyTwoProp()
    assert prop.clean(42) == 42
    with pytest.raises(ValueError):
        prop.clean(41)
def test_property_default():
    """Subclasses may provide a default() hook."""
    class DefaultProp(Property):
        def default(self):
            return 77
    assert DefaultProp().default() == 77
def test_fixed_property():
    """A fixed-value property accepts only its fixed value."""
    prop = Property(fixed="2.0")
    assert prop.clean("2.0")
    # Anything other than the exact fixed value must raise.  (The original
    # wrapped these calls in dead `assert ... is False` statements that could
    # never execute; the raise is the behavior under test.)
    with pytest.raises(ValueError):
        prop.clean("x")
    with pytest.raises(ValueError):
        prop.clean(2.0)
    # The default is the fixed value itself, and it round-trips clean().
    assert prop.default() == "2.0"
    assert prop.clean(prop.default())
def test_property_fixed_and_required():
    """A property cannot both have a default and be required."""
    with pytest.raises(STIXError):
        Property(default=lambda: 3, required=True)
def test_list_property_property_type():
    """ListProperty wraps an element property; empty lists are invalid."""
    list_prop = ListProperty(StringProperty)
    assert list_prop.clean(['abc', 'xyz'], False) == (['abc', 'xyz'], False)
    with pytest.raises(ValueError):
        list_prop.clean([], False)
def test_list_property_property_type_custom():
    """Custom-content elements are only accepted when allow_custom is True."""
    class TestObj(_STIXBase):
        _type = "test"
        _properties = {
            "foo": StringProperty(),
        }
    p = ListProperty(EmbeddedObjectProperty(type=TestObj))
    objs_custom = [
        TestObj(foo="abc", bar=123, allow_custom=True),
        TestObj(foo="xyz"),
    ]
    # With allow_custom=True the custom content passes through and is flagged.
    result = p.clean(objs_custom, True)
    assert result == (objs_custom, True)
    with pytest.raises(CustomContentError):
        p.clean(objs_custom, False)
    dicts_custom = [
        {"foo": "abc", "bar": 123},
        {"foo": "xyz"},
    ]
    # Plain dicts are promoted to TestObj instances while cleaning.
    result = p.clean(dicts_custom, True)
    assert result == (objs_custom, True)
    with pytest.raises(ExtraPropertiesError):
        p.clean(dicts_custom, False)
def test_list_property_object_type():
    """ListProperty over a _STIXBase type accepts instances and coercible dicts."""
    class TestObj(_STIXBase):
        _type = "test"
        _properties = {
            "foo": StringProperty(),
        }
    p = ListProperty(TestObj)
    objs = [TestObj(foo="abc"), TestObj(foo="xyz")]
    result = p.clean(objs, False)
    assert result == (objs, False)
    # Dicts are converted to TestObj instances while cleaning.
    dicts = [{"foo": "abc"}, {"foo": "xyz"}]
    result = p.clean(dicts, False)
    assert result == (objs, False)
def test_list_property_object_type_custom():
    """Same as the property-type test, but with a _STIXBase element type."""
    class TestObj(_STIXBase):
        _type = "test"
        _properties = {
            "foo": StringProperty(),
        }
    p = ListProperty(TestObj)
    objs_custom = [
        TestObj(foo="abc", bar=123, allow_custom=True),
        TestObj(foo="xyz"),
    ]
    # Custom content passes through only with allow_custom=True.
    result = p.clean(objs_custom, True)
    assert result == (objs_custom, True)
    with pytest.raises(CustomContentError):
        p.clean(objs_custom, False)
    dicts_custom = [
        {"foo": "abc", "bar": 123},
        {"foo": "xyz"},
    ]
    result = p.clean(dicts_custom, True)
    assert result == (objs_custom, True)
    with pytest.raises(ExtraPropertiesError):
        p.clean(dicts_custom, False)
def test_list_property_bad_element_type():
    """The element type must be a Property or _STIXBase subclass, not a value."""
    with pytest.raises(TypeError):
        ListProperty(1)
def test_list_property_bad_value_type():
    """Elements not convertible to the declared element type are rejected."""
    class TestObj(_STIXBase):
        _type = "test"
        _properties = {
            "foo": StringProperty(),
        }
    prop = ListProperty(TestObj)
    with pytest.raises(ValueError):
        prop.clean([1], False)
def test_string_property():
    """StringProperty accepts strings and values coercible to strings."""
    string_prop = StringProperty()
    assert string_prop.clean('foobar')
    assert string_prop.clean(1)
    assert string_prop.clean([1, 2, 3])
def test_type_property():
    """TypeProperty pins the 'type' field to a single declared value."""
    type_prop = TypeProperty('my-type')
    assert type_prop.clean('my-type')
    with pytest.raises(ValueError):
        type_prop.clean('not-my-type')
    # The default is the declared type and must itself be clean.
    assert type_prop.clean(type_prop.default())
@pytest.mark.parametrize(
    "value", [
        2,
        -1,
        3.14,
        False,
    ],
)
def test_integer_property_valid(value):
    """IntegerProperty accepts values coercible to int (floats and bools too)."""
    int_prop = IntegerProperty()
    assert int_prop.clean(value) is not None
@pytest.mark.parametrize(
    "value", [
        -1,
        -100,
        -50 * 6,
    ],
)
def test_integer_property_invalid_min_with_constraints(value):
    """Values below the configured minimum are rejected with a clear message."""
    int_prop = IntegerProperty(min=0, max=180)
    with pytest.raises(ValueError) as excinfo:
        int_prop.clean(value)
    assert "minimum value is" in str(excinfo.value)
@pytest.mark.parametrize(
    "value", [
        181,
        200,
        50 * 6,
    ],
)
def test_integer_property_invalid_max_with_constraints(value):
    """Values above the configured maximum are rejected with a clear message."""
    int_prop = IntegerProperty(min=0, max=180)
    with pytest.raises(ValueError) as excinfo:
        int_prop.clean(value)
    assert "maximum value is" in str(excinfo.value)
@pytest.mark.parametrize(
    "value", [
        "something",
        StringProperty(),
    ],
)
def test_integer_property_invalid(value):
    """Non-numeric values cannot be cleaned to an integer."""
    int_prop = IntegerProperty()
    with pytest.raises(ValueError):
        int_prop.clean(value)
@pytest.mark.parametrize(
    "value", [
        2,
        -1,
        3.14,
        False,
    ],
)
def test_float_property_valid(value):
    """FloatProperty accepts ints, floats, and bools."""
    # Renamed from the copy-pasted `int_prop` for clarity.
    float_prop = FloatProperty()
    assert float_prop.clean(value) is not None
@pytest.mark.parametrize(
    "value", [
        "something",
        StringProperty(),
    ],
)
def test_float_property_invalid(value):
    """Non-numeric values cannot be cleaned to a float."""
    float_prop = FloatProperty()
    with pytest.raises(ValueError):
        float_prop.clean(value)
@pytest.mark.parametrize(
    "value", [
        True,
        False,
        'True',
        'False',
        'true',
        'false',
        'TRUE',
        'FALSE',
        'T',
        'F',
        't',
        'f',
        1,
        0,
    ],
)
def test_boolean_property_valid(value):
    """BooleanProperty accepts bools, common true/false strings, and 0/1."""
    bool_prop = BooleanProperty()
    assert bool_prop.clean(value) is not None
@pytest.mark.parametrize(
    "value", [
        'abc',
        ['false'],
        {'true': 'true'},
        2,
        -1,
    ],
)
def test_boolean_property_invalid(value):
    """Anything outside the recognized boolean forms is rejected."""
    bool_prop = BooleanProperty()
    with pytest.raises(ValueError):
        bool_prop.clean(value)
@pytest.mark.parametrize(
    "value", [
        '2017-01-01T12:34:56Z',
    ],
)
def test_timestamp_property_valid(value):
    """An ISO 8601 'Z' timestamp cleans to an aware UTC datetime."""
    ts_prop = TimestampProperty()
    assert ts_prop.clean(value) == (dt.datetime(2017, 1, 1, 12, 34, 56, tzinfo=pytz.utc), False)
def test_timestamp_property_invalid():
    """Non-strings raise TypeError; unparsable strings raise ValueError."""
    ts_prop = TimestampProperty()
    with pytest.raises(TypeError):
        ts_prop.clean(1)
    with pytest.raises(ValueError):
        ts_prop.clean("someday sometime")
def test_binary_property():
    """BinaryProperty accepts base64 text and rejects non-base64 input."""
    bin_prop = BinaryProperty()
    assert bin_prop.clean("TG9yZW0gSXBzdW0=")
    with pytest.raises(ValueError):
        bin_prop.clean("foobar")
def test_hex_property():
    """HexProperty accepts hexadecimal text and rejects anything else."""
    hex_prop = HexProperty()
    assert hex_prop.clean("4c6f72656d20497073756d")
    with pytest.raises(ValueError):
        hex_prop.clean("foobar")
@pytest.mark.parametrize(
    "value", [
        ['a', 'b', 'c'],
        ('a', 'b', 'c'),
        'b',
    ],
)
def test_enum_property_valid(value):
    """The allowed vocab may be given as a list, a tuple, or a single string."""
    enum_prop = EnumProperty(value)
    assert enum_prop.clean('b', False)
def test_enum_property_clean():
    """Values are coerced to strings before membership checking."""
    prop = EnumProperty(['1'])
    assert prop.clean(1, False) == ('1', False)
def test_enum_property_invalid():
    """Out-of-vocab values fail regardless of the allow_custom flag."""
    prop = EnumProperty(['a', 'b', 'c'])
    # Enums are closed vocabularies: even allow_custom=True rejects 'z'.
    with pytest.raises(ValueError):
        prop.clean('z', False)
    with pytest.raises(ValueError):
        prop.clean('z', True)
@pytest.mark.xfail(
    reason="Temporarily disabled custom open vocab enforcement",
    strict=True,
)
@pytest.mark.parametrize(
    "vocab", [
        ['a', 'b', 'c'],
        ('a', 'b', 'c'),
        'b',
    ],
)
def test_openvocab_property(vocab):
    """Open vocabs pass known values through and flag unknown ones as custom."""
    ov_prop = OpenVocabProperty(vocab)
    # A value inside the vocab is never considered custom.
    assert ov_prop.clean("b", False) == ("b", False)
    assert ov_prop.clean("b", True) == ("b", False)
    with pytest.raises(CustomContentError):
        ov_prop.clean("d", False)
    # Out-of-vocab values are allowed, but flagged, with allow_custom=True.
    assert ov_prop.clean("d", True) == ("d", True)
@pytest.mark.parametrize(
    "value", [
        {"sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b"},
        [('MD5', '2dfb1bcc980200c6706feee399d41b3f'), ('RIPEMD-160', 'b3a8cd8a27c90af79b3c81754f267780f443dfef')],
    ],
)
def test_hashes_property_valid(value):
    """Recognized algorithms with well-formed digests are not flagged as custom."""
    hash_prop = HashesProperty(["sha256", "md5", "ripemd160"])
    _, has_custom = hash_prop.clean(value, False)
    assert not has_custom
@pytest.mark.parametrize(
    "value", [
        {"MD5": "a"},
        {"SHA-256": "2dfb1bcc980200c6706feee399d41b3f"},
    ],
)
def test_hashes_property_invalid(value):
    """Digests that don't match the named algorithm's format are rejected."""
    hash_prop = HashesProperty(["sha256", "md5"])
    with pytest.raises(ValueError):
        hash_prop.clean(value, False)
def test_hashes_property_custom():
    """Unknown algorithms are kept as-is and flagged as custom content."""
    value = {
        "sha256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b",
        "abc-123": "aaaaaaaaaaaaaaaaaaaaa",
    }
    expected_cleaned_value = {
        # cleaning transforms recognized hash algorithm names to the spec-
        # mandated name.
        "SHA-256": "6db12788c37247f2316052e142f42f4b259d6561751e5f401a1ae2a6df9c674b",
        "abc-123": "aaaaaaaaaaaaaaaaaaaaa",
    }
    hash_prop = HashesProperty(["SHA-256"])
    result = hash_prop.clean(value, True)
    assert result == (expected_cleaned_value, True)
    # Without allow_custom, the unknown algorithm is an error.
    with pytest.raises(CustomContentError):
        hash_prop.clean(value, False)
def test_hashes_no_library_support():
    """Algorithms unknown to the library pass through on an exact-name match only."""
    prop = HashesProperty(["foo"])
    result = prop.clean({"foo": "bar"}, False)
    assert result == ({"foo": "bar"}, False)
    result = prop.clean({"foo": "bar"}, True)
    assert result == ({"foo": "bar"}, False)
    with pytest.raises(CustomContentError):
        # require exact name match for unsupported hash algorithms
        prop.clean({"FOO": "bar"}, False)
    # A differently-cased name is treated as custom content.
    result = prop.clean({"FOO": "bar"}, True)
    assert result == ({"FOO": "bar"}, True)
|
|
import csv
import json
import logging
import os
import sys
import time
from .qt import *
from . import config as cfg
from .document import Document, Track, Event
from .info import InfoWindow
from .mplayer import MPlayer
from .status import StatusWindow
from .templates import templates
from .timeline import TimelineWindow
from .util import next_time_mode, format_time
log = logging.getLogger(__name__)
# Names of the three top-level windows; used for settings and key routing.
WINDOW_NAMES = 'timeline', 'status', 'info'
# Combine-mode constants; presumably control how overlapping events merge —
# TODO confirm where these are consumed (not used in this part of the file).
COMBINE_MODE_REPLACE = 'replace'
COMBINE_MODE_ADD = 'add'
class Application(QObject):
    def __init__(self, argv):
        """Build the Qt application, the three windows, and the main-loop timer."""
        QObject.__init__(self)
        self.app = QtGui.QApplication(argv)
        # The document is accessed through a property because we need to
        # signal rebuilding the api whenever it is changed.
        self._doc = Document()
        self._video = None
        self.time = 0 # Our guess for the current time.
        self.video_sync_time = 0 # Last time we synced to mplayer's time.
        self.video_time_at_sync = 0 # What the time was when we synced.
        # The global time representation mode. Rotate this by calling
        # self.next_time_mode()
        self.time_mode = next_time_mode()
        # Some state to keep track of which keys are pressed.
        self.pressed_keys = set()
        # A mapping of keys to tracks. Only changed when a new doc is loaded
        # or new tracks are added.
        self.key_to_track = {}
        # A mapping of keys to events that are in progress.
        self.key_to_open_event = {}
        # A mapping of group names to events that are in progress.
        self.group_to_open_event = {}
        # Build up the three windows.
        self.timeline = TimelineWindow(self)
        self.status = StatusWindow(self)
        self.info = InfoWindow(self)
        self.setup_menu()
        # Setup our main loop timer.
        self.idle_timer = QTimer()
        self.idle_timer.setInterval(10) # milliseconds
        self.idle_timer.timerEvent = self.main_loop
        self.last_loop_time = 0
    def setup_menu(self):
        """Build the File/Edit/View/Window menus on the timeline window."""
        menubar = self.timeline.menuBar()
        # self.status.setMenuBar(menubar)
        # self.info.setMenuBar(menubar)
        file_menu = menubar.addMenu("File")
        new = QAction("New", self.timeline)
        new.setShortcut('Ctrl+N')
        connect(new, SIGNAL('triggered()'), self.handle_file_new)
        file_menu.addAction(new)
        new_from_template = QMenu("New From Template", self.timeline)
        file_menu.addMenu(new_from_template)
        for name in templates:
            action = QAction(name, self.timeline)
            # Default-arg binding captures `name` per iteration (late-binding fix).
            connect(action, SIGNAL('triggered()'), lambda name=name: self.handle_file_new_from_template(name))
            new_from_template.addAction(action)
        open_ = QAction("Open...", self.timeline)
        open_.setShortcut('Ctrl+O')
        connect(open_, SIGNAL('triggered()'), self.handle_file_open)
        file_menu.addAction(open_)
        file_menu.addSeparator()
        import_video = QAction("Import Video...", self.timeline)
        import_video.setShortcut('Ctrl+I')
        connect(import_video, SIGNAL('triggered()'), self.handle_file_import_video)
        file_menu.addAction(import_video)
        export = QAction("Export Data...", self.timeline)
        export.setShortcut('Ctrl+E')
        connect(export, SIGNAL('triggered()'), self.handle_file_export)
        file_menu.addAction(export)
        file_menu.addSeparator()
        save = QAction("Save", self.timeline)
        save.setShortcut('Ctrl+S')
        connect(save, SIGNAL('triggered()'), self.handle_file_save)
        file_menu.addAction(save)
        save_as = QAction("Save As...", self.timeline)
        save_as.setShortcut('Ctrl+Shift+S')
        connect(save_as, SIGNAL('triggered()'), self.handle_file_save_as)
        file_menu.addAction(save_as)
        quit = QAction("Quit", self.timeline)
        quit.setShortcut('Ctrl+Q')
        connect(quit, SIGNAL('triggered()'), self.handle_file_quit)
        file_menu.addAction(quit)
        edit_menu = menubar.addMenu("Edit")
        undo = QAction("Undo", self.timeline)
        undo.setShortcut("Ctrl+Z")
        connect(undo, SIGNAL('triggered()'), self.handle_edit_undo)
        edit_menu.addAction(undo)
        view_menu = menubar.addMenu("View")
        # NOTE(review): 'Ctrl+-' on Zoom In and 'Ctrl++' on Zoom Out look
        # swapped relative to convention — confirm whether this is intended.
        zoom_in = QAction("Zoom In", self.timeline)
        zoom_in.setShortcut('Ctrl+-')
        connect(zoom_in, SIGNAL('triggered()'), self.timeline.zoom_in)
        view_menu.addAction(zoom_in)
        zoom_out = QAction("Zoom Out", self.timeline)
        zoom_out.setShortcut("Ctrl++")
        connect(zoom_out, SIGNAL('triggered()'), self.timeline.zoom_out)
        view_menu.addAction(zoom_out)
        window_menu = menubar.addMenu("Window")
        def make_handler(name):
            # Closure factory so each menu item raises its own window.
            def handler():
                window = getattr(self, name)
                window.show()
                if name == 'timeline':
                    window.layout()
                else:
                    window.repaint()
                window.activateWindow()
                window.raise_()
            return handler
        for i, name in enumerate(WINDOW_NAMES):
            action = QtGui.QAction(name.capitalize(), self.timeline)
            action.setShortcut('Ctrl+%d' % (i + 1))
            connect(action, SIGNAL('triggered()'), make_handler(name))
            window_menu.addAction(action)
def ask_to_save_if_required(self):
if self.doc:
dialog = QMessageBox()
dialog.setText("The document (may) have been modified.");
dialog.setInformativeText("Do you want to save your changes?");
dialog.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel);
dialog.setDefaultButton(QMessageBox.Save);
res = dialog.exec_()
if res == QMessageBox.Cancel:
raise ValueError('cancel')
return res == QMessageBox.Save
    def handle_file_new(self):
        """File > New: replace the current document with an empty one."""
        log.debug('File > New')
        try:
            if self.ask_to_save_if_required():
                self.save()
            self.doc = Document()
        except ValueError:
            # Raised when the user cancels the save prompt or dialog.
            pass
    def handle_file_new_from_template(self, name):
        """File > New From Template: build a document pre-populated with tracks."""
        log.debug('File > New From Template > %r' % name)
        try:
            if self.ask_to_save_if_required():
                self.save()
            doc = Document()
            # Each template entry holds the kwargs for one Track.
            for kwargs in templates[name]:
                doc.tracks.append(Track(**kwargs))
            self.doc = doc
        except ValueError:
            # Raised when the user cancels the save prompt or dialog.
            pass
def handle_file_open(self):
log.debug('File > Open')
try:
if self.ask_to_save_if_required():
self.save()
path = str(QFileDialog.getOpenFileName(self.timeline,
caption="Pick a file to open.",
directory="~",
filter="ScoreBee (*.scorb)",
))
if not path:
raise ValueError('user cancelled')
doc = Document.from_string(open(path).read())
doc.path = path
self.doc = doc
except ValueError:
pass
def handle_file_import_video(self):
# Ask them if they want to override it.
if self.doc.video_path:
dialog = QMessageBox()
dialog.setText("This document's video will be forgotten.");
dialog.setInformativeText("Continue?");
dialog.setStandardButtons(QMessageBox.Yes | QMessageBox.No);
dialog.setDefaultButton(QMessageBox.No);
if dialog.exec_() == QMessageBox.No:
return
# Get a movie.
path = str(QFileDialog.getOpenFileName(self.timeline,
caption="Pick a video to score.",
directory="~",
filter="Video (*.avi *.mov *.mp4)",
))
if not path:
return
try:
video = MPlayer(path)
except:
log.exception('error while opening video')
dialog = QMessageBox()
dialog.setIcon(QMessageBox.Critical)
dialog.setText("Error while importing video.");
dialog.setInformativeText("MPlayer did not understand the file.");
dialog.setStandardButtons(QMessageBox.Ok);
dialog.setDefaultButton(QMessageBox.Ok);
dialog.exec_()
return
self.doc.video_path = path
self._video = None
self.emit(SIGNAL('doc_changed'))
def handle_file_export(self):
log.debug('File > Export')
path = str(QFileDialog.getSaveFileName(self.timeline,
caption="Export Data",
directory='/Users/mikeboers/Desktop',
filter="Spreadsheet (*.csv)",
))
if not len(path):
return
fh = csv.writer(open(path, 'w'))
header = []
for track in self.doc.tracks:
header.extend([track.name, ''])
fh.writerow(header)
max_length = max(len(track.events) for track in self.doc.tracks)
for i in xrange(max_length):
row = []
for track in self.doc.tracks:
if len(track.events) > i:
# XXX: This only works when there is a video at all.
row.extend(['%.3f' % (float(x) / float(self.video.fps)) for x in [track.events[i].start, track.events[i].end]])
else:
row.extend(['', ''])
fh.writerow(row)
    def handle_file_save(self):
        """File > Save: save to the current path (prompting if there is none)."""
        log.debug('File > Save')
        self.save()
    def handle_file_save_as(self):
        """File > Save As: always prompt for a new path."""
        log.debug('File > Save As')
        self.save(save_as=True)
    def handle_file_quit(self):
        """File > Quit: offer to save, then quit the Qt event loop."""
        log.debug('File > Quit')
        try:
            if self.ask_to_save_if_required():
                self.save()
            self.app.quit()
        except ValueError:
            # The user cancelled; abort quitting.
            pass
    def handle_edit_undo(self):
        """Edit > Undo: not implemented yet; only logs the request."""
        log.debug('Edit > Undo')
        pass
def save(self, save_as=False):
if self.doc.path is None or save_as:
path = str(QFileDialog.getSaveFileName(self.timeline,
caption="Save File",
directory='/Users/mikeboers/Desktop',
filter="ScoreBee (*.scorb)",
))
if not len(path):
raise ValueError('user canceled')
else:
path = self.doc.path
# Make a backup if it already exists:
if os.path.exists(path):
backup_dir = os.path.dirname(path) + '/backup'
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
backup_path = backup_dir + '/' + time.strftime('%Y-%m-%dT%H-%M-%S') + '.' + os.path.basename(path)
open(backup_path, 'w').write(open(path, 'r').read())
# Do the saving
open(path, 'w').write(self.doc.as_string())
self.doc.path = path
    @property
    def video(self):
        """Always a good (ie. running) mplayer.

        Lazily (re)spawns MPlayer when it has not started or has died.
        May still be None when the document has no video attached.
        """
        if self._video is None or not self._video.is_running:
            if self.doc.is_ready:
                self._video = MPlayer(
                    path=self.doc.video_path,
                    conf=os.path.abspath(__file__ + '/../../settings/mplayer.txt')
                )
        return self._video
    def format_time(self, time=None):
        """Format a time with the current time format mode.

        Defaults to the application's current time estimate when `time` is None.
        """
        return format_time(self.time if time is None else time, self.video.fps, self.time_mode)
    def next_time_mode(self):
        """Rotate the time format mode."""
        self.time_mode = next_time_mode(self.time_mode)
        # Let the windows re-render any displayed times.
        self.emit(SIGNAL('time_mode_changed'))
    @property
    def doc(self):
        """The open document.
        Setting this has major side effects (see the setter below)."""
        return self._doc
@property
def is_ready(self):
if not self.doc.video_path:
return False
return True
    @doc.setter
    def doc(self, doc):
        """Swap in a new document, tearing down UI and video for the old one."""
        # Delete all the existing gui.
        for track in self._doc.tracks:
            if track.ui:
                track.ui.destroy()
                track.ui.deleteLater()
        self._doc = doc
        # Destroy the old video player. A new one will be created when requested.
        self._video = None
        # Setup key tracking mechanisms.
        self.key_to_track = dict((track.key_code, track) for track in doc.tracks)
        if self.is_ready:
            # Rewind the (lazily spawned) player and re-anchor our clock.
            self.video.time = 0
            self.sync()
        # Force everything to deal with the new document.
        self.emit(SIGNAL('doc_changed'))
def run(self):
    """Show all windows, run the Qt event loop, and persist window
    geometry on the way out."""
    # Load and apply all of the window settings.
    # TODO: move this onto the window class itself.
    if os.path.exists(cfg.WINDOW_SETTINGS_PATH):
        # NOTE(review): the file handle is never closed explicitly
        # (relies on refcounting); same for the dump below.
        window_prefs = json.load(open(cfg.WINDOW_SETTINGS_PATH))
        # iteritems: this module is Python 2 only.
        for name, data in window_prefs.iteritems():
            window = getattr(self, name)
            window.move(*data['pos'])
            window.resize(*data['size'])
    self.status.show()
    self.info.show()
    self.timeline.show()
    # Collect all of the key press events here.
    for name in WINDOW_NAMES:
        window = getattr(self, name)
        window.keyPressEvent = self.keyPressEvent
        window.keyReleaseEvent = self.keyReleaseEvent
    # Start up the document
    # self.doc = Document()
    # Run the main loops.
    self.idle_timer.start()
    try:
        self.app.exec_()
    finally:
        # HACK: Kill the MPlayer
        self._video = None
        # Save window sizes and locations for the next startup.
        window_prefs = {}
        for name in WINDOW_NAMES:
            window_prefs[name] = dict(
                pos=tuple(getattr(self, name).pos()),
                size=tuple(getattr(self, name).size()),
            )
        json.dump(window_prefs, open(cfg.WINDOW_SETTINGS_PATH, 'w'), indent=4)
@property
def frame(self):
    """Current playhead position as a whole frame number."""
    return int(self.video.fps * self.time)
def main_loop(self, event=None, force_sync=False):
    """Event that is triggered every couple milliseconds.

    Treat this as our main loop: periodically hard-sync with the player,
    otherwise extrapolate the current time from the last sync point, and
    keep any open (in-progress) events stretched to the current frame.
    """
    if not self.is_ready:
        return
    now = time.time()
    time_delta = now - self.video_sync_time
    if force_sync or now - self.video_sync_time > cfg.SYNC_INTERVAL:
        self.sync()
    elif not self.video.is_paused:
        # Extrapolate: last synced time plus elapsed wall time scaled by
        # the playback speed.
        self.time = self.video_time_at_sync + self.video.speed * time_delta
        self.emit(SIGNAL('time_changed'))
    # NOTE(review): this loop variable shadows the `event` parameter.
    for event in self.key_to_open_event.values():
        event.end = self.frame
        self.emit(SIGNAL('event_updated'), event)
def sync(self, threshold=1.0/30, verbose=False):
    """Sync up our time keeping with the actual time in the media player.

    We also use this to measure what the real speed is.  Syncs that
    would land closer together than `threshold` seconds are skipped.
    """
    start_time = time.time()
    if start_time - self.video_sync_time < threshold:
        if verbose:
            log.debug('sync under threshold')
        return
    new_time = self.video.time
    delta = new_time - self.time
    # Record the sync point used by main_loop's extrapolation.
    self.video_time_at_sync = self.time = new_time
    self.video_sync_time = start_time
    if delta:
        # Only notify when the clock actually moved.
        self.emit(SIGNAL('time_changed'), delta)
    self.emit(SIGNAL('synced'), delta)
    if verbose:
        log.debug('synced in %.2fms' % (1000 * (time.time() - start_time)))
def toggle_pause(self):
    """Flip the player between playing and paused, then notify listeners."""
    if not self.video.is_paused:
        self.video.pause()
    else:
        self.video.play()
    self.emit(SIGNAL('pause_toggled'))
def delete_event(self, track, event):
    """Remove `event` from `track` and tear down its widget."""
    # XXX: This is really gross... clean this up.
    index = track.events.index(event)
    assert index >= 0
    # Destroy the Qt widget before dropping our reference to it.
    event.ui.destroy()
    event.ui.deleteLater()
    track.events.pop(index)
def keyPressEvent(self, event):
    """Handle key presses collected from every window.

    Space toggles playback.  A track's trigger key opens a new event on
    that track; if the track belongs to a group, any other open event in
    that group is closed first.
    """
    key = event.key()
    # If any modifer is held besides shift, we don't care for this event
    if int(event.modifiers()) & ~Qt.ShiftModifier:
        event.ignore()
        return
    # Track the key press; ignore auto-repeats of a key already down.
    if key in self.pressed_keys:
        return
    self.pressed_keys.add(key)
    # log.debug('keyPressEvent %d' % key)
    if key == Qt.Key_Space:
        if self.is_ready:
            self.toggle_pause()
    # If this key is a trigger for a track and there isn't already an
    # open event (ie one in progress already), then make a new one.
    elif key in self.key_to_track and key not in self.key_to_open_event:
        track = self.key_to_track[key]
        # Make sure we are getting an acurate time here. There may be
        # issues if the sync itself takes some time to complete.
        #
        # We could time how long this takes to complete and then subtract
        # that from the time value we get, but we don't know if the delay
        # is on the front or the back. I'm not going to bother for now.
        self.sync(threshold=1.0/30, verbose=True)
        # Create the new event, store it in all the right places, and
        # signal to everyone else that it exists.  (Renamed from `event`
        # so the Qt event parameter is no longer shadowed.)
        frame = self.frame
        new_event = Event(frame, frame)
        track.add_event(new_event)
        self.key_to_open_event[key] = new_event
        if track.group:
            if track.group in self.group_to_open_event:
                open_event = self.group_to_open_event[track.group]
                self.close_event(open_event)
                # Iterate over a snapshot: we delete entries as we go,
                # which is unsafe on a live dict view under Python 3.
                for k, v in list(self.key_to_open_event.items()):
                    if v == open_event:
                        del self.key_to_open_event[k]
                        self.pressed_keys.discard(k)
            self.group_to_open_event[track.group] = new_event
        self.emit(SIGNAL('event_created'), track, new_event)
def close_event(self, event):
    """Close an in-progress event, stamping its end at the current frame."""
    # Make sure we are getting an accurate time. See my note in
    # the keyPressEvent for why this can be wrong.
    self.sync(threshold=1.0/30, verbose=True)
    event.end = self.frame
    # Let everyone know...
    self.emit(SIGNAL('event_updated'), event)
def keyReleaseEvent(self, event):
    """Handle key releases: close the event opened by this key, if any."""
    key = event.key()
    # If any modifer is held besides shift, we don't care for this event
    if int(event.modifiers()) & ~Qt.ShiftModifier:
        event.ignore()
        return
    # log.debug('keyReleaseEvent %d' % key)
    # Ignore the release event if it is a track trigger, and the shift
    # button is held down. This effectively makes the keys sticky. One can
    # cancel it by hitting it normally.
    if not (key in self.key_to_track and Qt.Key_Shift in self.pressed_keys):
        # Discard doesn't error if the key isn't in there.
        self.pressed_keys.discard(key)
        if key in self.key_to_open_event:
            track = self.key_to_track[key]
            event = self.key_to_open_event.pop(key)
            # Also clear the per-group bookkeeping when this was the
            # group's currently open event.
            if track.group and self.group_to_open_event.get(track.group) == event:
                del self.group_to_open_event[track.group]
            self.close_event(event)
|
|
# -*- coding: utf-8 -*-
from re import compile, UNICODE
from expanders import Expander
from macro_caller import get_macro_name, expand_macro_from_stream
__all__ = ('ExpanderRegister', 'ParserRegister', 'Register', 'RegisterMap')
class RegisterMap(dict):
    """Dictionary holding macro : register_with_allowed_macros pairs.

    Every register added to the map is immediately given a back-reference
    to the map via ``visit_register_map``.
    """
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        for k in self:
            self.__after_add(k)
        # macro -> set of hook classes interested in that macro
        self.hooks = {}

    def __after_add(self, k):
        # Give the newly added register a back-reference to this map.
        self[k].visit_register_map(self)

    def __setitem__(self, k, v):
        dict.__setitem__(self, k, v)
        self.__after_add(k)

    def add_hooks(self, hooks):
        """Register hook classes, keyed by the macro they are bound to."""
        for hook in hooks:
            if hook.macro:
                # dict.has_key was removed in Python 3; setdefault is
                # equivalent and works on both.
                self.hooks.setdefault(hook.macro, set()).add(hook)

    def pre_hooks(self, stream, macro, builder):
        """Run all pre-macro hooks for `macro`, threading `stream` through."""
        if macro.__class__ in self.hooks:
            for hook in self.hooks[macro.__class__]:
                stream = hook().pre_macro(stream, macro, builder)
        return stream

    def post_hooks(self, macro, builder):
        """Run all post-macro hooks for `macro`."""
        if macro.__class__ in self.hooks:
            for hook in self.hooks[macro.__class__]:
                hook().post_macro(macro, builder)
class ParserRegister(object):
    """Holds parsers (aka 'alternative syntaxes') allowed to use for parsing.

    ParserRegister is also responsible for resolving those alternative
    syntaxes in a stream.
    """
    def __init__(self, parsers=None):
        # start pattern -> (regexp compiled with a ^ anchor, parser class)
        self.parser_start = {}
        if parsers is not None:
            for parser in parsers:
                self.add(parser)

    def add(self, parser):
        """Register `parser` under each of its start patterns."""
        if parser.start is not None:
            for start in parser.start:
                # bytes is str on Python 2, so this branch is behaviour-
                # identical there while remaining safe on Python 3
                # (where str has no decode()).
                if isinstance(start, bytes):
                    start = start.decode('utf-8')
                self.parser_start[start] = (compile(u''.join([u'^', start]), flags=UNICODE), parser)

    def get_parser(self, regexp):
        """Return the parser registered under start pattern `regexp`."""
        try:
            return self.parser_start[regexp][1]
        except KeyError:
            raise ValueError('No Parser in register starting with %s' % regexp)

    def _most_matching(self, matching):
        """Return the most matching parser and the chunk it resolved on.

        Longest match wins; a `priority` attribute breaks length ties.
        """
        most = None
        length = 0
        for m in matching:
            mlen = len(m.string[m.start():m.end()])
            if mlen > length:
                most = m
                length = mlen
            elif mlen == length and most is not None:
                # Two or more parsers are matching with the same length;
                # perform the priority check.
                m_parser = self.parser_start[m.re.pattern[1:]][1]
                most_parser = self.parser_start[most.re.pattern[1:]][1]
                if getattr(m_parser, 'priority', 0) > \
                        getattr(most_parser, 'priority', 0):
                    most = m
        if most is None:
            return (None, None)
        return (self.parser_start[most.re.pattern[1:]][1], most.string[most.start():most.end()])

    def resolve_parser(self, stream, register, whole_stream=None):
        """ Resolve parser stream.
        Return properly initialized parser or None
        """
        if whole_stream is None:
            whole_stream = stream
        matching = []
        for start in self.parser_start:
            compiled, parser = self.parser_start[start]
            # Match once and reuse the result (the original matched every
            # pattern twice).
            if start.find('^') != -1:
                # Explicitly anchored patterns may only match at the very
                # beginning of the whole stream.
                m = compiled.match(whole_stream)
                if m and stream == whole_stream:
                    matching.append(m)
            else:
                m = compiled.match(stream)
                if m:
                    matching.append(m)
        if len(matching) == 0:
            return None
        parser, chunk = self._most_matching(matching)
        if parser is None or chunk is None:
            return None
        return parser(stream, self, chunk, register)
class Register(object):
    """Holds the macros and parsers available during document parsing."""

    def __init__(self, macro_list=None, parsers=None):
        self.register_map = None
        # macro name -> macro class
        self.macro_map = {}
        self.parser_register = ParserRegister()
        if macro_list is not None:
            self.add_macros(macro_list)
        if parsers is not None:
            self.add_parsers(parsers)

    def add_macro(self, macro):
        """Register `macro` under its name; duplicate names are an error."""
        # `in` replaces dict.has_key (removed in Python 3); the exception
        # is raised with call syntax instead of the Python-2-only
        # `raise X, 'msg'` statement form.
        if macro.name in self.macro_map:
            raise ValueError('Macro %s already added under name %s' % (self.macro_map[macro.name], macro.name))
        self.macro_map[macro.name] = macro

    def add_macros(self, macro_list):
        for p in macro_list:
            self.add(p)

    def add(self, macro):
        """ Backward-compatibility symlink, use add_macro instead """
        self.add_macro(macro)

    def add_parsers(self, parsers):
        for parser in parsers:
            self.add_parser(parser)

    def add_parser(self, parser):
        # Only accept parsers whose macro is itself registered.
        if parser.macro.name in self.macro_map:
            self.parser_register.add(parser)

    def visit_register_map(self, register_map):
        # Back-reference set by the owning RegisterMap.
        self.register_map = register_map

    def get_macro(self, name):
        """Return the macro registered under `name` or raise ValueError."""
        try:
            return self.macro_map[name]
        except KeyError:
            raise ValueError('No macro parser registered under name %s in registry' % name)

    def resolve_parser_macro(self, stream):
        """ Try resolving parser in macro syntax.
        Return properly initialized parser or None
        """
        # (The original had an unreachable `else: raise NotImplementedError`
        # clause -- the try block always returns or raises -- which has
        # been removed.)
        try:
            if not isinstance(stream, unicode):
                raise TypeError("Stream expected to be unicode string, %s instead (stream: %s)" % (type(stream), stream))
            return self.macro_map[get_macro_name(stream, self)]
        except KeyError:
            # Macro name not in my macro_map.
            return None

    def resolve_macro(self, stream, builder, state=None, whole_stream=None):
        """Resolve `stream` to (macro, remaining_stream), or (None, None)."""
        # backward compatibility for tests
        if isinstance(stream, str):
            stream = stream.decode('utf-8')
        if whole_stream is None:
            whole_stream = stream
        parser = self.parser_register.resolve_parser(stream, self, whole_stream)
        if parser is not None:
            # Macro resolved in alternate syntax, use parser to get macro
            macro, stream_new = parser.get_macro(builder, state)
            return (macro, stream_new)
        # resolve in macro syntax
        macro = self.resolve_parser_macro(stream)
        if macro is not None:
            return expand_macro_from_stream(stream, self, builder, state)
        return (None, None)
class ExpanderRegister(object):
    """Registry of node expanders, looked up per output format."""

    def __init__(self, expander_map):
        self.expander_map = {}
        for k in expander_map:
            # Every value must already be a constructed Expander.
            if not isinstance(expander_map[k], Expander):
                raise ValueError('%s must be instance of Expander' % expander_map[k])
            self.expander_map[k] = expander_map[k]

    def get(self, node, format='xhtml11'):
        # NOTE(review): this indexes two levels deep (format, then node),
        # which implies the map values are per-format mappings -- but
        # __init__ validates each value as a single Expander instance.
        # One of the two looks wrong; confirm against callers.
        try:
            return self.expander_map[format][node]
        except KeyError:
            raise ValueError('Expander for format %s for node %s not in registry' % (format, node))
|
|
#
# BaseImage.py -- Abstraction of an generic data image.
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy as np
import logging
from ginga.misc import Bunch, Callback
from ginga import trcalc, AutoCuts
class ImageError(Exception):
    """Error raised for invalid image data access in this module."""
    pass
class ViewerObjectBase(Callback.Callbacks):
    """Base for viewer objects: a metadata dictionary plus callbacks."""

    def __init__(self, metadata=None, logger=None, name=None):
        Callback.Callbacks.__init__(self)

        # Fall back to a quiet default logger when none is supplied.
        if logger is None:
            logger = logging.getLogger('BaseImage')
            logger.addHandler(logging.NullHandler())
        self.logger = logger

        self.metadata = {}
        if metadata:
            self.update_metadata(metadata)
        # make sure an object has these attributes
        # TODO: this maybe should have a unique random string or something
        # but we'd have to fix a lot of code that is currently checking for
        # None
        self.metadata.setdefault('name', None)

        # For callbacks
        for cb_name in ('modified',):
            self.enable_callback(cb_name)

    def get_metadata(self):
        """Return a shallow copy of the metadata dict."""
        return self.metadata.copy()

    def clear_metadata(self):
        self.metadata = {}

    def clear_all(self):
        self.clear_metadata()

    def update_metadata(self, map_like):
        # Merge key/value pairs in; keys absent from map_like are kept.
        for key, val in map_like.items():
            self.metadata[key] = val

    def get(self, kwd, *args):
        """Return metadata value for `kwd`; an optional positional
        argument supplies a default instead of raising KeyError."""
        try:
            return self.metadata[kwd]
        except KeyError:
            if args:
                return args[0]
            raise KeyError(kwd)

    def get_list(self, *args):
        return [self.get(kwd) for kwd in args]

    def __getitem__(self, kwd):
        return self.metadata[kwd]

    def update(self, kwds):
        self.metadata.update(kwds)

    def set(self, **kwds):
        self.update(kwds)

    def __setitem__(self, kwd, value):
        self.metadata[kwd] = value
class BaseImage(ViewerObjectBase):
    """A generic data image backed by a numpy array.

    Wraps a 2D (or higher-dimensional) array with metadata, min/max
    bookkeeping, channel-order handling and cutout/scaling helpers.
    """

    def __init__(self, data_np=None, metadata=None, logger=None, order=None,
                 name=None):
        ViewerObjectBase.__init__(self, logger=logger, metadata=metadata,
                                  name=name)

        if data_np is None:
            # Safe empty placeholder so size/shape queries work.
            data_np = np.zeros((0, 0))
        self._data = data_np
        self.order = ''
        self.name = name

        # For navigating multidimensional data
        self.axisdim = []
        self.naxispath = []
        self.revnaxis = []

        self._set_minmax()
        self._calc_order(order)
        self.autocuts = AutoCuts.Histogram(self.logger)

    @property
    def shape(self):
        return self._get_data().shape

    @property
    def width(self):
        if self.ndim < 2:
            return 0
        # NOTE: numpy stores data in column-major layout
        return self.shape[1]

    @property
    def height(self):
        # NOTE: numpy stores data in column-major layout
        return self.shape[0]

    @property
    def depth(self):
        return self.get_depth()

    @property
    def ndim(self):
        return len(self.shape)

    @property
    def dtype(self):
        return self._get_data().dtype

    def get_size(self):
        # (width, height) in pixels.
        return (self.width, self.height)

    def get_depth(self):
        # Number of channels; 1 for monochrome data.
        shape = self.shape
        if len(shape) > 2:
            return shape[-1]
        return 1

    def get_shape(self):
        return self.shape

    def get_center(self):
        wd, ht = self.get_size()
        ctr_x, ctr_y = wd // 2, ht // 2
        return (ctr_x, ctr_y)

    def get_data(self):
        return self._data

    def _get_data(self):
        return self._data

    def _get_fast_data(self):
        """
        Return an array similar to but possibly smaller than self._data,
        for fast calculation of the intensity distribution.

        NOTE: this is used by the Ginga plugin for Glue
        """
        return self._data

    def copy_data(self):
        data = self._get_data()
        return data.copy()

    def get_data_xy(self, x, y):
        """Return the pixel value at data coordinates (x, y)."""
        # NOTE(review): assert is stripped under `python -O`, so this
        # bounds check disappears in optimized runs.
        assert (x >= 0) and (y >= 0), \
            ImageError("Indexes out of range: (x=%d, y=%d)" % (
                x, y))
        view = np.s_[y, x]

        res = self._slice(view)
        if isinstance(res, np.ndarray) and self.get('ignore_alpha', False):
            # <-- this image has a "hidden" alpha array
            # NOTE: assumes that data is at index 0
            res = res[0]

        return res

    def set_data(self, data_np, metadata=None, order=None, astype=None):
        """Use this method to SHARE (not copy) the incoming array.
        """
        if astype:
            data = data_np.astype(astype, copy=False)
        else:
            data = data_np
        self._data = data

        self._calc_order(order)
        if metadata:
            self.update_metadata(metadata)

        self._set_minmax()

        self.make_callback('modified')

    def clear_all(self):
        # clear metadata
        super(BaseImage, self).clear_all()

        # unreference data array
        self._data = np.zeros((0, 0))

    def _slice(self, view):
        if not isinstance(view, tuple):
            view = tuple(view)
        return self._get_data()[view]

    def get_slice(self, c):
        """Return the 2D plane for channel letter `c` (e.g. 'R')."""
        view = [slice(None)] * self.ndim
        view[-1] = self.order.index(c.upper())
        return self._slice(view)

    def has_slice(self, c):
        return c.upper() in self.order

    def get_array(self, order):
        """Return the data with channels arranged per `order` (e.g. 'RGB')."""
        order = order.upper()
        if order == self.order:
            return self._get_data()
        l = [self.get_slice(c) for c in order]
        return np.dstack(l)

    def set_order(self, order):
        self.order = order.upper()

    def get_order(self):
        return self.order

    def get_order_indexes(self, cs):
        cs = cs.upper()
        return [self.order.index(c) for c in cs]

    def _calc_order(self, order):
        """Called to set the order of a multi-channel image.

        The order should be determined by the loader, but this will
        make a best guess if passed `order` is `None`.
        """
        if order is not None and order != '':
            self.order = order.upper()
        else:
            self.order = trcalc.guess_order(self.shape)

    def has_valid_wcs(self):
        return hasattr(self, 'wcs') and self.wcs.has_valid_wcs()

    def _set_minmax(self):
        """Cache min/max (and infinity-ignoring variants) of the data."""
        data = self._get_fast_data()
        try:
            self.maxval = np.nanmax(data)
            self.minval = np.nanmin(data)
        except Exception:
            # Empty arrays (and some dtypes) raise here; fall back to 0.
            self.maxval = 0
            self.minval = 0

        # TODO: see if there is a faster way to ignore infinity
        try:
            if np.isfinite(self.maxval):
                self.maxval_noinf = self.maxval
            else:
                self.maxval_noinf = np.nanmax(data[np.isfinite(data)])
        except Exception:
            self.maxval_noinf = self.maxval

        try:
            if np.isfinite(self.minval):
                self.minval_noinf = self.minval
            else:
                self.minval_noinf = np.nanmin(data[np.isfinite(data)])
        except Exception:
            self.minval_noinf = self.minval

    def get_minmax(self, noinf=False):
        if not noinf:
            return (self.minval, self.maxval)
        else:
            return (self.minval_noinf, self.maxval_noinf)

    # kwargs is needed so subclasses can interoperate with optional keywords.
    def get_header(self, **kwargs):
        """Return the image header, creating an empty one if needed."""
        header = self.get('header', None)
        if header is None:
            header = Header()
            self.set(header=header)
        return header

    def update_keywords(self, key_dict):
        hdr = self.get_header()
        hdr.update(key_dict)

    def transfer(self, other, astype=None):
        # Share our data array and metadata with `other`.
        data = self._get_data()
        other.set_data(data, metadata=self.metadata, astype=astype)

    def copy(self, astype=None):
        # NOTE(review): `astype` is accepted but never used here, so the
        # copy always keeps the original dtype -- confirm intent with
        # callers.
        data = self.copy_data()
        metadata = self.get_metadata()
        other = self.__class__(data_np=data, metadata=metadata)
        return other

    def cutout_data(self, x1, y1, x2, y2, xstep=1, ystep=1, z=None,
                    astype=None):
        """Cut out data area based on bounded coordinates.

        Parameters
        ----------
        x1, y1 : int
            Coordinates defining the minimum corner to be cut out

        x2, y2 : int
            Coordinates *one greater* than the maximum corner

        xstep, ystep : int
            Step values for skip intervals in the cutout region

        z : int
            Value for a depth (slice) component for color images

        astype :
            Optional dtype to convert the cutout to

        Note that the coordinates for `x2`, `y2` are *outside* the
        cutout region, similar to slicing parameters in Python.
        """
        view = np.s_[y1:y2:ystep, x1:x2:xstep]
        data_np = self._slice(view)
        if z is not None and len(data_np.shape) > 2:
            data_np = data_np[..., z]
        if astype:
            data_np = data_np.astype(astype, copy=False)
        return data_np

    def cutout_adjust(self, x1, y1, x2, y2, xstep=1, ystep=1, z=0, astype=None):
        """Like `cutout_data`, but adjusts coordinates `x1`, `y1`, `x2`, `y2`
        to be inside the data area if they are not already. It tries to
        preserve the width and height of the region, so e.g. (-2, -2, 5, 5)
        could become (0, 0, 7, 7)
        """
        dx = x2 - x1
        dy = y2 - y1

        if x1 < 0:
            x1, x2 = 0, dx
        else:
            if x2 >= self.width:
                x2 = self.width
                x1 = x2 - dx

        if y1 < 0:
            y1, y2 = 0, dy
        else:
            if y2 >= self.height:
                y2 = self.height
                y1 = y2 - dy

        data = self.cutout_data(x1, y1, x2, y2, xstep=xstep, ystep=ystep,
                                z=z, astype=astype)
        return (data, x1, y1, x2, y2)

    def cutout_radius(self, x, y, radius, xstep=1, ystep=1, astype=None):
        # Square cutout of half-width `radius` centered on (x, y).
        return self.cutout_adjust(x - radius, y - radius,
                                  x + radius + 1, y + radius + 1,
                                  xstep=xstep, ystep=ystep, astype=astype)

    def cutout_cross(self, x, y, radius):
        """Cut two data subarrays that have a center at (x, y) and with
        radius (radius) from (image).  Returns the starting pixel (x0, y0)
        of each cut and the respective arrays (xarr, yarr).
        """
        n = radius
        wd, ht = self.get_size()
        x0, x1 = max(0, x - n), min(wd - 1, x + n)
        y0, y1 = max(0, y - n), min(ht - 1, y + n)

        xview = np.s_[y, x0:x1 + 1]
        yview = np.s_[y0:y1 + 1, x]

        xarr = self._slice(xview)
        yarr = self._slice(yview)

        return (x0, y0, xarr, yarr)

    def get_shape_mask(self, shape_obj):
        """
        Return full mask where True marks pixels within the given shape.
        """
        wd, ht = self.get_size()
        xi, yi = np.meshgrid(range(0, wd), range(0, ht))
        pts = np.array((xi, yi)).T
        contains = shape_obj.contains_pts(pts)
        return contains

    def get_shape_view(self, shape_obj, avoid_oob=True):
        """
        Calculate a bounding box in the data enclosing `shape_obj` and
        return a view that accesses it and a mask that is True only for
        pixels enclosed in the region.

        If `avoid_oob` is True (default) then the bounding box is clipped
        to avoid coordinates outside of the actual data.
        """
        x1, y1, x2, y2 = [int(np.round(n)) for n in shape_obj.get_llur()]
        if avoid_oob:
            # avoid out of bounds indexes
            wd, ht = self.get_size()
            x1, x2 = max(0, x1), min(x2, wd - 1)
            y1, y2 = max(0, y1), min(y2, ht - 1)

        # calculate pixel containment mask in bbox
        xi, yi = np.meshgrid(range(x1, x2 + 1), range(y1, y2 + 1))
        pts = np.array((xi, yi)).T
        contains = shape_obj.contains_pts(pts)

        view = np.s_[y1:y2 + 1, x1:x2 + 1]
        return (view, contains)

    def cutout_shape(self, shape_obj):
        """
        Cut out and return a portion of the data corresponding to `shape_obj`.
        A masked numpy array is returned, where the pixels not enclosed in
        the shape are masked out.
        """
        view, mask = self.get_shape_view(shape_obj)

        # cutout our enclosing (possibly shortened) bbox
        data = self._slice(view)

        # mask non-containing members
        mdata = np.ma.array(data, mask=np.logical_not(mask))
        return mdata

    def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht,
                               method='basic'):
        # TO BE DEPRECATED
        data_np = self._get_data()
        (newdata, (scale_x, scale_y)) = \
            trcalc.get_scaled_cutout_wdht(data_np, x1, y1, x2, y2,
                                          new_wd, new_ht,
                                          interpolation=method,
                                          logger=self.logger)
        res = Bunch.Bunch(data=newdata, scale_x=scale_x, scale_y=scale_y)
        return res

    def get_scaled_cutout_basic(self, x1, y1, x2, y2, scale_x, scale_y,
                                method='basic'):
        # TO BE DEPRECATED
        p1, p2 = (x1, y1), (x2, y2)
        scales = (scale_x, scale_y)
        return self.get_scaled_cutout2(p1, p2, scales, method=method,
                                       logger=self.logger)

    def get_scaled_cutout(self, x1, y1, x2, y2, scale_x, scale_y,
                          method='basic', logger=None):
        # TO BE DEPRECATED
        p1, p2 = (x1, y1), (x2, y2)
        scales = (scale_x, scale_y)
        return self.get_scaled_cutout2(p1, p2, scales, method=method,
                                       logger=logger)

    def get_scaled_cutout2(self, p1, p2, scales,
                           method='basic', logger=None):
        """Extract a region of the image defined by points `p1` and `p2`
        and scale it by scale factors `scales`.

        `method` describes the method of interpolation used, where the
        default "basic" is nearest neighbor.
        """
        if logger is None:
            logger = self.logger
        data = self._get_data()
        newdata, oscales = trcalc.get_scaled_cutout_basic2(data, p1, p2, scales,
                                                           interpolation=method,
                                                           logger=logger)
        scale_x, scale_y = oscales[:2]
        res = Bunch.Bunch(data=newdata, scale_x=scale_x, scale_y=scale_y)
        if len(scales) > 2:
            res.scale_z = oscales[2]
        return res

    def get_thumbnail(self, length):
        """Return thumbnail data whose longest side is `length` pixels."""
        wd, ht = self.get_size()
        if ht == 0:
            # Degenerate (empty) image; avoid division by zero below.
            width, height = 1, 1
        elif wd > ht:
            width, height = length, int(length * float(ht) / wd)
        else:
            width, height = int(length * float(wd) / ht), length

        res = self.get_scaled_cutout_wdht(0, 0, wd, ht, width, height)
        return res.data

    def get_pixels_on_line(self, x1, y1, x2, y2, getvalues=True):
        """Uses Bresenham's line algorithm to enumerate the pixels along
        a line.
        (see http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm)

        If `getvalues`==False then it will return tuples of (x, y) coordinates
        instead of pixel values.
        """
        # NOTE: seems to be necessary or we get a non-terminating result
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

        dx = abs(x2 - x1)
        dy = abs(y2 - y1)
        if x1 < x2:
            sx = 1
        else:
            sx = -1
        if y1 < y2:
            sy = 1
        else:
            sy = -1
        err = dx - dy

        res = []
        x, y = x1, y1
        while True:
            if getvalues:
                try:
                    val = self.get_data_xy(x, y)
                except Exception:
                    # Off-image coordinates read as NaN.
                    # NOTE(review): np.NaN was removed in NumPy 2.0;
                    # np.nan is the portable spelling.
                    val = np.NaN
                res.append(val)
            else:
                res.append((x, y))
            if (x == x2) and (y == y2):
                break
            e2 = 2 * err
            if e2 > -dy:
                err = err - dy
                x += sx
            if e2 < dx:
                err = err + dx
                y += sy

        return res

    def info_xy(self, data_x, data_y, settings):
        """Return a Bunch of information about the pixel at data coords
        (data_x, data_y); `value` is None when the lookup fails."""
        # Get the value under the data coordinates
        try:
            # We report the value across the pixel, even though the coords
            # change halfway across the pixel
            _d_x, _d_y = (int(np.floor(data_x + 0.5)),
                          int(np.floor(data_y + 0.5)))
            value = self.get_data_xy(_d_x, _d_y)

        except Exception as e:
            value = None

        info = Bunch.Bunch(itype='base', data_x=data_x, data_y=data_y,
                           x=data_x, y=data_y, value=value)

        wd, ht = self.get_size()
        if 0 < data_x < wd and 0 < data_y < ht:
            info.image_x, info.image_y = data_x, data_y

        return info
class Header(dict):
    """An ordered header: maps key -> Bunch(key, value, comment) cards.

    Insertion order is tracked explicitly in `keyorder`; item access
    unwraps the stored card to its bare value.
    """

    def __init__(self, *args, **kwdargs):
        super(Header, self).__init__(*args, **kwdargs)
        self.keyorder = []

    def __getitem__(self, key):
        # Unwrap the stored card to its bare value.
        return super(Header, self).__getitem__(key).value

    def __setitem__(self, key, value):
        try:
            card = super(Header, self).__getitem__(key)
        except KeyError:
            # New key: create a fresh card and remember its position.
            card = Bunch.Bunch(key=key, value=value, comment='')
            self.keyorder.append(key)
            super(Header, self).__setitem__(key, card)
        else:
            card.value = value
        return card

    def __delitem__(self, key):
        super(Header, self).__delitem__(key)
        self.keyorder.remove(key)

    def get_card(self, key):
        """Return the full card (key/value/comment bunch) for `key`."""
        return super(Header, self).__getitem__(key)

    def set_card(self, key, value, comment=None):
        """Set value and (optionally) comment for `key`, creating the card."""
        try:
            card = super(Header, self).__getitem__(key)
        except KeyError:
            card = Bunch.Bunch(key=key, value=value,
                               comment='' if comment is None else comment)
            self.keyorder.append(key)
            super(Header, self).__setitem__(key, card)
        else:
            card.value = value
            if comment is not None:
                card.comment = comment
        return card

    def get_keyorder(self):
        return self.keyorder

    def keys(self):
        # Report keys in insertion order.
        return self.keyorder

    def items(self):
        return [(key, self[key]) for key in self.keyorder]

    def get(self, key, alt=None):
        try:
            return self[key]
        except KeyError:
            return alt

    def merge(self, hdr, override_keywords=False):
        """Copy cards from another Header; existing keys are kept unless
        `override_keywords` is set."""
        if not isinstance(hdr, Header):
            raise ValueError("need to pass a compatible header for merge")
        for key in hdr.keys():
            if override_keywords or key not in self:
                card = hdr.get_card(key)
                self.set_card(key, card.value, comment=card.comment)

    def update(self, map_kind):
        for key, value in map_kind.items():
            self[key] = value

    def asdict(self):
        return dict((key, self[key]) for key in self.keyorder)
# END
|
|
# Copyright 2007 Owen Taylor
#
# This file is part of Reinteract and distributed under the terms
# of the BSD license. See the file COPYING in the Reinteract
# distribution for full details.
#
########################################################################
import pango
import gtk
import data_format
import doc_format
from global_settings import global_settings
from popup import Popup
# Maximum popup height in pixels before the content must scroll.
MAX_HEIGHT = 300
# Inner padding (pixels) between the window border and the text view.
PADDING = 5

# Size of fonts in the doc popup relative to normal application font size
FONT_SCALE = 0.9
class DocPopup(Popup):
"""Class implementing a popup showing docs about an object"""
__gsignals__ = {
'destroy': 'override',
'size-request': 'override',
'size-allocate': 'override',
'map': 'override',
'style-set': 'override'
}
#
# There are basically three modes to the popup:
#
# - Enough space for the text (also used when can_focus=False)
# - Needs vertical scrollbar, not focused (shows 'Press F2 for focus at the bottom)
# - Needs vertical scrollbar, focused (has scrollbar)
#
# Trying to deal with all these different modes by using scrolled windows
# and vboxes would make it really hard to get the details right. Instead we
# get the ultimate control by overriding the container methods of gtk.Window
# and doing everything ourself. See:
#
# - The calls to __set_parent() in __init__
# - The overrides of do_map(), do_forall(), do_size_request(), do_size_allocate()
#
def __init__(self, fixed_height=False, fixed_width=False, max_height=MAX_HEIGHT, can_focus=True):
    """Build the popup: a TextView plus an optional vertical scrollbar.

    Both children are parented directly on the window (no container);
    see the class comment about the manual geometry management.
    """
    Popup.__init__(self)

    self.__fixed_height = fixed_height
    self.__fixed_width = fixed_width
    self.__max_height = max_height
    self.__can_focus = can_focus

    self.__view = gtk.TextView()
    self.__view.set_editable(False)
    # Pale-yellow tooltip-style background with black text.
    bg_color = gtk.gdk.Color(0xffff, 0xffff, 0xbfbf)
    self.__view.modify_base(gtk.STATE_NORMAL, bg_color)
    self.modify_bg(gtk.STATE_NORMAL, bg_color)
    self.set_app_paintable(True)
    self.__view.modify_text(gtk.STATE_NORMAL, gtk.gdk.Color(0, 0, 0))
    self.__view.set_parent(self)
    self.__view.show()
    self.__view.grab_focus()

    # Re-style whenever the user's doc-tooltip font preference changes.
    self.__font_is_custom_connection = global_settings.connect('notify::doc-tooltip-font-is-custom', self.__update_font)
    self.__font_name_connection = global_settings.connect('notify::doc-tooltip-font-name', self.__update_font)
    self.__update_font()

    self.__scrollbar = gtk.VScrollbar()
    self.__scrollbar.set_parent(self)
    self.__scrollbar.show()
    self.__view.emit('set-scroll-adjustments', None, self.__scrollbar.get_adjustment())
    self.__vscrolled = False

    self.set_resizable(False)

    buf = self.__view.get_buffer()
    # Text tags used when formatting docs/data into the buffer.
    self.__bold_tag = buf.create_tag(None, weight=pango.WEIGHT_BOLD)
    self.__heading_type_tag = buf.create_tag(None, weight=pango.WEIGHT_BOLD, pixels_below_lines=5)
    self.__inline_type_tag = self.__bold_tag
    self.__value_tag = buf.create_tag(None, family="monospace")

    self.__target = None
    self.focused = False
def __update_font(self, *args):
    """Apply the configured doc-tooltip font (scaled down) to the view."""
    if global_settings.doc_tooltip_font_is_custom:
        self.__font = pango.FontDescription(global_settings.doc_tooltip_font_name)
    else:
        self.__font = self.get_style().font_desc

    # We round the scaled font size to an integer point size, because fonts may
    # (or may not be) set up to look better at integer point sizes
    new_size = 1024 * int(FONT_SCALE * self.__font.get_size() / 1024)
    self.__font.set_size(new_size)

    self.__view.modify_font(self.__font)
def set_target(self, target):
    """Set the object that the popup is showing documentation about"""
    if target is self.__target:
        return

    self.__target = target
    buf = self.__view.get_buffer()
    # Replace the old contents entirely.
    buf.delete(buf.get_start_iter(), buf.get_end_iter())
    if target is not None:
        # Data objects get a formatted value dump; anything else gets
        # its documentation.
        if data_format.is_data_object(target):
            data_format.insert_formatted(buf, buf.get_start_iter(), target, self.__heading_type_tag, self.__inline_type_tag, self.__value_tag)
        else:
            doc_format.insert_docs(buf, buf.get_start_iter(), target, self.__bold_tag)
    buf.place_cursor(buf.get_start_iter())
    # Scroll back to the top for the new target.
    self.__scrollbar.get_adjustment().set_value(0.)
def do_destroy(self):
    # Disconnect our global-settings handlers so the popup can be
    # garbage collected.
    global_settings.disconnect(self.__font_is_custom_connection)
    global_settings.disconnect(self.__font_name_connection)
def do_size_request(self, request):
    """Compute the popup size: text size plus padding, capped at
    __max_height, plus room for the scrollbar when focused."""
    view_width, view_height = self.__view.size_request()
    bw = self.get_border_width()

    request.height = view_height + 2 * (bw + PADDING)
    # Will the text need to scroll vertically?
    self.__vscrolled = self.__max_height > 0 and request.height > self.__max_height
    # The scrollbar is only shown once the popup is focused.
    self.__scrollbar.set_child_visible(self.focused and self.__vscrolled)
    if self.__fixed_height:
        request.height = self.__max_height
    else:
        if self.__max_height > 0 and request.height > self.__max_height:
            request.height = self.__max_height

    request.width = view_width + 2 * (bw + PADDING)
    if self.focused and self.__vscrolled:
        scrollbar_width, _ = self.__scrollbar.size_request()
        request.width += scrollbar_width

    # fixed_width doesn't mean completely fixed, it means to put a floor on it so we don't bounce
    # the size too much
    metrics = self.get_pango_context().get_metrics(self.__font)
    if self.__fixed_width:
        request.width = max(request.width, metrics.get_approximate_char_width() * (90. / pango.SCALE))

    # We always want a maximum width so that faulty docs don't cause us to have widths many times
    # the width of the screen
    request.width = min(request.width, metrics.get_approximate_char_width() * (120. / pango.SCALE))
def __create_f2_layout(self):
    # Pango layout for the focus hint drawn along the bottom edge.
    return self.create_pango_layout("Press 'F2' for focus")
def do_size_allocate(self, allocation):
    """Manually lay out the text view (and scrollbar when focused),
    reserving a strip at the bottom for the 'Press F2' hint."""
    self.allocation = allocation

    if self.focused and self.__vscrolled:
        scrollbar_width, _ = self.__scrollbar.size_request()
    else:
        scrollbar_width = 0

    bw = self.get_border_width()

    child_allocation = gtk.gdk.Rectangle()
    child_allocation.x = bw + PADDING
    child_allocation.width = allocation.width - 2 * (bw + PADDING) - scrollbar_width
    if self.__vscrolled and self.__can_focus:
        if not self.focused:
            # Leave room for the 'Press F2 for focus' line at the bottom.
            layout = self.__create_f2_layout()
            _, height = layout.get_pixel_size()
            child_allocation.y = bw + PADDING
            child_allocation.height = allocation.height - 2 * bw - PADDING - height
        else:
            child_allocation.y = bw
            child_allocation.height = allocation.height - 2 * bw
    else:
        child_allocation.y = bw + PADDING
        child_allocation.height = allocation.height - 2 * (bw + PADDING)

    self.__view.size_allocate(child_allocation)

    if self.focused and self.__vscrolled:
        # Scrollbar hugs the right edge, inset one pixel.
        child_allocation.x = allocation.width - scrollbar_width - 1
        child_allocation.y = 1
        child_allocation.width = scrollbar_width
        child_allocation.height = allocation.height - 2
        self.__scrollbar.size_allocate(child_allocation)
def do_expose_event(self, event):
    """Draw the popup, then overlay the F2 focus hint while unfocused."""
    Popup.do_expose_event(self, event)
    if not (self.__can_focus and not self.focused and self.__vscrolled):
        return
    hint = self.__create_f2_layout()
    hint_width, hint_height = hint.get_pixel_size()
    cr = event.window.cairo_create()
    cr.set_source_rgb(0., 0., 0.)
    # One-pixel separator line above the hint strip.
    cr.rectangle(0, self.allocation.height - hint_height, self.allocation.width, 1)
    cr.fill()
    # Right-align the hint text with a small margin.
    cr.move_to(self.allocation.width - hint_width - 5, self.allocation.height - hint_height)
    cr.show_layout(hint)
def do_forall(self, include_internals, func, data):
    """Iterate over child widgets; both children here are internal."""
    if not include_internals:
        return
    func(self.__view, data)
    func(self.__scrollbar, data)
def do_map(self):
    """Map the window and its children.

    The scrollbar is mapped only while the popup is focused and scrollable.
    """
    Popup.do_map(self)
    self.__view.map()
    if self.focused and self.__vscrolled:
        self.__scrollbar.map()
def do_style_set(self, old_style):
    """Refresh the cached font when the widget style changes."""
    # Calling update_font() from the ::style-set handler on the view would
    # trigger an infinite loop, but it's fine to do it from the handler on
    # the toplevel window
    self.__update_font()
def __show(self, focus):
    """Show the popup, optionally focusing it as part of showing.

    Args:
        focus: if True, the popup grabs focus; when already showing only
            the focus request is honored.
    """
    if self.showing:
        if focus:
            self.focus()
        return
    # We want to avoid:
    #
    # - get the size for the popup without validating the TextView
    # - allocate at that size, queuing a resize because the
    #   gtk_text_view_size_allocate() flushes the "first validate idle"
    # - popup small
    # - resize larger
    #
    # So before we show the popup at all, we allocate the TextView
    # at a large size so it can figure out how big it really wants
    # to be, and queue a resize at that size. Then we go ahead and
    # show the window.
    self.__view.size_request()
    self.__view.size_allocate(gtk.gdk.Rectangle(0, 0, 10000, 1000))
    self.__view.queue_resize()
    if focus:
        # changing the focus state can change our requisition by showing
        # the scrollbar. We set the focused flag first so we show at the
        # right size.
        self.focused = True
        self.queue_resize()
    self.show()
    if focus:
        self.focus()
    self.showing = True
def popup(self):
    """Show the popup"""
    self.__show(focus=False)
def popup_focused(self):
    """Show the popup initially focused"""
    self.__show(focus=True)
def popdown(self):
    """Hide the popup"""
    if not self.showing:
        return
    self.showing = False
    if self.focused:
        # Dropping focus hides the scrollbar, which changes our requisition.
        self.focused = False
        self.queue_resize()
    self.hide()
def focus(self):
    """Grab focus for the popup; only valid when created with can_focus."""
    assert self.__can_focus
    Popup.focus(self)
    if self.showing:
        # Focusing can reveal the scrollbar, so re-request our size.
        self.queue_resize()
def on_key_press_event(self, event):
    """Do key press handling while the popup is focused.
    Returns True if the key press is handled, False otherwise.
    """
    if event.keyval != gtk.keysyms.Escape:
        # Anything other than Escape goes through the normal event machinery.
        return self.event(event)
    self.popdown()
    return True
if __name__ == "__main__":  # INTERACTIVE
    # Interactive smoke test: show four popups in different modes, all
    # documenting the stdlib 're' module (or a plain range object).
    import re
    # Plain popup, unfocused.
    popup = DocPopup()
    popup.set_target(re)
    popup.popup()
    # Popup shown already focused.
    popup = DocPopup()
    popup.set_target(re)
    popup.move(0, 325)
    popup.popup_focused()
    # Popup that can never take focus.
    popup = DocPopup(can_focus=False)
    popup.set_target(re)
    popup.move(0, 650)
    popup.popup()
    # Focused popup on a non-module target.
    popup = DocPopup()
    popup.set_target(range(200))
    popup.move(500, 0)
    popup.popup_focused()
    gtk.main()
|
|
"""Tests for vizier.pyvizier.shared.trial."""
import copy
import datetime
import numpy as np
from vizier.pyvizier.shared import trial
from absl.testing import absltest
from absl.testing import parameterized
# Module-level aliases to keep the test bodies short.
Metric = trial.Metric
Measurement = trial.Measurement
class MetricTest(absltest.TestCase):
    """Validation behavior of trial.Metric."""

    def testMetricCreation(self):
        # A non-negative std is accepted.
        _ = Metric(value=0, std=0.5)

    def testMetricCanHaveNaN(self):
        # NaN values and NaN stds are allowed (-nan is still NaN).
        _ = Metric(value=np.nan, std=-np.nan)

    def testMetricCannotHaveNegativeStd(self):
        with self.assertRaises(ValueError):
            _ = Metric(value=0, std=-0.5)
class MeasurementTest(absltest.TestCase):
    """Measurement coerces raw floats into Metric objects on assignment."""

    def testMetricsInitializedFromFloats(self):
        measurement = Measurement()
        measurement.metrics = {'a': 0.3}
        self.assertEqual(measurement.metrics['a'], Metric(0.3))
        measurement.metrics['b'] = 0.5
        self.assertEqual(measurement.metrics, {'a': Metric(0.3), 'b': Metric(0.5)})

    def testMetrics(self):
        measurement = Measurement()
        measurement.metrics = {'a': Metric(0.3)}
        self.assertEqual(measurement.metrics['a'], Metric(0.3))

    def testTimeStampsAreNotFrozen(self):
        # elapsed_secs and steps stay writable after construction.
        measurement = Measurement()
        measurement.elapsed_secs = 1.0
        measurement.steps = 5
# Alias to keep the test bodies short.
ParameterValue = trial.ParameterValue
class ParameterValueTest(parameterized.TestCase):
    """Type-coercion behavior (as_float/as_int/as_bool/as_str) of ParameterValue."""

    @parameterized.named_parameters(('True', True), ('False', False))
    def testBool(self, bool_value):
        value = ParameterValue(bool_value)
        self.assertEqual(value.as_float, float(bool_value))
        self.assertEqual(value.as_int, int(bool_value))
        self.assertEqual(value.as_str, str(bool_value).lower())

    def testIntegralFloat0(self):
        value = ParameterValue(0.0)
        self.assertEqual(value.as_float, 0.0)
        self.assertEqual(value.as_int, 0)
        self.assertEqual(value.as_bool, False)
        self.assertIsNone(value.as_str)

    def testIntegralFloat1(self):
        value = ParameterValue(1.0)
        self.assertEqual(value.as_float, 1.0)
        self.assertEqual(value.as_int, 1)
        self.assertEqual(value.as_bool, True)
        self.assertIsNone(value.as_str)

    def testIntegralFloat2(self):
        # Only 0.0/1.0 are bool-convertible.
        value = ParameterValue(2.0)
        self.assertEqual(value.as_float, 2.0)
        self.assertEqual(value.as_int, 2)
        self.assertIsNone(value.as_bool)
        self.assertIsNone(value.as_str)

    def testInteger0(self):
        value = ParameterValue(0)
        self.assertEqual(value.as_float, 0)
        self.assertEqual(value.as_int, 0)
        self.assertEqual(value.as_bool, False)
        self.assertIsNone(value.as_str)

    def testInteger1(self):
        value = ParameterValue(1)
        self.assertEqual(value.as_float, 1)
        self.assertEqual(value.as_int, 1)
        self.assertEqual(value.as_bool, True)
        self.assertIsNone(value.as_str)

    def testInteger2(self):
        # Only 0/1 are bool-convertible.
        value = ParameterValue(2)
        self.assertEqual(value.as_float, 2)
        self.assertEqual(value.as_int, 2)
        self.assertIsNone(value.as_bool)
        self.assertIsNone(value.as_str)

    def testStringTrue(self):
        value = ParameterValue('true')
        self.assertEqual(value.as_bool, True)
        self.assertEqual(value.as_str, 'true')

    def testStringFalse(self):
        value = ParameterValue('false')
        self.assertEqual(value.as_bool, False)
        self.assertEqual(value.as_str, 'false')

    def testCastAsExternalNone(self):
        value = ParameterValue(1.0)
        # pytype: disable=wrong-arg-types
        with self.assertRaisesRegex(ValueError, 'Unknown external type'):
            value.cast(None)
        # pytype: enable=wrong-arg-types

    def testParameterCanHaveNonFiniteValues(self):
        ParameterValue(float('nan'))
        ParameterValue(value=float('inf'))
        # Fix: the original constructed +inf twice; exercise -inf as the
        # test name implies.
        ParameterValue(value=float('-inf'))
class TrialTest(absltest.TestCase):
    """Completion semantics of trial.Trial."""

    def testCompleteInplace(self):
        test = trial.Trial()
        measurement = Measurement(metrics={
            'pr-auc': Metric(value=0.8),
            'latency': Metric(value=32)
        })
        completed = test.complete(measurement, inplace=True)
        # The trial was completed in place.
        self.assertEqual(test.final_measurement, measurement)
        # completion_time falls between creation and "now" (tz-aware).
        self.assertLessEqual(test.completion_time,
                             datetime.datetime.now().astimezone())
        self.assertGreaterEqual(test.completion_time, test.creation_time)
        self.assertGreaterEqual(test.duration.total_seconds(), 0)
        # completed is the same reference as test.
        self.assertEqual(test, completed)

    def testCompleteNotInplace(self):
        """Complete with inplace=False."""
        test = trial.Trial(status=trial.TrialStatus.PENDING)
        measurement = Measurement(metrics={
            'pr-auc': Metric(value=0.8),
            'latency': Metric(value=32)
        })
        test_copy = copy.deepcopy(test)
        completed = test.complete(measurement, inplace=False)
        # The returned Trial is completed.
        self.assertEqual(completed.final_measurement, measurement)
        self.assertGreaterEqual(completed.completion_time, completed.creation_time)
        self.assertLessEqual(completed.completion_time,
                             datetime.datetime.now().astimezone())
        self.assertGreaterEqual(completed.duration.total_seconds(), 0)
        self.assertEqual(completed.status, trial.TrialStatus.COMPLETED)
        self.assertTrue(completed.is_completed)
        # The original Trial is unchanged.
        self.assertEqual(test_copy, test)
        self.assertIsNone(test.final_measurement)
        self.assertIsNone(test.completion_time)
        self.assertIsNone(test.duration)
        self.assertEqual(test.status, trial.TrialStatus.PENDING)
        self.assertFalse(test.is_completed)

    def testDefaultsNotShared(self):
        """Make sure default parameters are not shared between instances."""
        trial1 = trial.Trial()
        trial2 = trial.Trial()
        trial1.parameters['x1'] = trial.ParameterValue(5)
        self.assertEmpty(trial2.parameters)
class ParameterDictTest(parameterized.TestCase):
    """ParameterDict wraps raw assignments into ParameterValue transparently."""

    @parameterized.parameters((True,), (3,), (1.,), ('aa',))
    def testAssignRawValue(self, v):
        params = trial.ParameterDict()
        params['p1'] = v
        self.assertEqual(params.get('p1'), trial.ParameterValue(v))
        self.assertEqual(params.get_value('p1'), v)
        self.assertEqual(params.get_value('p2', 'default'), 'default')
        self.assertLen(params, 1)
        self.assertLen(params.items(), 1)

    @parameterized.parameters((True,), (3,), (1.,), ('aa',))
    def testAssignWrappedValue(self, v):
        params = trial.ParameterDict()
        wrapped = trial.ParameterValue(v)
        params['p1'] = wrapped
        self.assertEqual(params.get('p1'), wrapped)
        self.assertEqual(params.get_value('p1'), wrapped.value)
        self.assertEqual(params.get_value('p2', 'default'), 'default')
        self.assertLen(params, 1)
        self.assertLen(params.items(), 1)
class SuggestionTestI(absltest.TestCase):
    """TrialSuggestion-to-Trial conversion keeps parameters and metadata."""

    def testToTrial(self):
        suggestion = trial.TrialSuggestion({'a': 3, 'b': True})
        suggestion.metadata['key'] = 'value'
        built = suggestion.to_trial(1)
        self.assertEqual(built.id, 1)
        self.assertEqual(built.parameters, suggestion.parameters)
        self.assertEqual(built.metadata, suggestion.metadata)
if __name__ == '__main__':
    # Discover and run every test case in this module.
    absltest.main()
|
|
"""Contains the hardware interface and drivers for the Open Pinball Project
platform hardware, including the solenoid, input, incandescent, and neopixel
boards.
"""
# opp.py
# Mission Pinball Framework
# Written by Hugh Spahr
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
import logging
import time
import sys
import threading
import Queue
import traceback
import io
from copy import deepcopy
from mpf.system.platform import Platform
from mpf.system.config import Config
from mpf.system.utility_functions import Util
try:
import serial
serial_imported = True
except:
serial_imported = False
# Minimum firmware versions needed for this module
MIN_FW = 0x00000100  # one byte per version field, i.e. 0.1.0.0
BAD_FW_VERSION = 0x01020304  # sentinel reported by pre-release firmware (see vers_resp)
class OppRs232Intf:
    """Constants and CRC helpers for the OPP Gen2 RS232 serial protocol.

    All protocol bytes are kept as single-character strings so they can be
    joined directly into outgoing serial messages.

    Fix: ``CFG_SOL_USE_SWITCH`` and ``CFG_SOL_AUTO_CLR`` were defined twice
    (once in the solenoid-count section and again in the solenoid-config
    section) with identical values; the duplicates are removed.
    """
    # Command bytes (position 1 of a message, after the card address).
    GET_SER_NUM_CMD = '\x00'
    GET_PROD_ID_CMD = '\x01'
    GET_GET_VERS_CMD = '\x02'
    GET_SET_SER_NUM_CMD = '\x03'
    RESET_CMD = '\x04'
    GO_BOOT_CMD = '\x05'
    CFG_SOL_CMD = '\x06'
    KICK_SOL_CMD = '\x07'
    READ_GEN2_INP_CMD = '\x08'
    CFG_INP_CMD = '\x09'
    SAVE_CFG_CMD = '\x0b'
    ERASE_CFG_CMD = '\x0c'
    GET_GEN2_CFG = '\x0d'
    SET_GEN2_CFG = '\x0e'
    CHNG_NEO_CMD = '\x0f'
    CHNG_NEO_COLOR = '\x10'
    CHNG_NEO_COLOR_TBL = '\x11'
    SET_NEO_COLOR_TBL = '\x12'
    INCAND_CMD = '\x13'
    CFG_IND_SOL_CMD = '\x14'
    CFG_IND_INP_CMD = '\x15'
    SET_IND_NEO_CMD = '\x16'
    INV_CMD = '\xf0'
    ILLEGAL_CMD = '\xfe'
    EOM_CMD = '\xff'

    # Card addressing: high nibble encodes the card type.
    CARD_ID_TYPE_MASK = '\xf0'
    CARD_ID_SOL_CARD = '\x00'
    CARD_ID_INP_CARD = '\x10'
    CARD_ID_GEN2_CARD = '\x20'

    # Gen2 board wing descriptors (four wings per board).
    NUM_G2_WING_PER_BRD = 4
    WING_SOL = '\x01'
    WING_INP = '\x02'
    WING_INCAND = '\x03'
    WING_SW_MATRIX_OUT = '\x04'
    WING_SW_MATRIX_IN = '\x05'
    WING_NEO = '\x06'

    # Input configuration.
    NUM_G2_INP_PER_BRD = 32
    CFG_INP_STATE = '\x00'
    CFG_INP_FALL_EDGE = '\x01'
    CFG_INP_RISE_EDGE = '\x02'

    # Solenoid configuration flag bits.
    NUM_G2_SOL_PER_BRD = 16
    CFG_SOL_USE_SWITCH = '\x01'
    CFG_SOL_AUTO_CLR = '\x02'

    NUM_COLOR_TBL = 32
    NEO_CMD_ON = 0x80

    # Incandescent commands and set-command flag bits.
    INCAND_ROT_LEFT = '\x00'
    INCAND_ROT_RIGHT = '\x01'
    INCAND_LED_ON = '\x02'
    INCAND_LED_OFF = '\x03'
    INCAND_BLINK_SLOW = '\x04'
    INCAND_BLINK_FAST = '\x05'
    INCAND_BLINK_OFF = '\x06'
    INCAND_SET_ON_OFF = '\x07'
    INCAND_SET_CMD = '\x80'
    INCAND_SET_ON = '\x01'
    INCAND_SET_BLINK_SLOW = '\x02'
    INCAND_SET_BLINK_FAST = '\x04'

    # Solenoid configuration layout: 3 config bytes per solenoid.
    CFG_BYTES_PER_SOL = 3
    INIT_KICK_OFFSET = 1
    DUTY_CYCLE_OFFSET = 2
    CFG_SOL_DISABLE = '\x00'

    # CRC8 lookup table (polynomial 0x07, initial value 0xff).
    CRC8_LOOKUP = \
        [0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15, 0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d,
         0x70, 0x77, 0x7e, 0x79, 0x6c, 0x6b, 0x62, 0x65, 0x48, 0x4f, 0x46, 0x41, 0x54, 0x53, 0x5a, 0x5d,
         0xe0, 0xe7, 0xee, 0xe9, 0xfc, 0xfb, 0xf2, 0xf5, 0xd8, 0xdf, 0xd6, 0xd1, 0xc4, 0xc3, 0xca, 0xcd,
         0x90, 0x97, 0x9e, 0x99, 0x8c, 0x8b, 0x82, 0x85, 0xa8, 0xaf, 0xa6, 0xa1, 0xb4, 0xb3, 0xba, 0xbd,
         0xc7, 0xc0, 0xc9, 0xce, 0xdb, 0xdc, 0xd5, 0xd2, 0xff, 0xf8, 0xf1, 0xf6, 0xe3, 0xe4, 0xed, 0xea,
         0xb7, 0xb0, 0xb9, 0xbe, 0xab, 0xac, 0xa5, 0xa2, 0x8f, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9d, 0x9a,
         0x27, 0x20, 0x29, 0x2e, 0x3b, 0x3c, 0x35, 0x32, 0x1f, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0d, 0x0a,
         0x57, 0x50, 0x59, 0x5e, 0x4b, 0x4c, 0x45, 0x42, 0x6f, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7d, 0x7a,
         0x89, 0x8e, 0x87, 0x80, 0x95, 0x92, 0x9b, 0x9c, 0xb1, 0xb6, 0xbf, 0xb8, 0xad, 0xaa, 0xa3, 0xa4,
         0xf9, 0xfe, 0xf7, 0xf0, 0xe5, 0xe2, 0xeb, 0xec, 0xc1, 0xc6, 0xcf, 0xc8, 0xdd, 0xda, 0xd3, 0xd4,
         0x69, 0x6e, 0x67, 0x60, 0x75, 0x72, 0x7b, 0x7c, 0x51, 0x56, 0x5f, 0x58, 0x4d, 0x4a, 0x43, 0x44,
         0x19, 0x1e, 0x17, 0x10, 0x05, 0x02, 0x0b, 0x0c, 0x21, 0x26, 0x2f, 0x28, 0x3d, 0x3a, 0x33, 0x34,
         0x4e, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5c, 0x5b, 0x76, 0x71, 0x78, 0x7f, 0x6a, 0x6d, 0x64, 0x63,
         0x3e, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2c, 0x2b, 0x06, 0x01, 0x08, 0x0f, 0x1a, 0x1d, 0x14, 0x13,
         0xae, 0xa9, 0xa0, 0xa7, 0xb2, 0xb5, 0xbc, 0xbb, 0x96, 0x91, 0x98, 0x9f, 0x8a, 0x8d, 0x84, 0x83,
         0xde, 0xd9, 0xd0, 0xd7, 0xc2, 0xc5, 0xcc, 0xcb, 0xe6, 0xe1, 0xe8, 0xef, 0xfa, 0xfd, 0xf4, 0xf3]

    @staticmethod
    def calc_crc8_whole_msg(msgChars):
        """Return the CRC8 over all of *msgChars* as a 1-character string."""
        crc8 = 0xff
        for ch in msgChars:
            crc8 = OppRs232Intf.CRC8_LOOKUP[crc8 ^ ord(ch)]
        return chr(crc8)

    @staticmethod
    def calc_crc8_part_msg(msgChars, startIndex, numChars):
        """Return the CRC8 over *numChars* characters of *msgChars*
        starting at *startIndex*, as a 1-character string."""
        crc8 = 0xff
        for index in range(startIndex, startIndex + numChars):
            crc8 = OppRs232Intf.CRC8_LOOKUP[crc8 ^ ord(msgChars[index])]
        return chr(crc8)
class HardwarePlatform(Platform):
"""Platform class for the OPP hardware.
Args:
machine: The main ``MachineController`` instance.
"""
def __init__(self, machine):
    """Set up the OPP platform: feature flags, state tables, serial links.

    Args:
        machine: The main ``MachineController`` instance.

    Exits the process if pySerial is missing or the configured
    driverboards type is unsupported.
    """
    super(HardwarePlatform, self).__init__(machine)
    self.log = logging.getLogger('OPP')
    self.log.info("Configuring OPP hardware.")
    self.platformVersion = "0.1.0.0"
    if not serial_imported:
        self.log.error('Could not import "pySerial". This is required for '
                       'the OPP platform interface')
        sys.exit()
    # ----------------------------------------------------------------------
    # Platform-specific hardware features. WARNING: Do not edit these. They
    # are based on what the OPP hardware can and cannot do.
    self.features['max_pulse'] = 255  # todo
    self.features['hw_timer'] = False
    self.features['hw_rule_coil_delay'] = False
    self.features['variable_recycle_time'] = False
    self.features['variable_debounce_time'] = False
    # Make the platform features available to everyone
    self.machine.config['platform'] = self.features
    # ----------------------------------------------------------------------
    self.hw_rules = dict()
    self.opp_connection = None  # set by register_processor_connection()
    self.opp_nodes = list()
    self.connection_threads = set()
    self.receive_queue = Queue.Queue()  # drained every tick()
    self.opp_incands = []
    self.incandDict = dict()  # incand number string -> OPPIncand
    self.opp_solenoid = []
    self.solDict = dict()  # solenoid number string -> OPPSolenoid
    self.opp_inputs = []
    self.inpDict = dict()  # input number string -> OPPInput
    self.inpAddrDict = dict()  # card address char -> OPPInput
    self.read_input_msg = OppRs232Intf.EOM_CMD  # rebuilt by get_gen2_cfg_resp()
    self.opp_neopixels = []
    self.neoCardDict = dict()  # card number string -> OPPNeopixelCard
    self.neoDict = dict()
    self.incand_reg = False  # True once any matrix light is configured
    self.numGen2Brd = 0
    self.gen2AddrArr = []
    self.currInpData = []
    self.badCRC = 0  # running count of CRC failures on received messages
    self.oppFirmwareVers = []
    self.minVersion = 0xffffffff  # lowest firmware version seen on any board
    self.tickCnt = 0
    config_spec = '''
                    ports: list
                    baud: int|115200
                    config_number_format: string|hex
                    debug: boolean|False
                    '''
    self.config = Config.process_config(config_spec=config_spec,
                                        source=self.machine.config['opp'])
    self.machine_type = (
        self.machine.config['hardware']['driverboards'].lower())
    if self.machine_type == 'gen1':
        self.log.info("Configuring the original OPP boards")
        self.log.error("Original OPP boards not currently supported.")
        sys.exit()
    elif self.machine_type == 'gen2':
        self.log.info("Configuring the OPP Gen2 boards")
    else:
        self.log.error('Invalid driverboards type: %s', self.machine_type)
        sys.exit()
    # Only including responses that should be received
    self.opp_commands = {
        OppRs232Intf.INV_CMD: self.inv_resp,
        OppRs232Intf.EOM_CMD: self.eom_resp,
        OppRs232Intf.GET_GEN2_CFG: self.get_gen2_cfg_resp,
        OppRs232Intf.READ_GEN2_INP_CMD: self.read_gen2_inp_resp,
        OppRs232Intf.GET_GET_VERS_CMD: self.vers_resp,
    }
    self._connect_to_hardware()
    if 'config_number_format' not in self.machine.config['opp']:
        self.machine.config['opp']['config_number_format'] = 'int'
def __repr__(self):
    """Return a short identifier for this platform object."""
    return '<Platform.OPP>'
def process_received_message(self, msg):
    """Sends an incoming message from the OPP hardware to the proper
    method for servicing.

    Byte 0 is the card address (Gen2 cards occupy 0x20-0x3f) and byte 1
    the command; bare EOM/INV messages carry the command in byte 0.
    Unknown or malformed messages are logged, never raised.

    Fix: the unknown-command branch formatted the hex string twice (a
    dead ``hex_string`` local plus an inline rebuild); it now formats once.
    """
    if len(msg) >= 1:
        first_byte = ord(msg[0])
        if (ord(OppRs232Intf.CARD_ID_GEN2_CARD) <= first_byte <
                (ord(OppRs232Intf.CARD_ID_GEN2_CARD) + 0x20)):
            # Message from a Gen2 card: the command follows the address byte.
            cmd = msg[1] if len(msg) >= 2 else OppRs232Intf.ILLEGAL_CMD
        # Look for EOM or INV commands
        elif (msg[0] == OppRs232Intf.INV_CMD) or (msg[0] == OppRs232Intf.EOM_CMD):
            cmd = msg[0]
        else:
            cmd = OppRs232Intf.ILLEGAL_CMD
    else:
        # No messages received, fake an EOM
        cmd = OppRs232Intf.EOM_CMD
    # Can't use try since it swallows too many errors for now
    if cmd in self.opp_commands:
        self.opp_commands[cmd](msg)
    else:
        hex_string = "".join(" 0x%02x" % ord(b) for b in msg)
        self.log.warning("Received unknown serial command?%s. (This is "
                         "very worrisome.)", hex_string)
        # TODO: This means synchronization is lost. Send EOM characters
        # until they come back
def _connect_to_hardware(self):
    """Spawn one SerialCommunicator per configured serial port."""
    # Connect to each port from the config. This process will cause the
    # connection threads to figure out which processor they've connected to
    # and to register themselves.
    for port in self.config['ports']:
        self.connection_threads.add(SerialCommunicator(
            platform=self, port=port, baud=self.config['baud'],
            send_queue=Queue.Queue(), receive_queue=self.receive_queue))
def register_processor_connection(self, name, communicator):
    """Once a communication link has been established with one of the
    OPP boards, this method sets the communicator link.

    Args:
        name: Identifier of the connected processor (unused here).
        communicator: The SerialCommunicator that established the link.
    """
    self.opp_connection = communicator
def update_incand(self):
    """Push pending incandescent state changes to the OPP hardware.

    Called once per game loop (from tick); only cards whose desired state
    differs from the last state written get an INCAND_SET_ON_OFF command.
    """
    out_chars = []
    for card in self.opp_incands:
        if (card.oldState ^ card.newState) == 0:
            continue  # nothing changed on this card
        card.oldState = card.newState
        # Command layout: addr, INCAND_CMD, SET_ON_OFF, 4 state bytes, CRC.
        cmd_chars = [
            card.addr,
            OppRs232Intf.INCAND_CMD,
            OppRs232Intf.INCAND_SET_ON_OFF,
            chr((card.newState >> 24) & 0xff),
            chr((card.newState >> 16) & 0xff),
            chr((card.newState >> 8) & 0xff),
            chr(card.newState & 0xff),
        ]
        cmd_chars.append(OppRs232Intf.calc_crc8_whole_msg(cmd_chars))
        out_chars.extend(cmd_chars)
    if len(out_chars) != 0:
        out_chars.append(OppRs232Intf.EOM_CMD)
        sendCmd = ''.join(out_chars)
        self.opp_connection.send(sendCmd)
        self.log.debug("Update incand cmd:%s", "".join(" 0x%02x" % ord(b) for b in sendCmd))
def get_hw_switch_states(self):
    """Return the current state of every configured OPP input.

    A cleared bit in the card state marks the switch as active (1).
    """
    hw_states = dict()
    for opp_inp in self.opp_inputs:
        for bit_index in range(32):
            bit = 1 << bit_index
            if (bit & opp_inp.mask) == 0:
                continue  # input not present on this card
            active = 0 if (bit & opp_inp.oldState) else 1
            hw_states[opp_inp.cardNum + '-' + str(bit_index)] = active
    self.hw_switch_data = hw_states
    return self.hw_switch_data
def inv_resp(self, msg):
    """Handle the inventory response: record each Gen2 card address found.

    *msg* carries one address byte per card after the INV_CMD byte,
    terminated by EOM_CMD.
    """
    self.log.debug("Received Inventory Response:%s", "".join(" 0x%02x" % ord(b) for b in msg))
    # Start at 1 to skip the INV_CMD byte itself.
    index = 1
    while (msg[index] != OppRs232Intf.EOM_CMD):
        if ((ord(msg[index]) & ord(OppRs232Intf.CARD_ID_TYPE_MASK)) == ord(OppRs232Intf.CARD_ID_GEN2_CARD)):
            self.numGen2Brd += 1
            self.gen2AddrArr.append(msg[index])
            self.currInpData.append(0)
        index += 1
    self.log.info("Found %d Gen2 OPP boards.", self.numGen2Brd)
def eom_resp(self, msg):
    """Handle an end-of-message byte; nothing to do."""
    # An EOM command can be used to resynchronize communications if message synch is lost
    pass
def get_gen2_cfg_resp(self, msg):
    """Parse GET_GEN2_CFG responses and create card objects per wing type.

    Each 7-byte chunk is: addr, GET_GEN2_CFG, four wing-type bytes, CRC8;
    multiple chunks (one per board) may arrive in a single message.  Also
    assembles ``self.read_input_msg``, the command used to poll all inputs.
    """
    # Multiple get gen2 cfg responses can be received at once
    self.log.debug("Received Gen2 Cfg Response:%s", "".join(" 0x%02x" % ord(b) for b in msg))
    end = False
    currIndex = 0
    wholeMsg = []
    while (not end):
        # Verify the CRC8 is correct
        crc8 = OppRs232Intf.calc_crc8_part_msg(msg, currIndex, 6)
        if (msg[currIndex + 6] != crc8):
            self.badCRC += 1
            hex_string = "".join(" 0x%02x" % ord(b) for b in msg)
            self.log.warning("Msg contains bad CRC:%s.", hex_string)
            end = True
        else:
            hasNeo = False
            wingIndex = 0
            solMask = 0
            inpMask = 0
            incandMask = 0
            # Each wing byte declares what hardware that quarter of the
            # board carries; accumulate capability bit masks.
            while (wingIndex < OppRs232Intf.NUM_G2_WING_PER_BRD):
                if (msg[currIndex + 2 + wingIndex] == OppRs232Intf.WING_SOL):
                    # A solenoid wing provides 4 solenoids and 4 inputs.
                    solMask |= (0x0f << (4 * wingIndex))
                    inpMask |= (0x0f << (8 * wingIndex))
                elif (msg[currIndex + 2 + wingIndex] == OppRs232Intf.WING_INP):
                    inpMask |= (0xff << (8 * wingIndex))
                elif (msg[currIndex + 2 + wingIndex] == OppRs232Intf.WING_INCAND):
                    incandMask |= (0xff << (8 * wingIndex))
                elif (msg[currIndex + 2 + wingIndex] == OppRs232Intf.WING_NEO):
                    hasNeo = True
                wingIndex += 1
            if (incandMask != 0):
                self.opp_incands.append(OPPIncandCard(msg[currIndex], incandMask, self.incandDict))
            if (solMask != 0):
                self.opp_solenoid.append(OPPSolenoidCard(msg[currIndex], solMask, self.solDict, self))
            if (inpMask != 0):
                # Create the input object, and add to the command to read all inputs
                self.opp_inputs.append(OPPInput(msg[currIndex], inpMask, self.inpDict,
                                                self.inpAddrDict, self.machine))
                # Add command to read all inputs to read input message
                inpMsg = []
                inpMsg.append(msg[currIndex])
                inpMsg.append(OppRs232Intf.READ_GEN2_INP_CMD)
                inpMsg.append('\x00')
                inpMsg.append('\x00')
                inpMsg.append('\x00')
                inpMsg.append('\x00')
                inpMsg.append(OppRs232Intf.calc_crc8_whole_msg(inpMsg))
                wholeMsg.extend(inpMsg)
            if hasNeo:
                self.opp_neopixels.append(OPPNeopixelCard(msg[currIndex], self.neoCardDict, self))
        if (not end):
            # Next chunk begins at +7: address byte at +7, command at +8.
            if (msg[currIndex + 7] == OppRs232Intf.EOM_CMD):
                end = True
            elif (msg[currIndex + 8] == OppRs232Intf.GET_GEN2_CFG):
                currIndex += 7
            else:
                self.log.warning("Malformed GET_GEN2_CFG response:%s.",
                                 "".join(" 0x%02x" % ord(b) for b in msg))
                end = True
                # TODO: This means synchronization is lost. Send EOM characters
                # until they come back
    wholeMsg.append(OppRs232Intf.EOM_CMD)
    self.read_input_msg = ''.join(wholeMsg)
def vers_resp(self, msg):
    """Parse GET_GET_VERS_CMD responses and track firmware versions.

    Each 7-byte chunk is: addr, GET_GET_VERS_CMD, four version bytes, CRC8;
    multiple chunks (one per board) may arrive in one message.  Tracks the
    minimum version across boards and aborts on the pre-release sentinel.
    """
    # Multiple get version responses can be received at once
    self.log.debug("Received Version Response:%s", "".join(" 0x%02x" % ord(b) for b in msg))
    end = False
    currIndex = 0
    while (not end):
        # Verify the CRC8 is correct
        crc8 = OppRs232Intf.calc_crc8_part_msg(msg, currIndex, 6)
        if (msg[currIndex + 6] != crc8):
            self.badCRC += 1
            hex_string = "".join(" 0x%02x" % ord(b) for b in msg)
            self.log.warning("Msg contains bad CRC:%s.", hex_string)
            end = True
        else:
            # Version is packed big-endian, one byte per dotted field.
            version = (ord(msg[currIndex + 2]) << 24) | \
                (ord(msg[currIndex + 3]) << 16) | \
                (ord(msg[currIndex + 4]) << 8) | \
                ord(msg[currIndex + 5])
            self.log.info("Firmware version: %d.%d.%d.%d", ord(msg[currIndex + 2]),
                          ord(msg[currIndex + 3]), ord(msg[currIndex + 4]),
                          ord(msg[currIndex + 5]))
            if (version < self.minVersion):
                self.minVersion = version
            if (version == BAD_FW_VERSION):
                self.log.error("Original firmware sent only to Brian before adding "
                               "real version numbers. The firmware must be updated before "
                               "MPF will work.")
                sys.exit()
            self.oppFirmwareVers.append(version)
        if (not end):
            # NOTE(review): this compares the byte at +7 (the next chunk's
            # address position) against GET_GET_VERS_CMD, whereas the
            # parallel parser get_gen2_cfg_resp checks the command byte at
            # +8 — confirm against the OPP protocol that chunked version
            # responses advance correctly here.
            if (msg[currIndex + 7] == OppRs232Intf.GET_GET_VERS_CMD):
                currIndex += 7
            elif (msg[currIndex + 7] == OppRs232Intf.EOM_CMD):
                end = True
            else:
                hex_string = "".join(" 0x%02x" % ord(b) for b in msg)
                self.log.warning("Malformed GET_GET_VERS_CMD response:%s.", hex_string)
                end = True
                # TODO: This means synchronization is lost. Send EOM characters
                # until they come back
def read_gen2_inp_resp(self, msg):
    """Handle a single read-input response and fire switch change events.

    The message layout is: addr, READ_GEN2_INP_CMD, four state bytes,
    CRC8.  A cleared bit marks an active switch, so a bit transitioning
    to 0 fires state=1 and vice versa.

    Fix: on CRC failure the original assigned a dead local ``end = True``
    (left over from the multi-chunk parsers); it now simply returns.
    """
    # Single read gen2 input response. Receive function breaks them down
    # Verify the CRC8 is correct
    crc8 = OppRs232Intf.calc_crc8_part_msg(msg, 0, 6)
    if (msg[6] != crc8):
        self.badCRC += 1
        hex_string = "".join(" 0x%02x" % ord(b) for b in msg)
        self.log.warning("Msg contains bad CRC:%s.", hex_string)
        return
    oppInp = self.inpAddrDict[msg[0]]
    newState = (ord(msg[2]) << 24) | \
        (ord(msg[3]) << 16) | \
        (ord(msg[4]) << 8) | \
        ord(msg[5])
    # Update the state which holds inputs that are active
    if hasattr(oppInp.machine, 'switch_controller'):
        changes = oppInp.oldState ^ newState
        if (changes != 0):
            currBit = 1
            for index in range(0, 32):
                if ((currBit & changes) != 0):
                    # A cleared bit means the switch is active.
                    if ((currBit & newState) == 0):
                        oppInp.machine.switch_controller.process_switch(state=1,
                            num=oppInp.cardNum + '-' + str(index))
                    else:
                        oppInp.machine.switch_controller.process_switch(state=0,
                            num=oppInp.cardNum + '-' + str(index))
                currBit <<= 1
    oppInp.oldState = newState
def configure_driver(self, config, device_type='coil'):
    """Configure one OPP solenoid and push its config to the card.

    Args:
        config: Driver config dict; 'number' is "<card>-<solenoid>".
        device_type: Kept for platform interface compatibility; unused here.

    Returns:
        Tuple (OPPSolenoid object, driver number string).

    Exits the process if no connection exists or the number is unknown.
    """
    if not self.opp_connection:
        self.log.critical("A request was made to configure an OPP solenoid, "
                          "but no OPP connection is available")
        sys.exit()
    if not config['number'] in self.solDict:
        self.log.critical("A request was made to configure an OPP solenoid "
                          "with number %s which doesn't exist" % config['number'])
        sys.exit()
    # Use new update individual solenoid command
    _, solenoid = config['number'].split('-')
    opp_sol = self.solDict[config['number']]
    opp_sol.driver_settings.update(opp_sol.merge_driver_settings(**config))
    self.log.debug("Config driver %s, %s, %s", config['number'],
                   opp_sol.driver_settings['pulse_ms'], opp_sol.driver_settings['hold_power'])
    pulse_len = int(opp_sol.driver_settings['pulse_ms'])
    hold = int(opp_sol.driver_settings['hold_power'])
    solIndex = int(solenoid) * OppRs232Intf.CFG_BYTES_PER_SOL
    # If hold is 0, set the auto clear bit
    if (hold == 0):
        cmd = OppRs232Intf.CFG_SOL_AUTO_CLR
    else:
        cmd = chr(0)
    # Mirror the new settings in the cached per-card config list.
    opp_sol.solCard.currCfgLst[solIndex] = cmd
    opp_sol.solCard.currCfgLst[solIndex + OppRs232Intf.INIT_KICK_OFFSET] = chr(pulse_len)
    opp_sol.solCard.currCfgLst[solIndex + OppRs232Intf.DUTY_CYCLE_OFFSET] = chr(hold)
    # Message layout: addr, CFG_IND_SOL_CMD, solenoid, cfg, pulse, hold, CRC, EOM.
    msg = []
    msg.append(opp_sol.solCard.addr)
    msg.append(OppRs232Intf.CFG_IND_SOL_CMD)
    msg.append(chr(int(solenoid)))
    msg.append(cmd)
    msg.append(chr(pulse_len))
    msg.append(chr(hold))
    msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
    msg.append(OppRs232Intf.EOM_CMD)
    cmd = ''.join(msg)
    self.log.debug("Writing individual config: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
    self.opp_connection.send(cmd)
    return (opp_sol, config['number'])
def configure_switch(self, config):
    """Look up a previously discovered OPP input to use as a switch.

    Returns a (OPPInput, number string) tuple; exits on bad config.
    """
    # A switch is termed as an input to OPP
    if not self.opp_connection:
        self.log.critical("A request was made to configure an OPP switch, "
                          "but no OPP connection is available")
        sys.exit()
    number = config['number']
    if number not in self.inpDict:
        self.log.critical("A request was made to configure an OPP switch "
                          "with number %s which doesn't exist" % number)
        sys.exit()
    return (self.inpDict[number], number)
def configure_led(self, config):
    """Register a neopixel on its card and return the pixel object.

    'number' is "<card>-<pixel>"; exits on bad config.
    """
    if not self.opp_connection:
        self.log.critical("A request was made to configure an OPP LED, "
                          "but no OPP connection is available")
        sys.exit()
    card, pixelNum = config['number'].split('-')
    if card not in self.neoCardDict:
        self.log.critical("A request was made to configure an OPP neopixel "
                          "with card number %s which doesn't exist" % card)
        sys.exit()
    return self.neoCardDict[card].add_neopixel(int(pixelNum), self.neoDict)
def configure_gi(self, config):
    """GI strings are not supported by OPP hardware; abort configuration."""
    self.log.critical("OPP hardware does not support configure GI")
    sys.exit()
def configure_matrixlight(self, config):
    """Look up an incandescent light to use as a matrix light.

    Returns a (OPPIncand, number string) tuple; also enables the periodic
    incand update in tick().  Exits on bad config.
    """
    if not self.opp_connection:
        self.log.critical("A request was made to configure an OPP matrix "
                          "light (incand board), but no OPP connection "
                          "is available")
        sys.exit()
    if not config['number'] in self.incandDict:
        self.log.critical("A request was made to configure a OPP matrix "
                          "light (incand board), with number %s "
                          "which doesn't exist" % config['number'])
        sys.exit()
    # Enables the periodic update_incand() call in tick().
    self.incand_reg = True
    return (self.incandDict[config['number']], config['number'])
def configure_dmd(self):
    """DMDs are not supported by OPP hardware; abort configuration."""
    self.log.critical("OPP hardware does not support configure DMD")
    sys.exit()
def null_dmd_sender(self, *args, **kwargs):
    """No-op sink for DMD data (OPP does not support a DMD)."""
    pass
def tick(self):
    """Per-loop service: flush incand changes, drain responses, poll inputs."""
    self.tickCnt += 1
    phase = self.tickCnt % 10
    if self.incand_reg and phase == 5:
        # Push pending incandescent changes once every ten ticks.
        self.update_incand()
    while not self.receive_queue.empty():
        self.process_received_message(self.receive_queue.get(False))
    if phase == 0:
        # Poll all input cards once every ten ticks.
        self.opp_connection.send(self.read_input_msg)
def write_hw_rule(self, switch_obj, sw_activity, driver_obj, driver_action,
                  disable_on_release=True, drive_now=True,
                  **driver_settings_overrides):
    """Used to write (or update) a hardware rule to the OPP hardware.

    *Hardware Rules* are used to configure the hardware controller to
    automatically change driver states based on switch changes. These rules
    are completely handled by the hardware (i.e. with no interaction from
    the Python game code). They're used for things that you want to happen
    fast, like firing coils when flipper buttons are pushed, slingshots, pop
    bumpers, etc.

    You can overwrite existing hardware rules at any time to change or
    remove them.

    Args:
        switch_obj: Which switch you're creating this rule for. The
            parameter is a reference to the switch object itself.
            Note: The OPP firmware currently only supports the using the
            dedicated switch.
        sw_activity: Int which specifies whether this coil should fire when
            the switch becomes active (1) or inactive (0)
            Note: The OPP firmware currently only supports firing the
            coil when switch becomes active.
        driver_obj: Driver object this rule is being set for.
        driver_action: String 'pulse' or 'hold' which describe what action
            will be applied to this driver
        drive_now: Should the hardware check the state of the switches when
            this rule is first applied, and fire the coils if they should
            be? Typically this is True, especially with flippers because you
            want them to fire if the player is holding in the buttons when
            the machine enables the flippers (which is done via several
            calls to this method.)
            Note: The OPP firmware always assumes this is True.
    """
    # Verify the switch number is correct for the driver number.
    card, solenoid = driver_obj.number.split('-')
    sw_card, sw_num = switch_obj.number.split('-')
    # Each solenoid's dedicated switch: remap bits 2-3 of the solenoid
    # number up by one position to get the expected input index.
    matching_sw = ((int(solenoid) & 0x0c) << 1) | (int(solenoid) & 0x03)
    if (card != sw_card) or (matching_sw != int(sw_num)):
        self.log.error('Invalid switch being configured for driver. Driver = %s '
                       'Switch = %s' % (driver_obj.name, switch_obj.name))
        return
    driver_settings = deepcopy(driver_obj.hw_driver.driver_settings)
    driver_settings.update(driver_obj.hw_driver.merge_driver_settings(
        **driver_settings_overrides))
    self.log.debug("Setting HW Rule. Driver: %s, Driver settings: %s",
                   driver_obj.name, driver_settings)
    self.hw_rules[driver_obj] = {'pulse_ms': driver_settings['pulse_ms'],
                                 'hold_power': driver_settings['hold_power'],
                                 'switch': switch_obj.number}
    pulse_len = int(driver_settings['pulse_ms'])
    hold = int(driver_settings['hold_power'])
    solIndex = int(solenoid) * OppRs232Intf.CFG_BYTES_PER_SOL
    # If hold is 0, set the auto clear bit
    if (hold == 0):
        cmd = chr(ord(OppRs232Intf.CFG_SOL_USE_SWITCH) +
                  ord(OppRs232Intf.CFG_SOL_AUTO_CLR))
    else:
        cmd = OppRs232Intf.CFG_SOL_USE_SWITCH
    # Mirror the rule in the cached per-card config list.
    driver_obj.hw_driver.solCard.currCfgLst[solIndex] = cmd
    driver_obj.hw_driver.solCard.currCfgLst[solIndex + OppRs232Intf.INIT_KICK_OFFSET] = chr(pulse_len)
    driver_obj.hw_driver.solCard.currCfgLst[solIndex + OppRs232Intf.DUTY_CYCLE_OFFSET] = chr(hold)
    # Message layout: addr, CFG_IND_SOL_CMD, solenoid, cfg, pulse, hold, CRC, EOM.
    msg = []
    msg.append(driver_obj.hw_driver.solCard.addr)
    msg.append(OppRs232Intf.CFG_IND_SOL_CMD)
    msg.append(chr(int(solenoid)))
    msg.append(cmd)
    msg.append(chr(pulse_len))
    msg.append(chr(hold))
    msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
    msg.append(OppRs232Intf.EOM_CMD)
    cmd = ''.join(msg)
    self.log.debug("Writing hardware rule: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
    self.opp_connection.send(cmd)
def clear_hw_rule(self, sw_name):
"""Clears a hardware rule.
This is used if you want to remove the linkage between a switch and
some driver activity. For example, if you wanted to disable your
flippers (so that a player pushing the flipper buttons wouldn't cause
the flippers to flip), you'd call this method with your flipper button
as the *sw_num*.
Args:
sw_name: The string name of the switch whose rule you want to clear.
"""
sw_num = self.machine.switches[sw_name].number
# find the rule(s) based on this switch
coils = [k for k, v in self.hw_rules.iteritems() if v['switch'] == sw_num]
self.log.debug("Clearing HW Rule for switch: %s %s, coils: %s", sw_name,
sw_num, coils)
for driver_obj in coils:
del self.hw_rules[driver_obj]
driver_settings = driver_obj.hw_driver.driver_settings
card, solenoid = driver_obj.number.split('-')
solIndex = int(solenoid) * OppRs232Intf.CFG_BYTES_PER_SOL
cmd = chr(ord(driver_obj.hw_driver.solCard.currCfgLst[solIndex]) & \
~ord(OppRs232Intf.CFG_SOL_USE_SWITCH))
driver_obj.hw_driver.solCard.currCfgLst[solIndex] = cmd
msg = []
msg.append(driver_obj.hw_driver.solCard.addr)
msg.append(OppRs232Intf.CFG_IND_SOL_CMD)
msg.append(chr(int(solenoid)))
msg.append(cmd)
msg.append(driver_obj.hw_driver.solCard.currCfgLst[solIndex + 1])
msg.append(driver_obj.hw_driver.solCard.currCfgLst[solIndex + 2])
msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
msg.append(OppRs232Intf.EOM_CMD)
cmd = ''.join(msg)
self.log.debug("Clearing hardware rule: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
self.opp_connection.send(cmd)
class OPPIncandCard(object):
    """One OPP Gen2 incandescent wing card; registers a bulb per mask bit."""

    def __init__(self, addr, mask, incandDict):
        """Set up card state and populate *incandDict* with this card's bulbs."""
        self.log = logging.getLogger('OPPIncand')
        self.addr = addr
        self.oldState = 0
        self.newState = 0
        self.mask = mask
        self.log.debug("Creating OPP Incand at hardware address: 0x%02x",
                       ord(addr))
        card = str(ord(addr) - ord(OppRs232Intf.CARD_ID_GEN2_CARD))
        # One OPPIncand object for every bit set in the 32-bit wing mask.
        for bit in range(32):
            if mask & (1 << bit):
                number = '%s-%d' % (card, bit)
                incandDict[number] = OPPIncand(self, number)
class OPPIncand(object):
    """A single incandescent bulb on an OPP incandescent card.

    The owning card keeps a 32-bit ``newState`` bitmask; this object simply
    sets or clears its own bit within that mask.
    """

    def __init__(self, incandCard, number):
        self.incandCard = incandCard    # owning OPPIncandCard
        self.number = number            # "card-bit" address string

    def _bit(self):
        """Return this bulb's bit within the card's state mask."""
        bit_str = self.number.split("-")[1]
        return 1 << int(bit_str)

    def off(self):
        """Disables (turns off) this matrix light."""
        self.incandCard.newState &= ~self._bit()

    def on(self, brightness=255, fade_ms=0, start=0):
        """Enables (turns on) this driver."""
        # Incandescents are binary here: zero brightness clears the bit,
        # anything else sets it (fade_ms/start are accepted but unused).
        if brightness == 0:
            self.incandCard.newState &= ~self._bit()
        else:
            self.incandCard.newState |= self._bit()
class OPPSolenoid(object):
    """One solenoid/driver on an OPP Gen2 solenoid wing card.

    Serial messages are built as lists of 1-char strings (Python-2 byte
    strings), CRC'd, joined, and handed to the platform's connection.
    """
    def __init__(self, solCard, number):
        # solCard: owning OPPSolenoidCard; number: "card-solenoid" string.
        self.solCard = solCard
        self.number = number
        self.log = solCard.log
    def merge_driver_settings(self,
                              pulse_ms=None,
                              pwm_on_ms=None,
                              pwm_off_ms=None,
                              pulse_power=None,
                              hold_power=None,
                              pulse_power32=None,
                              hold_power32=None,
                              pulse_pwm_mask=None,
                              hold_pwm_mask=None,
                              recycle_ms=None,
                              activation_time=None,
                              **kwargs
                              ):
        """Validate driver-setting overrides and return the merged subset.

        The OPP platform only supports 'pulse_ms' and 'hold_power'; any
        other truthy override raises ValueError.  Returns a dict containing
        only the keys that were explicitly supplied, values as strings.
        """
        if pwm_on_ms:
            raise ValueError("The setting 'pwm_on_ms' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if pwm_off_ms:
            raise ValueError("The setting 'pwm_off_ms' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if pulse_power:
            raise ValueError("The setting 'pulse_power' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if pulse_power32:
            raise ValueError("The setting 'pulse_power32' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if hold_power32:
            raise ValueError("The setting 'hold_power32' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if pulse_pwm_mask:
            raise ValueError("The setting 'pulse_pwm_mask' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if hold_pwm_mask:
            raise ValueError("The setting 'hold_pwm_mask' is not valid with the "
                             "OPP platform. Use hold_power instead.")
        if recycle_ms:
            raise ValueError("The setting 'recycle_ms' is not valid with the "
                             "OPP platform.")
        if activation_time:
            raise ValueError("The setting 'activation_time' is not valid with the "
                             "OPP platform.")
        return_dict = dict()
        if pulse_ms is not None:
            return_dict['pulse_ms'] = str(pulse_ms)
        if hold_power is not None:
            return_dict['hold_power'] = str(hold_power)
        return return_dict
    def disable(self):
        """Disables (turns off) this driver. """
        card, solenoid = self.number.split("-")
        sol_int = int(solenoid)
        mask = 1 << sol_int
        # Clear this solenoid's bit in the card's processor-control mask.
        self.solCard.procCtl &= ~mask
        # Message layout: addr, KICK_SOL_CMD, procCtl hi/lo, mask hi/lo, CRC.
        # NOTE(review): unlike the config commands, no EOM_CMD terminator is
        # appended here -- confirm the firmware accepts that.
        msg = []
        msg.append(self.solCard.addr)
        msg.append(OppRs232Intf.KICK_SOL_CMD)
        msg.append(chr((self.solCard.procCtl >> 8) & 0xff))
        msg.append(chr(self.solCard.procCtl & 0xff))
        msg.append(chr((mask >> 8) & 0xff))
        msg.append(chr(mask & 0xff))
        msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
        cmd = ''.join(msg)
        self.log.debug("Disabling solenoid driver: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
        self.solCard.platform.opp_connection.send(cmd)
    def enable(self):
        """Enables (turns on) this driver. """
        card, solenoid = self.number.split("-")
        sol_int = int(solenoid)
        mask = 1 << sol_int
        # Set this solenoid's bit in the card's processor-control mask.
        self.solCard.procCtl |= mask
        # Message layout: addr, KICK_SOL_CMD, procCtl hi/lo, mask hi/lo, CRC.
        msg = []
        msg.append(self.solCard.addr)
        msg.append(OppRs232Intf.KICK_SOL_CMD)
        msg.append(chr((self.solCard.procCtl >> 8) & 0xff))
        msg.append(chr(self.solCard.procCtl & 0xff))
        msg.append(chr((mask >> 8) & 0xff))
        msg.append(chr(mask & 0xff))
        msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
        cmd = ''.join(msg)
        self.log.debug("Enabling solenoid driver: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
        self.solCard.platform.opp_connection.send(cmd)
    def pulse(self, milliseconds=None):
        """Pulses this driver. """
        error = False
        # The pulse width is fixed by configuration; a differing request is
        # logged but not honored.
        if milliseconds and (milliseconds != int(self.driver_settings['pulse_ms'])):
            self.log.warn("OPP platform doesn't allow changing pulse width using pulse call. " \
                          "Tried %d, used %s", milliseconds, self.driver_settings['pulse_ms'])
        # Pulsing a driver that is configured with hold power would latch it
        # on, so refuse and tell the caller to use enable/disable instead.
        if (int(self.driver_settings['hold_power']) != 0):
            self.log.warn("OPP platform, trying to pulse a solenoid with a hold_power. " \
                          "That would lock the driver on. Use enable/disable calls.")
            error = True
        if (not error):
            _, solenoid = self.number.split("-")
            mask = 1 << int(solenoid)
            # For a pulse the solenoid's bit is sent as both value and mask;
            # presumably the configured initial-kick time bounds the pulse --
            # confirm against the OPP firmware docs.
            msg = []
            msg.append(self.solCard.addr)
            msg.append(OppRs232Intf.KICK_SOL_CMD)
            msg.append(chr((mask >> 8) & 0xff))
            msg.append(chr(mask & 0xff))
            msg.append(chr((mask >> 8) & 0xff))
            msg.append(chr(mask & 0xff))
            msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
            cmd = ''.join(msg)
            self.log.debug("Pulse driver: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
            self.solCard.platform.opp_connection.send(cmd)
        # NOTE(review): pulse_ms is stored via str() (decimal) elsewhere in
        # this file but parsed as hex here -- values above 9 may disagree;
        # confirm which representation is intended.
        hex_ms_string = self.driver_settings['pulse_ms']
        return Util.hex_string_to_int(hex_ms_string)
class OPPSolenoidCard(object):
    """One OPP Gen2 solenoid wing card; creates an OPPSolenoid per mask bit."""

    def __init__(self, addr, mask, solDict, platform):
        self.log = logging.getLogger('OPPSolenoid')
        self.addr = addr
        self.mask = mask
        self.platform = platform
        self.state = 0
        self.procCtl = 0
        # Shadow copy of the card's per-solenoid configuration bytes.
        cfg_size = (OppRs232Intf.NUM_G2_SOL_PER_BRD *
                    OppRs232Intf.CFG_BYTES_PER_SOL)
        self.currCfgLst = ['\x00'] * cfg_size
        self.log.debug("Creating OPP Solenoid at hardware address: 0x%02x",
                       ord(addr))
        card = str(ord(addr) - ord(OppRs232Intf.CARD_ID_GEN2_CARD))
        # One OPPSolenoid (with default settings) per bit set in the mask.
        for bit in range(16):
            if not (mask & (1 << bit)):
                continue
            number = '%s-%d' % (card, bit)
            opp_sol = OPPSolenoid(self, number)
            opp_sol.driver_settings = self.create_driver_settings(platform.machine)
            solDict[number] = opp_sol

    def create_driver_settings(self, machine):
        """Return the default driver-settings dict for this machine config."""
        default_pulse = machine.config['mpf']['default_pulse_ms']
        return {'pulse_ms': str(default_pulse), 'hold_power': '0'}
class OPPInput(object):
    """One OPP Gen2 input wing; registers itself for every switch in mask."""

    def __init__(self, addr, mask, inpDict, inpAddrDict, machine):
        self.log = logging.getLogger('OPPInput')
        self.addr = addr
        self.oldState = 0
        self.mask = mask
        self.cardNum = str(ord(addr) - ord(OppRs232Intf.CARD_ID_GEN2_CARD))
        self.machine = machine
        self.log.debug("Creating OPP Input at hardware address: 0x%02x",
                       ord(addr))
        # This object is reachable both by raw card address and by every
        # "card-switch" number it owns.
        inpAddrDict[addr] = self
        for bit in range(32):
            if mask & (1 << bit):
                inpDict['%s-%d' % (self.cardNum, bit)] = self
class OPPNeopixelCard(object):
    """One OPP Gen2 Neopixel wing card.

    Tracks the number of pixels in use and the card's 32-entry color table
    (shared by the OPPNeopixel objects on this card).
    """

    def __init__(self, addr, neoCardDict, platform):
        self.log = logging.getLogger('OPPNeopixel')
        self.addr = addr
        self.platform = platform
        self.card = str(ord(addr) - ord(OppRs232Intf.CARD_ID_GEN2_CARD))
        self.numPixels = 0
        self.numColorEntries = 0
        self.colorTableDict = dict()
        neoCardDict[self.card] = self
        self.log.debug("Creating OPP Neopixel card at hardware address: 0x%02x",
                       ord(addr))

    def add_neopixel(self, number, neoDict):
        """Create pixel *number* on this card, register it, and return it.

        Args:
            number: Zero-based pixel index on the card.
            neoDict: Platform-wide pixel dict keyed by "card-pixel" string.

        Returns:
            The newly created OPPNeopixel object.
        """
        # numPixels must always be (highest pixel index) + 1.  The previous
        # '>' comparison left the count stale when number equaled the current
        # count (e.g. adding pixel 0 to an empty card kept numPixels at 0).
        if number >= self.numPixels:
            self.numPixels = number + 1
        pixel_number = self.card + '-' + str(number)
        pixel = OPPNeopixel(pixel_number, self)
        neoDict[pixel_number] = pixel
        return pixel
class OPPNeopixel(object):
    """A single Neopixel (RGB LED) on an OPP Neopixel wing card."""

    def __init__(self, number, neoCard):
        self.log = logging.getLogger('OPPNeopixel')
        self.number = number
        self.current_color = '000000'
        self.neoCard = neoCard
        _, index = number.split('-')
        self.index_char = chr(int(index))
        self.log.debug("Creating OPP Neopixel: %s",
                       number)

    def rgb_to_hex(self, rgb):
        """Return an (R, G, B) triple as a 6-digit lowercase hex string."""
        red, green, blue = rgb[0], rgb[1], rgb[2]
        return '%02x%02x%02x' % (red, green, blue)

    def color(self, color):
        """Instantly sets this LED to the color passed.

        Args:
            color: a 3-item list of integers representing R, G, and B values,
                0-255 each.
        """
        # todo this is crazy inefficient right now. todo change it so it can use
        # hex strings as the color throughout
        new_color = self.rgb_to_hex(color)
        error = False

        # Unknown color: try to allocate one of the card's 32 table slots.
        if new_color not in self.neoCard.colorTableDict:
            if self.neoCard.numColorEntries < 32:
                slot = self.neoCard.numColorEntries
                self.neoCard.colorTableDict[new_color] = \
                    chr(slot + OppRs232Intf.NEO_CMD_ON)
                # Table entries go over the wire as green, red, blue bytes.
                msg = [
                    self.neoCard.addr,
                    OppRs232Intf.CHNG_NEO_COLOR_TBL,
                    chr(slot),
                    chr(int(new_color[2:4], 16)),
                    chr(int(new_color[:2], 16)),
                    chr(int(new_color[-2:], 16)),
                ]
                msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
                cmd = ''.join(msg)
                self.log.debug("Add Neo color table entry: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
                self.neoCard.platform.opp_connection.send(cmd)
                self.neoCard.numColorEntries += 1
            else:
                error = True
                self.log.warn("Not enough Neo color table entries. "
                              "OPP only supports 32.")

        # Point this pixel at the color's table entry.
        if not error:
            msg = [
                self.neoCard.addr,
                OppRs232Intf.SET_IND_NEO_CMD,
                self.index_char,
                self.neoCard.colorTableDict[new_color],
            ]
            msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
            cmd = ''.join(msg)
            self.log.debug("Set Neopixel color: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
            self.neoCard.platform.opp_connection.send(cmd)
class SerialCommunicator(object):
    """Manages the serial connection to a chain of OPP Gen2 cards.

    Performs the initial identify/inventory/configure/version handshake
    synchronously, then hands traffic off to dedicated send and receive
    threads that work off the supplied queues.
    """

    def __init__(self, platform, port, baud, send_queue, receive_queue):
        self.machine = platform.machine
        self.platform = platform
        self.send_queue = send_queue
        self.receive_queue = receive_queue
        self.debug = False
        self.log = self.platform.log
        self.partMsg = ""            # buffer of partially received bytes
        self.remote_processor = "OPP Gen2"
        self.remote_model = None

        self.log.info("Connecting to %s at %sbps", port, baud)
        try:
            self.serial_connection = serial.Serial(port=port, baudrate=baud,
                                                   timeout=.01, writeTimeout=0)
        except serial.SerialException:
            self.log.error('Could not open port: %s' % port)
            sys.exit()

        self.identify_connection()
        self.platform.register_processor_connection(self.remote_processor, self)
        self._start_threads()

    def identify_connection(self):
        """Identifies which processor this serial connection is talking to."""
        # keep looping and wait for an ID response
        count = 0
        while True:
            if ((count % 10) == 0):
                self.log.debug("Sending EOM command to port '%s'",
                               self.serial_connection.name)
            count += 1
            self.serial_connection.write(OppRs232Intf.EOM_CMD)
            time.sleep(.01)
            resp = self.serial_connection.read(30)
            if resp.startswith(OppRs232Intf.EOM_CMD):
                break
            if (count == 100):
                self.log.error('No response from OPP hardware: %s' %
                               self.serial_connection.name)
                sys.exit()

        # Send inventory command to figure out number of cards
        msg = []
        msg.append(OppRs232Intf.INV_CMD)
        msg.append(OppRs232Intf.EOM_CMD)
        cmd = ''.join(msg)
        self.log.debug("Sending inventory command: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
        self.serial_connection.write(cmd)
        time.sleep(.1)
        resp = self.serial_connection.read(30)

        # resp will contain the inventory response.
        self.platform.process_received_message(resp)

        # Now send get gen2 configuration message to find populated wing
        # boards.  Processing the responses creates all the correct objects.
        self.send_get_gen2_cfg_cmd()
        time.sleep(.1)
        resp = self.serial_connection.read(30)
        self.platform.process_received_message(resp)

        # get the version of the firmware
        self.send_vers_cmd()
        time.sleep(.1)
        resp = self.serial_connection.read(30)
        self.platform.process_received_message(resp)

        # see if version of firmware is new enough
        if (self.platform.minVersion < MIN_FW):
            # Bug fix: create_vers_str is defined as a method of this class,
            # so it must be called through self -- the previous bare-name
            # calls would fail here instead of logging the mismatch.
            self.log.critical("Firmware version mismatch. MPF requires"
                              " the %s processor to be firmware %s, but yours is %s",
                              self.remote_processor, self.create_vers_str(MIN_FW),
                              self.create_vers_str(self.platform.minVersion))
            sys.exit()

        # get initial value for inputs
        self.serial_connection.write(self.platform.read_input_msg)
        time.sleep(.1)
        resp = self.serial_connection.read(100)
        self.log.debug("Init get input response: %s", "".join(" 0x%02x" % ord(b) for b in resp))
        self.platform.process_received_message(resp)

    def send_get_gen2_cfg_cmd(self):
        """Broadcast GET_GEN2_CFG to every known card to find wing boards."""
        wholeMsg = []
        for cardAddr in self.platform.gen2AddrArr:
            # Per-card frame: addr, cmd, four zero-filled data bytes, CRC.
            msg = []
            msg.append(cardAddr)
            msg.append(OppRs232Intf.GET_GEN2_CFG)
            msg.append('\x00')
            msg.append('\x00')
            msg.append('\x00')
            msg.append('\x00')
            msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
            wholeMsg.extend(msg)

        wholeMsg.append(OppRs232Intf.EOM_CMD)
        cmd = ''.join(wholeMsg)
        self.log.debug("Sending get Gen2 Cfg command: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
        self.serial_connection.write(cmd)

    def send_vers_cmd(self):
        """Ask every known card for its firmware version."""
        wholeMsg = []
        for cardAddr in self.platform.gen2AddrArr:
            # Per-card frame: addr, cmd, four zero-filled data bytes, CRC.
            msg = []
            msg.append(cardAddr)
            msg.append(OppRs232Intf.GET_GET_VERS_CMD)
            msg.append('\x00')
            msg.append('\x00')
            msg.append('\x00')
            msg.append('\x00')
            msg.append(OppRs232Intf.calc_crc8_whole_msg(msg))
            wholeMsg.extend(msg)

        wholeMsg.append(OppRs232Intf.EOM_CMD)
        cmd = ''.join(wholeMsg)
        self.log.debug("Sending get version command: %s", "".join(" 0x%02x" % ord(b) for b in cmd))
        self.serial_connection.write(cmd)

    def create_vers_str(self, version_int):
        """Format a packed 32-bit version (one byte per field) as 'aa.bb.cc.dd'."""
        return ("%02d.%02d.%02d.%02d" % (((version_int >> 24) & 0xff),
                ((version_int >> 16) & 0xff), ((version_int >> 8) & 0xff),
                (version_int & 0xff)))

    def _start_threads(self):
        """Switch to blocking reads and start the send/receive worker threads."""
        self.serial_connection.timeout = None

        self.receive_thread = threading.Thread(target=self._receive_loop)
        self.receive_thread.daemon = True
        self.receive_thread.start()

        self.sending_thread = threading.Thread(target=self._sending_loop)
        self.sending_thread.daemon = True
        self.sending_thread.start()

    def stop(self):
        """Stops and shuts down this serial connection."""
        self.log.error("Stop called on serial connection")
        self.serial_connection.close()
        self.serial_connection = None  # child threads stop when this is None
        # todo clear the hw?

    def send(self, msg):
        """Sends a message to the remote processor over the serial connection.

        Args:
            msg: String of the message you want to send. We don't need no
                steenking line feed character
        """
        self.send_queue.put(msg)

    def _sending_loop(self):
        """Worker thread: drain send_queue onto the serial port until stopped."""
        debug = self.platform.config['debug']

        try:
            while self.serial_connection:
                msg = self.send_queue.get()
                self.serial_connection.write(msg)

                if debug:
                    self.log.info("Sending: %s", "".join(" 0x%02x" % ord(b) for b in msg))

        except Exception:
            # Forward the full traceback to the machine's crash queue so the
            # main thread can surface it.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            msg = ''.join(line for line in lines)
            self.machine.crash_queue.put(msg)

    def _receive_loop(self):
        """Worker thread: read the port, re-frame responses, queue them."""
        debug = self.platform.config['debug']

        try:
            self.log.info("Start rcv loop")
            while self.serial_connection:
                resp = self.serial_connection.read(30)
                if debug:
                    self.log.info("Received: %s", "".join(" 0x%02x" % ord(b) for b in resp))
                self.partMsg += resp
                endString = False
                strlen = len(self.partMsg)
                lostSynch = False

                # Split the buffer into individual 7-byte responses.
                while (strlen >= 7) and (not endString):
                    # (byte & 0xe0) == 0x20 marks a gen2 card address.
                    if ((ord(self.partMsg[0]) & 0xe0) == 0x20):
                        # Only command we expect back is READ_GEN2_INP_CMD.
                        if (self.partMsg[1] == OppRs232Intf.READ_GEN2_INP_CMD):
                            self.receive_queue.put(self.partMsg[:7])
                            self.partMsg = self.partMsg[7:]
                            strlen -= 7
                        else:
                            # Lost synch
                            self.partMsg = self.partMsg[2:]
                            strlen -= 2
                            lostSynch = True
                    elif (self.partMsg[0] == OppRs232Intf.EOM_CMD):
                        self.partMsg = self.partMsg[1:]
                        strlen -= 1
                    else:
                        # Lost synch
                        self.partMsg = self.partMsg[1:]
                        strlen -= 1
                        lostSynch = True

                if lostSynch:
                    # Discard bytes until the next plausible card address.
                    while (strlen > 0):
                        if ((ord(self.partMsg[0]) & 0xe0) == 0x20):
                            lostSynch = False
                            break
                        self.partMsg = self.partMsg[1:]
                        strlen -= 1

            self.log.critical("Exit rcv loop")

        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value,
                                               exc_traceback)
            msg = ''.join(line for line in lines)
            self.log.critical("!!! Receive loop error exception")
            self.machine.crash_queue.put(msg)
            self.log.critical("!!! Receive loop exited")
# The MIT License (MIT)
# Original code on which this module was based:
# Copyright (c) 2009-2011 Adam Preble and Gerry Stellenberg, but almost
# everything has been changed at this point.
# Copyright (c) 2016 Hugh Spahr
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
|
|
import unittest
from unittest import mock
from django.test import override_settings
# Fixes the Cache-Control error in tests. Must appear before view imports.
# (Globally replaces never_cache with a no-op decorator for the test run.)
mock.patch('django.views.decorators.cache.never_cache', lambda x: x).start()
from tethys_portal.views.user import profile, settings, change_password, social_disconnect, delete_account, \
    manage_storage, clear_workspace # noqa: E402
from tethys_apps.models import TethysApp # noqa: E402
class TethysPortalUserTests(unittest.TestCase):
    """Unit tests for the tethys_portal user views.

    All collaborators (render, quotas, tokens, MFA checks) are mocked, so
    each test only verifies that the view wires the expected context and
    calls through to the right helpers.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    @override_settings(MFA_REQUIRED=False)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.has_mfa')
    @mock.patch('tethys_portal.views.user._convert_storage_units')
    @mock.patch('tethys_portal.views.user.get_quota')
    @mock.patch('tethys_portal.views.user.render')
    @mock.patch('tethys_portal.views.user.Token.objects.get_or_create')
    def test_profile(self, mock_token_get_create, mock_render, mock_get_quota, mock_convert_units,
                     mock_has_mfa, _):
        """profile: no quota, MFA not required -> token shown in context."""
        mock_request = mock.MagicMock()
        mock_user_token = mock.MagicMock()
        mock_token_created = mock.MagicMock()
        mock_token_get_create.return_value = mock_user_token, mock_token_created
        mock_convert_units.return_value = '0 bytes'
        mock_get_quota.return_value = {'quota': None}
        mock_has_mfa.return_value = False
        expected_context = {
            'user_token': mock_user_token.key,
            'current_use': '0 bytes',
            'quota': None,
            'has_mfa': False,
            'mfa_required': False,
            'show_user_token_mfa': True
        }
        profile(mock_request)
        mock_render.assert_called_with(mock_request, 'tethys_portal/user/profile.html', expected_context)
    @override_settings(MFA_REQUIRED=False)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.has_mfa')
    @mock.patch('tethys_portal.views.user._convert_storage_units')
    @mock.patch('tethys_portal.views.user.get_quota')
    @mock.patch('tethys_portal.views.user.render')
    @mock.patch('tethys_portal.views.user.Token.objects.get_or_create')
    def test_profile_quota(self, mock_token_get_create, mock_render, mock_get_quota,
                           mock_convert_units, mock_has_mfa, _):
        """profile: a quota is defined -> converted quota appears in context."""
        mock_request = mock.MagicMock()
        mock_user = mock.MagicMock()
        mock_request.user = mock_user
        mock_user_token = mock.MagicMock()
        mock_token_created = mock.MagicMock()
        mock_token_get_create.return_value = mock_user_token, mock_token_created
        mock_convert_units.return_value = '0 bytes'
        mock_get_quota.return_value = {'quota': 1, 'units': 0}
        mock_has_mfa.return_value = False
        expected_context = {
            'user_token': mock_user_token.key,
            'current_use': '0 bytes',
            'quota': '0 bytes',
            'has_mfa': False,
            'mfa_required': False,
            'show_user_token_mfa': True # Show user token b/c mfa is not required
        }
        profile(mock_request)
        mock_render.assert_called_with(mock_request, 'tethys_portal/user/profile.html', expected_context)
        mock_token_get_create.assert_called_with(user=mock_user)
    @override_settings(MFA_REQUIRED=True)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.has_mfa')
    @mock.patch('tethys_portal.views.user._convert_storage_units')
    @mock.patch('tethys_portal.views.user.get_quota')
    @mock.patch('tethys_portal.views.user.render')
    @mock.patch('tethys_portal.views.user.Token.objects.get_or_create')
    def test_profile_mfa_required_no_mfa_set(self, mock_token_get_create, mock_render, mock_get_quota,
                                             mock_convert_units, mock_has_mfa, _):
        """profile: MFA required but not set up -> token hidden."""
        mock_request = mock.MagicMock()
        mock_user_token = mock.MagicMock()
        mock_token_created = mock.MagicMock()
        mock_token_get_create.return_value = mock_user_token, mock_token_created
        mock_convert_units.return_value = '0 bytes'
        mock_get_quota.return_value = {'quota': None}
        mock_has_mfa.return_value = False
        expected_context = {
            'user_token': mock_user_token.key,
            'current_use': '0 bytes',
            'quota': None,
            'has_mfa': False,
            'mfa_required': True,
            'show_user_token_mfa': False # Don't show user token b/c mfa is required but user has not setup mfa
        }
        profile(mock_request)
        mock_render.assert_called_with(mock_request, 'tethys_portal/user/profile.html', expected_context)
    @override_settings(MFA_REQUIRED=True)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.has_mfa')
    @mock.patch('tethys_portal.views.user._convert_storage_units')
    @mock.patch('tethys_portal.views.user.get_quota')
    @mock.patch('tethys_portal.views.user.render')
    @mock.patch('tethys_portal.views.user.Token.objects.get_or_create')
    def test_profile_mfa_required_mfa_set(self, mock_token_get_create, mock_render, mock_get_quota,
                                          mock_convert_units, mock_has_mfa, _):
        """profile: MFA required and set up -> token shown."""
        mock_request = mock.MagicMock()
        mock_user_token = mock.MagicMock()
        mock_token_created = mock.MagicMock()
        mock_token_get_create.return_value = mock_user_token, mock_token_created
        mock_convert_units.return_value = '0 bytes'
        mock_get_quota.return_value = {'quota': None}
        mock_has_mfa.return_value = True
        expected_context = {
            'user_token': mock_user_token.key,
            'current_use': '0 bytes',
            'quota': None,
            'has_mfa': True,
            'mfa_required': True,
            'show_user_token_mfa': True # Show user token b/c mfa is required and user has setup mfa
        }
        profile(mock_request)
        mock_render.assert_called_with(mock_request, 'tethys_portal/user/profile.html', expected_context)
    @override_settings(MFA_REQUIRED=False)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.has_mfa')
    @mock.patch('tethys_portal.views.user._convert_storage_units')
    @mock.patch('tethys_portal.views.user.get_quota')
    @mock.patch('tethys_portal.views.user.render')
    @mock.patch('tethys_portal.views.user.Token.objects.get_or_create')
    def test_profile_mfa_not_required_mfa_set(self, mock_token_get_create, mock_render, mock_get_quota,
                                              mock_convert_units, mock_has_mfa, _):
        """profile: MFA set up but not required -> token shown."""
        mock_request = mock.MagicMock()
        mock_user_token = mock.MagicMock()
        mock_token_created = mock.MagicMock()
        mock_token_get_create.return_value = mock_user_token, mock_token_created
        mock_convert_units.return_value = '0 bytes'
        mock_get_quota.return_value = {'quota': None}
        mock_has_mfa.return_value = True
        expected_context = {
            'user_token': mock_user_token.key,
            'current_use': '0 bytes',
            'quota': None,
            'has_mfa': True,
            'mfa_required': False,
            'show_user_token_mfa': True # Show user token b/c not mfa is required
        }
        profile(mock_request)
        mock_render.assert_called_with(mock_request, 'tethys_portal/user/profile.html', expected_context)
    @mock.patch('tethys_portal.views.user.UserSettingsForm')
    @mock.patch('tethys_portal.views.user.redirect')
    def test_settings_request_post(self, mock_redirect, mock_usf):
        """settings POST: valid form -> user saved, redirect to profile."""
        mock_first_name = mock.MagicMock()
        mock_last_name = mock.MagicMock()
        mock_email = mock.MagicMock()
        mock_user = mock.MagicMock()
        mock_user.username = 'foo'
        mock_user.first_name = mock_first_name
        mock_user.last_name = mock_last_name
        mock_user.email = mock_email
        mock_request = mock.MagicMock()
        mock_request.user = mock_user
        mock_request.method = 'POST'
        mock_request.POST = 'user-settings-submit'
        mock_form = mock.MagicMock()
        mock_form.is_valid.return_value = True
        mock_usf.return_value = mock_form
        settings(mock_request)
        mock_user.save.assert_called()
        mock_usf.assert_called_once_with(mock_request.POST)
        mock_redirect.assert_called_once_with('user:profile')
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.django_settings')
    @mock.patch('tethys_portal.views.user.Token.objects.get_or_create')
    @mock.patch('tethys_portal.views.user.UserSettingsForm')
    @mock.patch('tethys_portal.views.user.render')
    def test_settings_request_get(self, mock_render, mock_usf, mock_token_get_create, mock_django_settings, _):
        """settings GET: renders the settings form bound to the user."""
        mock_request_user = mock.MagicMock()
        mock_request_user.username = 'foo'
        mock_request = mock.MagicMock()
        mock_request.user = mock_request_user
        mock_request.method = 'GET'
        mock_form = mock.MagicMock()
        mock_usf.return_value = mock_form
        mock_user_token = mock.MagicMock()
        mock_token_created = mock.MagicMock()
        mock_token_get_create.return_value = mock_user_token, mock_token_created
        mock_django_settings.MFA_REQUIRED = False
        expected_context = {'form': mock_form,
                            'user_token': mock_user_token.key,
                            'current_use': '0 bytes',
                            'quota': None,
                            'mfa_required': False,
                            'has_mfa': False,
                            'show_user_token_mfa': True
                            }
        settings(mock_request)
        mock_usf.assert_called_once_with(instance=mock_request_user)
        mock_token_get_create.assert_called_once_with(user=mock_request_user)
        mock_render.assert_called_once_with(mock_request, 'tethys_portal/user/settings.html', expected_context)
    @mock.patch('tethys_portal.views.user.UserPasswordChangeForm')
    @mock.patch('tethys_portal.views.user.redirect')
    def test_change_password_post(self, mock_redirect, mock_upf):
        """change_password POST: valid form -> save + redirect to settings."""
        mock_user = mock.MagicMock()
        mock_user.username = 'foo'
        mock_request = mock.MagicMock()
        mock_request.user = mock_user
        mock_request.method = 'POST'
        mock_request.POST = 'change-password-submit'
        mock_form = mock.MagicMock()
        mock_form.is_valid.return_value = True
        mock_upf.return_value = mock_form
        change_password(mock_request)
        mock_redirect.assert_called_once_with('user:settings')
        mock_form.clean_old_password.assert_called()
        mock_form.clean_new_password2.assert_called()
        mock_form.save.assert_called()
        mock_upf.assert_called_once_with(user=mock_request.user, data=mock_request.POST)
    @mock.patch('tethys_portal.views.user.UserPasswordChangeForm')
    @mock.patch('tethys_portal.views.user.render')
    def test_change_password_get(self, mock_render, mock_upf):
        """change_password GET: renders an empty change-password form."""
        mock_request_user = mock.MagicMock()
        mock_request_user.username = 'foo'
        mock_request = mock.MagicMock()
        mock_request.user = mock_request_user
        mock_request.method = 'GET'
        mock_form = mock.MagicMock()
        mock_upf.return_value = mock_form
        expected_context = {'form': mock_form}
        change_password(mock_request)
        mock_upf.assert_called_once_with(user=mock_request_user)
        mock_render.assert_called_once_with(mock_request, 'tethys_portal/user/change_password.html', expected_context)
    @mock.patch('tethys_portal.views.user.render')
    def test_social_disconnect_valid_user(self, mock_render):
        """social_disconnect: renders the confirmation page with provider info."""
        mock_request_user = mock.MagicMock()
        mock_request_user.username = 'foo'
        mock_request = mock.MagicMock()
        mock_request.user = mock_request_user
        mock_provider = mock.MagicMock()
        mock_association_id = mock.MagicMock()
        expected_context = {'provider': mock_provider,
                            'association_id': mock_association_id}
        social_disconnect(mock_request, mock_provider, mock_association_id)
        mock_render.assert_called_once_with(mock_request, 'tethys_portal/user/disconnect.html', expected_context)
    @mock.patch('tethys_portal.views.user.messages.success')
    @mock.patch('tethys_portal.views.user.logout')
    @mock.patch('tethys_portal.views.user.redirect')
    def test_delete_account_post(self, mock_redirect, mock_logout, mock_messages_success):
        """delete_account POST: deletes user, logs out, redirects home."""
        mock_user = mock.MagicMock()
        mock_user.username = 'foo'
        mock_request = mock.MagicMock()
        mock_request.user = mock_user
        mock_request.method = 'POST'
        mock_request.POST = 'delete-account-submit'
        delete_account(mock_request)
        mock_request.user.delete.assert_called()
        mock_logout.assert_called_once_with(mock_request)
        mock_messages_success.assert_called_once_with(mock_request, 'Your account has been successfully deleted.')
        mock_redirect.assert_called_once_with('home')
    @mock.patch('tethys_portal.views.user.render')
    def test_delete_account_not_post(self, mock_render):
        """delete_account GET: renders the delete confirmation page."""
        mock_user = mock.MagicMock()
        mock_user.username = 'foo'
        mock_request = mock.MagicMock()
        mock_request.user = mock_user
        mock_request.method = 'GET'
        delete_account(mock_request)
        expected_context = {}
        mock_render.assert_called_once_with(mock_request, 'tethys_portal/user/delete.html', expected_context)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user._get_user_workspace')
    @mock.patch('tethys_portal.views.user._convert_storage_units')
    @mock.patch('tethys_portal.views.user.SingletonHarvester')
    @mock.patch('tethys_portal.views.user.render')
    def test_manage_storage_successful(self, mock_render, mock_harvester, mock_convert_storage, _, __):
        """manage_storage: lists harvested apps with current usage."""
        mock_request = mock.MagicMock()
        mock_request.user.username = 'ThisIsMe'
        app = TethysApp(name="app_name")
        mock_harvester().apps = [app]
        mock_convert_storage.return_value = '0 bytes'
        expected_context = {'apps': mock_harvester().apps,
                            'current_use': '0 bytes',
                            'quota': None,
                            }
        manage_storage(mock_request)
        mock_render.assert_called_once_with(mock_request, 'tethys_portal/user/manage_storage.html', expected_context)
    @mock.patch('tethys_quotas.utilities.log')
    @mock.patch('tethys_portal.views.user.TethysApp')
    @mock.patch('tethys_portal.views.user.render')
    def test_clear_workspace_display(self, mock_render, mock_TethysApp, _):
        """clear_workspace GET: renders the confirmation page for the app."""
        mock_request = mock.MagicMock()
        mock_request.user.username = 'ThisIsMe'
        expected_context = {'app_name': mock_TethysApp.objects.get().name}
        clear_workspace(mock_request, 'root_url')
        mock_render.assert_called_once_with(mock_request, 'tethys_portal/user/clear_workspace.html', expected_context)
    @mock.patch('tethys_portal.views.user.get_app_class')
    @mock.patch('tethys_portal.views.user._get_user_workspace')
    @mock.patch('tethys_portal.views.user.TethysApp')
    @mock.patch('tethys_portal.views.user.messages.success')
    @mock.patch('tethys_portal.views.user.redirect')
    def test_clear_workspace_successful(self, mock_redirect, mock_message, mock_app, mock_guw,
                                        mock_get_app_class): # noqa: E501
        """clear_workspace POST: clears workspace, notifies, redirects."""
        mock_request = mock.MagicMock(method='POST', POST='clear-workspace-submit')
        mock_request.user.username = 'ThisIsMe'
        app = TethysApp(name='app_name')
        mock_app.objects.get.return_value = app
        mock_get_app_class.return_value = app
        app.pre_delete_user_workspace = mock.MagicMock()
        app.post_delete_user_workspace = mock.MagicMock()
        mock_guw.return_value = mock.MagicMock()
        clear_workspace(mock_request, 'root_url')
        mock_message.assert_called_once_with(mock_request, 'Your workspace has been successfully cleared.')
        mock_redirect.assert_called_once_with('user:manage_storage')
|
|
"""Helper functions for items"""
import json
from flask import jsonify, request
from hydra_python_core.doc_writer import HydraStatus, HydraError
from hydrus.data import crud
from hydrus.data.exceptions import (
ClassNotFound,
InstanceNotFound,
InstanceExists,
InvalidDateTimeFormat,
PropertyNotFound,
MemberInstanceNotFound,
)
from hydrus.data.helpers import (
set_response_headers,
finalize_response,
hydrafy,
getType,
send_sync_update,
get_link_props,
error_response,
validate_object,
parse_collection_members,
get_collections_and_parsed_classes,
)
from hydrus.utils import get_session, get_api_name, get_hydrus_server_url, get_doc
from hydrus.extensions.socketio_factory import socketio
def items_get_check_support(id_, class_type, class_path, path, is_collection=False):
    """Fetch the item with the given ID if `class_type` supports GET.

    Returns a hydrafied JSON response on success, or an error response
    when the class or instance cannot be found.
    """
    try:
        item = crud.get(
            id_,
            class_type,
            api_name=get_api_name(),
            session=get_session(),
            path=path,
            collection=is_collection,
        )
        item = finalize_response(class_path, item)
        return set_response_headers(jsonify(hydrafy(item, path=path)))
    except (ClassNotFound, InstanceNotFound) as err:
        return error_response(err.get_HTTP())
def items_post_check_support(id_, object_, class_path, path, is_collection):
    """Update the object with ID `id_` via POST if the class supports it.

    :param id_: ID of the existing object to update
    :param object_: parsed JSON body of the replacement object
    :param class_path: path of the class the object belongs to (may be
        overwritten below from the parsed doc / collections)
    :param path: endpoint path, used to build the resource URL
    :param is_collection: True when `path` refers to a collection
    :return: Flask response — a HydraStatus report on success, an error
        response on validation or CRUD failure
    """
    collections, parsed_classes = get_collections_and_parsed_classes()
    doc = get_doc()
    if path in parsed_classes:
        class_path = path
        # NOTE(review): the type is looked up with "PUT" even though this is
        # the POST handler — presumably both operations share the expected
        # type, but confirm against getType's semantics.
        obj_type = getType(path, "PUT")
    elif path in collections:
        collection = collections[path]["collection"]
        class_path = collection.path
        obj_type = collection.name
    # NOTE(review): if `path` is in neither mapping, `obj_type` is unbound and
    # the validate_object call below raises NameError — routing presumably
    # guarantees only known paths reach this helper; verify.
    link_props, link_type_check = get_link_props(class_path, object_)
    # Load new object and type
    if validate_object(object_, obj_type, class_path) and link_type_check:
        if is_collection:
            object_ = parse_collection_members(object_)
        try:
            # Update the right ID if the object is valid and matches
            # type of Item
            object_id = crud.update(
                doc,
                object_=object_,
                id_=id_,
                type_=object_["@type"],
                session=get_session(),
                api_name=get_api_name(),
                collection=is_collection,
            )
            method = "POST"
            resource_url = (
                f"{get_hydrus_server_url()}{get_api_name()}/{path}/{object_id}"
            )
            # Record the modification and broadcast it so synchronized
            # clients can refresh their state.
            last_job_id = crud.get_last_modification_job_id(session=get_session())
            new_job_id = crud.insert_modification_record(
                method, resource_url, session=get_session()
            )
            send_sync_update(
                socketio=socketio,
                new_job_id=new_job_id,
                last_job_id=last_job_id,
                method=method,
                resource_url=resource_url,
            )
            headers_ = [{"Location": resource_url}]
            status_description = f"Object with ID {object_id} successfully " "updated"
            status = HydraStatus(
                code=200, title="Object updated", desc=status_description
            )
            status_response = status.generate()
            status_response["iri"] = resource_url
            return set_response_headers(jsonify(status_response), headers=headers_)
        except (ClassNotFound, InstanceNotFound, InstanceExists,
                PropertyNotFound, InvalidDateTimeFormat) as e:
            error = e.get_HTTP()
            return error_response(error)
    else:
        error = HydraError(code=400, title="Data is not valid")
        return error_response(error)
def items_put_check_support(id_, class_path, path, is_collection):
    """Create an object with ID `id_` via PUT if the class supports it.

    The request body is read directly from the Flask request object.

    :param id_: ID to assign to the newly inserted object
    :param class_path: path of the class (may be overwritten below)
    :param path: endpoint path, used to build the resource URL
    :param is_collection: True when `path` refers to a collection
    :return: Flask response — 201 with a Location header on success,
        an error response on validation or CRUD failure
    """
    object_ = json.loads(request.data.decode("utf-8"))
    doc = get_doc()
    collections, parsed_classes = get_collections_and_parsed_classes()
    if path in parsed_classes:
        class_path = path
        obj_type = getType(path, "PUT")
    elif path in collections:
        collection = collections[path]["collection"]
        class_path = collection.path
        obj_type = collection.name
    # NOTE(review): as in items_post_check_support, `obj_type` is unbound if
    # `path` matches neither mapping. Also `link_props` is computed but unused.
    link_props, link_type_check = get_link_props(class_path, object_)
    # Load new object and type
    if validate_object(object_, obj_type, class_path) and link_type_check:
        if is_collection:
            object_ = parse_collection_members(object_)
        try:
            # Add the object with given ID
            object_id = crud.insert(
                doc,
                object_=object_,
                id_=id_,
                session=get_session(),
                collection=is_collection,
            )
            resource_url = (
                f"{get_hydrus_server_url()}{get_api_name()}/{path}/{object_id}"
            )
            headers_ = [{"Location": resource_url}]
            status_description = f"Object with ID {object_id} successfully added"
            status = HydraStatus(
                code=201, title="Object successfully added.", desc=status_description
            )
            status_response = status.generate()
            status_response["iri"] = resource_url
            return set_response_headers(
                jsonify(status_response), headers=headers_, status_code=status.code
            )
        except (ClassNotFound, InstanceExists,
                PropertyNotFound, InvalidDateTimeFormat) as e:
            error = e.get_HTTP()
            return error_response(error)
    else:
        error = HydraError(code=400, title="Data is not valid")
        return error_response(error)
def items_delete_check_support(id_, class_type, path, is_collection):
    """Delete the item with ID `id_` if `class_type` supports DELETE.

    On success the modification is recorded and broadcast to synchronized
    clients; returns a HydraStatus response, or an error response when the
    class or instance cannot be found.
    """
    try:
        # Delete the Item with ID == id_
        # for collections, id_ corresponds to their collection_id and not the
        # primary key
        crud.delete(id_, class_type, session=get_session(), collection=is_collection)
        method = "DELETE"
        resource_url = f"{get_hydrus_server_url()}{get_api_name()}/{path}/{id_}"
        last_job_id = crud.get_last_modification_job_id(session=get_session())
        new_job_id = crud.insert_modification_record(
            method, resource_url, session=get_session()
        )
        # Notify connected clients of the deletion.
        send_sync_update(
            socketio=socketio,
            new_job_id=new_job_id,
            last_job_id=last_job_id,
            method=method,
            resource_url=resource_url,
        )
        status_description = f"Object with ID {id_} successfully deleted"
        status = HydraStatus(
            code=200, title="Object successfully deleted.", desc=status_description
        )
        return set_response_headers(jsonify(status.generate()))
    except (ClassNotFound, InstanceNotFound) as e:
        error = e.get_HTTP()
        return error_response(error)
def member_get_check_support(collection_id, member_id, class_type, class_path, path):
    """Fetch member `member_id` of collection `collection_id` if GET is supported.

    Returns a hydrafied JSON response on success, or an error response when
    the class or member instance cannot be found.
    """
    try:
        member = crud.get_member(
            collection_id,
            member_id,
            class_type,
            api_name=get_api_name(),
            session=get_session(),
            path=path,
        )
        member = finalize_response(class_path, member)
        return set_response_headers(jsonify(hydrafy(member, path=path)))
    except (ClassNotFound, MemberInstanceNotFound) as err:
        return error_response(err.get_HTTP())
def member_delete_check_support(collection_id, member_id, class_type, path):
    """Delete member `member_id` from collection `collection_id` if DELETE is supported.

    :param collection_id: ID of the collection
    :param member_id: ID of the member within the collection
    :param class_type: class of the member being deleted
    :param path: endpoint path, used to build the resource URL
    :return: Flask response — a HydraStatus report on success, an error
        response when the class or member instance cannot be found
    """
    try:
        # Delete the Item with IDs collection_id and member_id
        crud.delete_member(collection_id, member_id, class_type, session=get_session())
        method = "DELETE"
        resource_url = (
            f"{get_hydrus_server_url()}{get_api_name()}/{path}/{collection_id}"
        )
        last_job_id = crud.get_last_modification_job_id(session=get_session())
        new_job_id = crud.insert_modification_record(
            method, resource_url, session=get_session()
        )
        # Broadcast the modification, mirroring items_delete_check_support.
        # Previously the job IDs were computed but never sent, so synchronized
        # clients were not informed of member deletions.
        send_sync_update(
            socketio=socketio,
            new_job_id=new_job_id,
            last_job_id=last_job_id,
            method=method,
            resource_url=resource_url,
        )
        status_description = (
            f"Object with ID {member_id} successfully"
            f" deleted from Collection with ID {collection_id}"
        )
        status = HydraStatus(
            code=200, title="Object successfully deleted.", desc=status_description
        )
        return set_response_headers(jsonify(status.generate()))
    except (ClassNotFound, MemberInstanceNotFound) as e:
        error = e.get_HTTP()
        return error_response(error)
|
|
from nose.tools import * # noqa
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.factories import AuthUserFactory, ProjectFactory, BookmarkCollectionFactory
from scripts.analytics.addon_snapshot import AddonSnapshot
from website.models import Node
from framework.auth.core import User
from website.settings import ADDONS_AVAILABLE
from website.addons.github.tests.factories import GitHubAccountFactory
from website.addons.github.model import GitHubNodeSettings, GitHubUserSettings
from website.addons.googledrive.tests.factories import GoogleDriveAccountFactory
from website.addons.googledrive.model import GoogleDriveNodeSettings, GoogleDriveUserSettings
class TestAddonCount(OsfTestCase):
    """Tests for AddonSnapshot's per-provider user/node counts.

    setUp wires one user and one project with a fully-authorized GitHub
    addon; individual tests layer additional users/nodes/addons on top.
    """

    def setUp(self):
        super(TestAddonCount, self).setUp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.user.add_addon('github')
        self.user_addon = self.user.get_addon('github')
        self.external_account = GitHubAccountFactory(display_name='hmoco1')
        self.user_settings = self.user.get_or_add_addon('github')
        self.user_settings.save()
        self.user.external_accounts.append(self.external_account)
        self.user.save()
        self.node.add_addon('github', Auth(self.user))
        self.node_addon = self.node.get_addon('github')
        self.node_addon.user = self.user.fullname
        self.node_addon.repo = '29 #Strafford APTS'
        self.node_addon.user_settings = self.user_addon
        self.node_addon.external_account = self.external_account
        self.node_addon.save()
        self.user_settings.grant_oauth_access(
            node=self.node,
            external_account=self.external_account,
        )

    def tearDown(self):
        # Remove all addon settings so counts don't leak between tests.
        GitHubNodeSettings.remove()
        GitHubUserSettings.remove()
        GoogleDriveNodeSettings.remove()
        GoogleDriveUserSettings.remove()

    def test_run_for_all_addon(self):
        results = AddonSnapshot().get_events()
        names = [res['provider']['name'] for res in results]
        for addon in ADDONS_AVAILABLE:
            assert_in(addon.short_name, names)

    def test_one_user_one_node_one_addon(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['users']['enabled'], 1)
        assert_equal(github_res['nodes']['total'], 1)

    def test_one_user_one_node_one_addon_one_node_linked(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['users']['enabled'], 1)
        assert_equal(github_res['nodes']['total'], 1)

    def test_one_user_with_multiple_githubs(self):
        # A second linked GitHub account must not double-count the user.
        oauth_settings2 = GitHubAccountFactory(display_name='hmoco2')
        oauth_settings2.save()
        self.user.external_accounts.append(oauth_settings2)
        self.user.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['users']['enabled'], 1)

    def test_one_user_with_multiple_addons(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
        assert_equal(github_res['users']['enabled'], 1)
        assert_equal(googledrive_res['users']['enabled'], 0)
        self.user.add_addon('googledrive')
        oauth_settings = GoogleDriveAccountFactory()
        oauth_settings.save()
        self.user.external_accounts.append(oauth_settings)
        self.user.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
        assert_equal(github_res['users']['enabled'], 1)
        assert_equal(googledrive_res['users']['enabled'], 1)

    def test_many_users_each_with_a_different_github(self):
        user = AuthUserFactory()
        user.add_addon('github')
        oauth_settings2 = GitHubAccountFactory(display_name='hmoco2')
        oauth_settings2.save()
        user.external_accounts.append(oauth_settings2)
        user.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['users']['enabled'], 2)
        assert_equal(github_res['users']['authorized'], 1)
        assert_equal(github_res['users']['linked'], 1)

    def test_many_users_each_with_the_same_github_enabled(self):
        user = AuthUserFactory()
        user.add_addon('github')
        user.external_accounts.append(self.external_account)
        user.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['users']['enabled'], 2)

    def test_github_enabled_not_linked_or_authorized(self):
        # NOTE(review): the second user here is neither linked nor authorized,
        # so only the setUp user contributes to those counts.
        user = AuthUserFactory()
        user.add_addon('github')
        user.external_accounts.append(self.external_account)
        user.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['users']['enabled'], 2)
        assert_equal(github_res['users']['authorized'], 1)
        assert_equal(github_res['users']['linked'], 1)

    def test_one_node_with_multiple_addons(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
        assert_equal(github_res['nodes']['total'], 1)
        assert_equal(googledrive_res['nodes']['total'], 0)
        self.user.add_addon('googledrive')
        user_addon = self.user.get_addon('googledrive')
        oauth_settings = GoogleDriveAccountFactory()
        oauth_settings.save()
        self.user.external_accounts.append(oauth_settings)
        self.user.save()
        self.node.add_addon('googledrive', Auth(self.user))
        node_addon = self.node.get_addon('googledrive')
        node_addon.user = self.user.fullname
        node_addon.user_settings = user_addon
        node_addon.external_account = oauth_settings
        node_addon.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        googledrive_res = [res for res in results if res['provider']['name'] == 'googledrive'][0]
        assert_equal(github_res['nodes']['total'], 1)
        assert_equal(googledrive_res['nodes']['total'], 1)

    def test_many_nodes_with_one_addon(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['nodes']['total'], 1)
        node = ProjectFactory(creator=self.user)
        node.add_addon('github', Auth(self.user))
        node_addon = node.get_addon('github')
        node_addon.user = self.user.fullname
        node_addon.repo = '8 (circle)'
        node_addon.user_settings = self.user_addon
        node_addon.external_account = self.external_account
        node_addon.save()
        node.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['nodes']['total'], 2)

    def test_node_count_deleted_addon(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['nodes']['deleted'], 0)
        node = ProjectFactory(creator=self.user)
        node.add_addon('github', Auth(self.user))
        node_addon = node.get_addon('github')
        node_addon.delete()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['nodes']['deleted'], 1)

    def test_node_count_disconected_addon(self):
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['nodes']['disconnected'], 0)
        node = ProjectFactory(creator=self.user)
        node.add_addon('github', Auth(self.user))
        node_addon = node.get_addon('github')
        node_addon.external_account = None
        node_addon.save()
        results = AddonSnapshot().get_events()
        github_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(github_res['nodes']['disconnected'], 1)

    def test_all_users_have_wiki_osfstorage_enabled(self):
        all_user_count = User.find().count()
        results = AddonSnapshot().get_events()
        osfstorage_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]
        # BUG FIX: previously filtered on 'osfstorage' here too, so the wiki
        # assertion below was re-checking osfstorage instead of wiki.
        wiki_res = [res for res in results if res['provider']['name'] == 'wiki'][0]
        assert_equal(osfstorage_res['users']['enabled'], all_user_count)
        assert_equal(wiki_res['users']['enabled'], all_user_count)

    def test_wiki_deleted_shows_as_deleted(self):
        node = ProjectFactory(creator=self.user)
        node.delete_addon('wiki', auth=Auth(self.user))
        results = AddonSnapshot().get_events()
        wiki_res = [res for res in results if res['provider']['name'] == 'wiki'][0]
        assert_equal(wiki_res['nodes']['deleted'], 1)

    def test_node_settings_has_no_owner_not_connected(self):
        self.node_addon.owner = None
        self.node_addon.save()
        results = AddonSnapshot().get_events()
        storage_res = [res for res in results if res['provider']['name'] == 'github'][0]
        assert_equal(storage_res['nodes']['connected'], 0)

    def test_bookmark_collection_not_counted(self):
        BookmarkCollectionFactory(creator=self.user)
        all_node_count = Node.find().count()
        results = AddonSnapshot().get_events()
        storage_res = [res for res in results if res['provider']['name'] == 'osfstorage'][0]
        assert_equal(storage_res['nodes']['connected'], all_node_count - 1)
|
|
from __future__ import unicode_literals
import json
from unittest import TestCase
from datetime import datetime, date
from collections import Sequence, Set
import pytest
import cherrypy
from sideboard.lib._services import _Services
from sideboard.lib import Model, serializer, ajax, is_listy, log
class TestServices(TestCase):
    """Exercise the _Services plugin registry."""

    def setUp(self):
        self.services = _Services()

    def test_service_registration(self):
        """A registered service is reachable as a registry attribute."""
        registry = self.services
        registry.register(self, 'foo')
        registry.foo.assertTrue(True)

    def test_service_double_registration(self):
        """Different names may be registered; reusing a name raises."""
        registry = self.services
        registry.register(self, 'foo')
        registry.register(self, 'bar')
        self.assertRaises(AssertionError, registry.register, self, 'foo')

    def test_service_preregistration_getattr(self):
        """A proxy fetched before registration resolves afterwards."""
        placeholder = self.services.foo
        self.services.register(self, 'foo')
        placeholder.assertTrue(True)
class TestModel(TestCase):
    """Tests for sideboard's Model wrapper around prefixed data dicts.

    A Model stores fields either top-level ("promoted"), under
    '<prefix>_data', or under 'extra_data' with a '<prefix>_' key prefix;
    these tests pin down lookup, assignment, defaults, serialization, and
    dirty-tracking behavior across those three storage locations.
    """
    # Shared assertions: every construction below must expose id/foo/bar
    # regardless of which underlying dict the values live in.
    def assert_model(self, data, unpromoted=None):
        model = Model(data, 'test', unpromoted)
        self.assertEqual('some_uuid', model.id)
        self.assertEqual('some_uuid', model['id'])
        self.assertEqual(5, model.foo)
        self.assertEqual(5, model['foo'])
        self.assertEqual({'baz': 'baf'}, model.bar)
        self.assertEqual({'baz': 'baf'}, model['bar'])
    def test_missing_key(self):
        model = Model({}, 'test')
        self.assertIs(None, model.does_not_exist)
    def test_id_unsettable(self):
        # Re-assigning the same id is a no-op; changing it must raise.
        model = Model({'id': 'some_uuid'}, 'test')
        model.id = 'some_uuid'
        model['id'] = 'some_uuid'
        self.assertEqual(model.id, 'some_uuid')
        with self.assertRaises(Exception):
            model.id = 'another_uuid'
        with self.assertRaises(Exception):
            model['id'] = 'another_uuid'
    def test_extra_data_only(self):
        d = {
            'id': 'some_uuid',
            'extra_data': {
                'test_foo': 5,
                'test_bar': {'baz': 'baf'}
            }
        }
        for data in [d, dict(d, test_data={})]:
            self.assert_model(data)
        # New fields land in extra_data with the 'test_' prefix applied.
        model = Model(d, 'test')
        model.fizz = 'buzz'
        model['buzz'] = 'fizz'
        self.assertEqual('fizz', model._data['extra_data']['test_buzz'])
        self.assertEqual('buzz', model._data['extra_data']['test_fizz'])
    def test_project_data(self):
        d = {
            'id': 'some_uuid',
            'test_data': {
                'foo': 5,
                'bar': {'baz': 'baf'}
            }
        }
        for data in [d, dict(d, extra_data={})]:
            self.assert_model(data)
        # NOTE(review): this constructs from `data` (the last loop value)
        # while test_extra_data_only uses `d` — likely unintentional but
        # harmless since both dicts are equivalent here.
        model = Model(data, 'test')
        model.fizz = 'buzz'
        model['buzz'] = 'fizz'
        self.assertEqual('fizz', model._data['test_data']['buzz'])
        self.assertEqual('buzz', model._data['test_data']['fizz'])
    def test_both_data(self):
        data = {
            'id': 'some_uuid',
            'extra_data': {
                'test_foo': 5
            },
            'test_data': {
                'bar': {'baz': 'baf'}
            }
        }
        self.assert_model(data)
        model = Model(data, 'test')
        model.fizz = 'buzz'
        model['buzz'] = 'fizz'
        self.assertEqual('fizz', model._data['test_data']['buzz'])
        self.assertEqual('buzz', model._data['test_data']['fizz'])
        # Re-assigning a field that lived in extra_data migrates it into
        # test_data, leaving extra_data empty.
        model.foo = 6
        model.bar = {'baf': 'baz'}
        self.assertEqual({}, model._data['extra_data'])
        self.assertEqual(6, model.foo)
        self.assertEqual({'baf': 'baz'}, model['bar'])
        self.assertEqual(6, model._data['test_data']['foo'])
        self.assertEqual({'baf': 'baz'}, model._data['test_data']['bar'])
    def test_unpromoted_prepromotion(self):
        # An unpromoted field stays in extra_data (unprefixed) on write.
        data = {
            'id': 'some_uuid',
            'extra_data': {
                'foo': 5,
                'test_bar': {'baz': 'baf'}
            }
        }
        self.assert_model(data, {'foo'})
        model = Model(data, 'test', unpromoted={'foo'})
        model.foo = 6
        self.assertEqual(6, model.foo)
        self.assertNotIn('foo', model._data)
        self.assertEqual(6, model._data['extra_data']['foo'])
    def test_unpromoted_postpromotion(self):
        # If the field already exists top-level, writes stay top-level.
        data = {
            'id': 'some_uuid',
            'foo': 5,
            'extra_data': {
                'test_bar': {'baz': 'baf'}
            }
        }
        self.assert_model(data, {'foo'})
        model = Model(data, 'test', unpromoted={'foo'})
        model.foo = 6
        self.assertEqual(6, model.foo)
        self.assertEqual(6, model._data['foo'])
        self.assertNotIn('foo', model._data['extra_data'])
    def test_unpromoted_not_present(self):
        data = {'id': 'some_uuid'}
        model = Model(data, 'test', unpromoted={'foo'})
        self.assertIs(None, model.foo)
        model.foo = 'bar'
        self.assertEqual('bar', model.foo)
        self.assertNotIn('foo', model._data)
        self.assertEqual('bar', model._data['extra_data']['foo'])
    def test_subclass(self):
        # Model cannot be instantiated without a prefix; subclasses supply
        # _prefix/_unpromoted/_defaults as class attributes instead.
        self.assertRaises(Exception, Model, {})
        class TestModel(Model):
            _prefix = 'test'
            _unpromoted = {'foo'}
            _defaults = {'baz': 'baf'}
        data = {'id': 'some_uuid'}
        model = TestModel(data)
        self.assertIs(None, model.foo)
        model.foo = 'bar'
        self.assertEqual('baf', model.baz)
        self.assertEqual('bar', model.foo)
        self.assertNotIn('foo', model._data)
        self.assertEqual('bar', model._data['extra_data']['foo'])
    def test_defaults(self):
        # Explicit values in any storage location win over defaults; only
        # fields absent everywhere fall back to the defaults dict.
        data = {
            'extra_data': {
                'test_foo': -1,
                'bar': -2
            },
            'test_data': {
                'baz': -3
            },
            'baf': -4
        }
        model = Model(data, 'test', {'bar','baf','fizz'}, {
            'foo': 1,
            'bar': 2,
            'baz': 3,
            'baf': 4,
            'fizz': 5,
            'buzz': 6
        })
        self.assertEqual(model.foo, -1)
        self.assertEqual(model.bar, -2)
        self.assertEqual(model.baz, -3)
        self.assertEqual(model.baf, -4)
        self.assertEqual(model.fizz, 5)
        self.assertEqual(model.buzz, 6)
        model.foo, model.bar, model.baz, model.baf = range(11, 15)
        self.assertEqual(model.foo, 11)
        self.assertEqual(model.bar, 12)
        self.assertEqual(model.baz, 13)
        self.assertEqual(model.baf, 14)
        self.assertEqual(model.fizz, 5)
        self.assertEqual(model.buzz, 6)
    def test_to_dict(self):
        data = {
            'id': 'some_uuid',
            'extra_data': {
                'test_foo': 5,
                'fizz': 'buzz',
                'spam': 'eggs'
            },
            'test_data': {'bar': 'baz'}
        }
        model = Model(data, 'test', {'fizz'})
        serialized = {
            'id': 'some_uuid',
            'foo': 5,
            'bar': 'baz',
            'fizz': 'buzz',
            'extra_data': {'spam': 'eggs'}
        }
        # to_dict() keeps leftover extra_data; dict(model) drops it.
        self.assertEqual(model.to_dict(), serialized)
        serialized.pop('extra_data')
        self.assertEqual(dict(model), serialized)
    def test_query(self):
        # query requires both _model and id; anything less raises.
        model = Model({'_model': 'Test', 'id': 'some_uuid'}, 'test')
        self.assertEqual(model.query, {
            '_model': 'Test',
            'field': 'id',
            'value': 'some_uuid'
        })
        for data in [{}, {'_model': 'Test'}, {'id': 'some_uuid'}]:
            with self.assertRaises(Exception):
                Model(data, 'test').query
    def test_dirty(self):
        # dirty reports only what changed, at the granularity of the
        # storage dict the change landed in.
        data = {
            'id': 'some_uuid',
            'spam': 'eggs',
            'extra_data': {
                'test_foo': 5
            },
            'test_data': {
                'bar': {'baz': 'baf'}
            }
        }
        self.assertEqual(Model(data, 'test').dirty, {})
        model = Model(data, 'test')
        model.spam = 'nee'
        self.assertEqual(model.dirty, {'spam': 'nee'})
        model = Model(data, 'test')
        model.foo = 6
        self.assertEqual(model.dirty, {'extra_data': {}, 'test_data': {'foo': 6, 'bar': {'baz': 'baf'}}})
        model = Model(data, 'test')
        model.bar = {'fizz': 'buzz'}
        self.assertEqual(model.dirty, {'test_data': {'bar': {'fizz': 'buzz'}}})
        # In-place mutation of a nested dict is also detected.
        model = Model(data, 'test')
        model.bar['baz'] = 'zab'
        self.assertEqual(model.dirty, {'test_data': {'bar': {'baz': 'zab'}}})
        model = Model(data, 'test')
        model.foo = 6
        model.bar = 'baz'
        model.spam = 'nee'
        model.fizz = 'buzz'
        self.assertEqual(model.dirty, {
            'spam': 'nee',
            'test_data': {
                'foo': 6,
                'bar': 'baz',
                'fizz': 'buzz'
            },
            'extra_data': {}
        })
        model = Model({}, 'test')
        model.foo = 'bar'
        self.assertEqual(model.dirty, {'extra_data': {'test_foo': 'bar'}})
class TestSerializer(TestCase):
    """Tests for sideboard's `serializer` JSON encoder registry."""
    # Sample types for registering custom serialization handlers.
    class Foo(object):
        def __init__(self, x):
            self.x = x
    class Bar(Foo): pass
    def setUp(self):
        # Restore the global registry after each test so registrations
        # made here don't leak into other tests.
        self.addCleanup(setattr, serializer, '_registry', serializer._registry.copy())
    def test_date(self):
        d = date(2001, 2, 3)
        assert '"2001-02-03"' == json.dumps(d, cls=serializer)
    def test_datetime(self):
        dt = datetime(2001, 2, 3, 4, 5, 6)
        assert '"{}"'.format(dt.strftime(serializer._datetime_format)) == json.dumps(dt, cls=serializer)
    def test_duplicate_registration(self):
        # datetime already has a built-in handler, so re-registering raises.
        pytest.raises(Exception, serializer.register, datetime, lambda dt: None)
    def test_new_type(self):
        serializer.register(self.Foo, lambda foo: foo.x)
        assert '5' == json.dumps(self.Foo(5), cls=serializer)
        assert '6' == json.dumps(self.Foo(6), cls=serializer)
    def test_new_type_subclass(self):
        # The most specific registered class wins for subclasses.
        serializer.register(self.Foo, lambda foo: 'Hello World!')
        serializer.register(self.Bar, lambda bar: 'Hello Kitty!')
        assert '"Hello World!"' == json.dumps(self.Foo(5), cls=serializer)
        assert '"Hello Kitty!"' == json.dumps(self.Bar(6), cls=serializer)
    """
    Here are some cases which are currently undefined (and I'm okay with it):
    class Foo(object): pass
    class Bar(object): pass
    class Baz(Foo, Bar): pass
    class Baf(Foo): pass
    class Bax(Foo): pass
    serializer.register(Foo, foo_preprocessor)
    serializer.register(Bar, bar_preprocessor)
    serializer.register(Baf, baf_preprocessor)
    json.dumps(Baz(), cls=serializer)  # undefined which function will be used
    json.dumps(Bax(), cls=serializer)  # undefined which function will be used
    """
class TestIsListy(TestCase):
    """
    We test all sequence types, set types, and mapping types listed at
    http://docs.python.org/2/library/stdtypes.html plus a few example
    user-defined collections subclasses.

    NOTE: this file targets Python 2 — `xrange` and `buffer` below do not
    exist on Python 3.
    """
    def test_sized_builtin(self):
        # Every sized, non-string, non-mapping builtin counts as listy.
        for x in [(), (1,), [], [1], set(), set([1]), frozenset(), frozenset([1]),
                  xrange(0), xrange(2), bytearray(), bytearray(1), buffer(''), buffer('x')]:
            assert is_listy(x)
    def test_excluded(self):
        # Mappings and strings are explicitly NOT listy.
        assert not is_listy({})
        assert not is_listy('')
        assert not is_listy(b'')
    def test_unsized_builtin(self):
        # Iterators and generators have no __len__, so they are not listy.
        assert not is_listy(iter([]))
        assert not is_listy(i for i in range(2))
    def test_user_defined_types(self):
        # Arbitrary objects are not listy, but abc Sequence/Set subclasses are.
        assert not is_listy(Model({}, 'test'))
        class AlwaysEmptySequence(Sequence):
            def __len__(self): return 0
            def __getitem__(self, i): return [][i]
        assert is_listy(AlwaysEmptySequence())
        class AlwaysEmptySet(Set):
            def __len__(self): return 0
            def __iter__(self): return iter([])
            def __contains__(self, x): return False
        assert is_listy(AlwaysEmptySet())
    def test_miscellaneous(self):
        class Foo(object): pass
        for x in [0, 1, False, True, Foo, object, object()]:
            assert not is_listy(x)
def test_double_mount(request):
    """Mounting a second app at an already-mounted path must raise."""
    class Root(object): pass
    # Unmount the test path even if the assertions below fail.
    request.addfinalizer(lambda: cherrypy.tree.apps.pop('/test', None))
    cherrypy.tree.mount(Root(), '/test')
    pytest.raises(Exception, cherrypy.tree.mount, Root(), '/test')
def test_ajaz_serialization():
    """@ajax endpoints serialize return values through sideboard's serializer."""
    class Root(object):
        @ajax
        def returns_date(self):
            return date(2001, 2, 3)
    endpoint = Root()
    assert endpoint.returns_date() == '"2001-02-03"'
def test_trace_logging():
    """The custom `trace` level method exists and is callable on the logger."""
    log.trace('normally this would be an error')
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from tackerclient.common import exceptions
from tackerclient.i18n import _
from tackerclient.tacker import v1_0 as tackerV10
# REST resource type names for the tacker v1.0 commands defined below.
_VNFFG = 'vnffg'
_NFP = 'nfp'
_SFC = 'sfc'
_FC = 'classifier'
class ListFC(tackerV10.ListCommand):
    """List FCs that belong to a given tenant."""

    resource = _FC
    list_columns = ['id', 'status', 'nfp_id', 'chain_id']

    def extend_list(self, data, parsed_args):
        """Update the list_columns list.

        This method updates the list_columns list by adding the
        'name' column in case the retrieved FC list from the tacker
        server side contains the names of the FCs.
        """
        # list_columns is a class attribute shared across invocations, so
        # guard against inserting 'name' more than once when extend_list
        # is called repeatedly.
        if 'name' in self.list_columns:
            return
        for item in data:
            if 'name' in item:
                self.list_columns.insert(1, 'name')
                break
class ShowFC(tackerV10.ShowCommand):
    """Show information of a given FC."""
    # Resource key consumed by the base ShowCommand.
    resource = _FC
class ListSFC(tackerV10.ListCommand):
    """List SFCs that belong to a given tenant."""
    resource = _SFC
    # Columns rendered in the CLI table output.
    list_columns = ['id', 'status', 'nfp_id']
class ShowSFC(tackerV10.ShowCommand):
    """Show information of a given SFC."""
    # Resource key consumed by the base ShowCommand.
    resource = _SFC
class ListNFP(tackerV10.ListCommand):
    """List NFPs that belong to a given tenant."""
    resource = _NFP
    # Columns rendered in the CLI table output.
    list_columns = ['id', 'name', 'status', 'vnffg_id', 'path_id']
class ShowNFP(tackerV10.ShowCommand):
    """Show information of a given NFP."""
    # Resource key consumed by the base ShowCommand.
    resource = _NFP
class ListVNFFG(tackerV10.ListCommand):
    """List VNFFGs that belong to a given tenant."""
    resource = _VNFFG
    # Columns rendered in the CLI table output.
    list_columns = ['id', 'name', 'ns_id',
                    'description', 'status', 'vnffgd_id']
class ShowVNFFG(tackerV10.ShowCommand):
    """Show information of a given VNFFG."""
    # Resource key consumed by the base ShowCommand.
    resource = _VNFFG
class CreateVNFFG(tackerV10.CreateCommand):
    """Create a VNFFG."""
    resource = _VNFFG
    # 'attributes' is internal request plumbing; hide it from CLI output.
    remove_output_fields = ["attributes"]
    def add_known_arguments(self, parser):
        # Register CLI arguments; exactly one of --vnffgd-id /
        # --vnffgd-name / --vnffgd-template must be supplied.
        parser.add_argument(
            'name', metavar='NAME',
            help=_('Set a name for the VNFFG'))
        vnffgd_group = parser.add_mutually_exclusive_group(required=True)
        vnffgd_group.add_argument(
            '--vnffgd-id',
            help=_('VNFFGD ID to use as template to create VNFFG'))
        vnffgd_group.add_argument(
            '--vnffgd-name',
            help=_('VNFFGD Name to use as template to create VNFFG'))
        vnffgd_group.add_argument(
            '--vnffgd-template',
            help=_('VNFFGD file to create VNFFG'))
        parser.add_argument(
            '--vnf-mapping',
            help=_('List of logical VNFD name to VNF instance name mapping. '
                   'Example: VNF1:my_vnf1,VNF2:my_vnf2'))
        parser.add_argument(
            '--symmetrical',
            action='store_true',
            default=False,
            help=_('Should a reverse path be created for the NFP'))
        parser.add_argument(
            '--param-file',
            help='Specify parameter yaml file'
        )
    def args2body(self, parsed_args):
        # Translate parsed CLI arguments into the REST request body.
        args = {'attributes': {}}
        body = {self.resource: args}
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        if parsed_args.vnf_mapping:
            # Resolve each "VNFD:vnf" pair to the VNF's resource ID.
            _vnf_mapping = dict()
            _vnf_mappings = parsed_args.vnf_mapping.split(",")
            for mapping in _vnf_mappings:
                vnfd_name, vnf = mapping.split(":", 1)
                _vnf_mapping[vnfd_name] = \
                    tackerV10.find_resourceid_by_name_or_id(
                        tacker_client, 'vnf', vnf)
            parsed_args.vnf_mapping = _vnf_mapping
        if parsed_args.vnffgd_name:
            # Resolve the VNFFGD name to its ID for the request body.
            _id = tackerV10.find_resourceid_by_name_or_id(tacker_client,
                                                          'vnffgd',
                                                          parsed_args.
                                                          vnffgd_name)
            parsed_args.vnffgd_id = _id
        elif parsed_args.vnffgd_template:
            # Inline template: parse the YAML file safely and reject
            # malformed or empty templates.
            with open(parsed_args.vnffgd_template) as f:
                template = f.read()
                try:
                    args['vnffgd_template'] = yaml.load(
                        template, Loader=yaml.SafeLoader)
                except yaml.YAMLError as e:
                    raise exceptions.InvalidInput(reason=e)
                if not args['vnffgd_template']:
                    raise exceptions.InvalidInput(
                        reason='The vnffgd file is empty')
        if parsed_args.param_file:
            # Optional parameter values, attached under 'attributes'.
            with open(parsed_args.param_file) as f:
                param_yaml = f.read()
            try:
                args['attributes']['param_values'] = yaml.load(
                    param_yaml, Loader=yaml.SafeLoader)
            except yaml.YAMLError as e:
                raise exceptions.InvalidInput(reason=e)
        tackerV10.update_dict(parsed_args, body[self.resource],
                              ['tenant_id', 'name', 'vnffgd_id',
                               'symmetrical', 'vnf_mapping'])
        return body
class UpdateVNFFG(tackerV10.UpdateCommand):
    """Update a given VNFFG."""
    resource = _VNFFG
    def add_known_arguments(self, parser):
        # All arguments are optional on update.
        parser.add_argument(
            '--vnffgd-template',
            help=_('VNFFGD file to update VNFFG')
        )
        parser.add_argument(
            '--vnf-mapping',
            help=_('List of logical VNFD name to VNF instance name mapping. '
                   'Example: VNF1:my_vnf1,VNF2:my_vnf2'))
        parser.add_argument(
            '--symmetrical',
            action='store_true',
            default=False,
            help=_('Should a reverse path be created for the NFP'))
    def args2body(self, parsed_args):
        # Translate parsed CLI arguments into the REST request body.
        args = {}
        body = {self.resource: args}
        tacker_client = self.get_client()
        tacker_client.format = parsed_args.request_format
        if parsed_args.vnf_mapping:
            # Resolve each "VNFD:vnf" pair to the VNF's resource ID.
            _vnf_mapping = dict()
            _vnf_mappings = parsed_args.vnf_mapping.split(",")
            for mapping in _vnf_mappings:
                vnfd_name, vnf = mapping.split(":", 1)
                _vnf_mapping[vnfd_name] = \
                    tackerV10.find_resourceid_by_name_or_id(
                        tacker_client, 'vnf', vnf)
            parsed_args.vnf_mapping = _vnf_mapping
        if parsed_args.vnffgd_template:
            # Parse the replacement template safely; reject malformed or
            # empty YAML.
            with open(parsed_args.vnffgd_template) as f:
                template = f.read()
                try:
                    args['vnffgd_template'] = yaml.load(
                        template, Loader=yaml.SafeLoader)
                except yaml.YAMLError as e:
                    raise exceptions.InvalidInput(reason=e)
                if not args['vnffgd_template']:
                    raise exceptions.InvalidInput(
                        reason='The vnffgd template is empty')
        tackerV10.update_dict(parsed_args, body[self.resource],
                              ['tenant_id', 'vnf_mapping', 'symmetrical'])
        return body
class DeleteVNFFG(tackerV10.DeleteCommand):
    """Delete a given VNFFG."""

    resource = _VNFFG
    remove_output_fields = ["attributes"]

    def add_known_arguments(self, parser):
        """Register the --force flag on the delete sub-command."""
        parser.add_argument(
            '--force',
            default=False,
            action='store_true',
            help=_('Force delete VNFFG'))

    def args2body(self, parsed_args):
        """Build the request body; only a forced delete sends attributes."""
        body = {}
        if parsed_args.force:
            body[self.resource] = {'attributes': {'force': True}}
        return body
|
|
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at:
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the
# License.
# Python 2/3 compatibility
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from decimal import Decimal
from itertools import chain
import six
from amazon.ion.core import timestamp, TimestampPrecision
from amazon.ion.exceptions import IonException
from amazon.ion.reader import ReadEventType, _NARROW_BUILD
from amazon.ion.reader_text import reader, _POS_INF, _NEG_INF, _NAN
from amazon.ion.symbols import SymbolToken
from amazon.ion.util import coroutine
from tests import listify, parametrize
from tests.event_aliases import *
from tests.reader_util import ReaderParameter, reader_scaffold, all_top_level_as_one_stream_params, value_iter
# Short aliases used throughout the parameter tables below.
_P = ReaderParameter
_ts = timestamp
_tp = TimestampPrecision
_d = Decimal
# Symbol token carrying only text (no symbol ID, no import location).
_st = partial(SymbolToken, sid=None, location=None)
def _sid(sid):
    """Build a SymbolToken carrying only a symbol ID (no text, no location)."""
    return SymbolToken(text=None, location=None, sid=sid)
# Ion text inputs that are grammatically invalid. Each tuple holds the raw
# input bytes followed by any events the reader may emit before the error.
_BAD_GRAMMAR = (
    (b'$ion_1_1 42',),
    (b'$ion_10_1 42',),
    (b'$ion_1_02 42',),
    (b'+1',),
    (b'01',),
    (b'1.23.4',),
    (b'1__0',),
    (b'1_e1',),
    (b'1e_1',),
    (b'1e1_',),
    (b'-infs',),
    (b'+infs',),
    (b'-in',),
    (b'1._0',),
    (b'1_.0',),
    (b'-_1',),
    (b'1_',),
    (b'0_x1',),
    (b'0b_1',),
    (b'1e0-1',),
    (b'1e0e-1',),
    (b'+inf-',),
    (b'null.strings',),
    (b'null.strn',),
    (b'null.flat',),
    (b'null.x',),
    (b'null.',),
    (b'200T',),
    (b'10000T',),
    (b'-2000T',),
    (b'-0001T',),
    (b'00-01T',),
    (b'2000-01',),
    (b'2000-001T',),
    (b'2000--01T',),
    (b'2000-01-123T',),
    (b'2000-01-12T1',),
    (b'2000-01--3T',),
    (b'2007-02-23T20:14:33.Z',),
    (b'2007-02-23T20:14:33.12.3Z',),
    (b'2007-02-23T20:14:33.12+00',),
    (b'1a',),
    (b'foo-',),
    (b'%',),
    (b'n%',),
    (b'"\n"',),
    (b'"\a"',),
    (b'"\\\a"',),
    (b'"a\b"',),
    (b'"\\\n\r"',),
    (b'"\0"',),
    (b"'\n'",),
    (b"'\a'",),
    (b"'\\\a'",),
    (b"'a\b'",),
    (b"'\\\n\r'",),
    (b"'\0'",),
    (b"'''\b'''",),
    (b"'''a\b'''",),
    (b'"\\udbff\\""',),  # Unpaired escaped surrogate.
    (b'"\\udbffabcdef',),  # Unpaired escaped surrogate.
    (b"'''\\udbff'''",),  # Splitting surrogate escapes across long string literal boundaries is illegal per the spec.
    (b'abc://',),
    (b'abc/**/://',),
    (b'{{/**/}}',),
    (b'{{//\n}}',),
    (b'{{/**/"abc"}}',),
    (b'{{"abc"//\n}}',),
    (b'{{\'\'\'abc\'\'\'//\n\'\'\'def\'\'\'}}',),
    (b'{{"\xf6"}}',),
    (b'{{"\n"}}',),
    (b"{{'''\0'''}}",),
    (b"{{'''\\u0000'''}}",),
    (b"{{'''\\u3000'''}}",),
    (b'{{"\\u0000"}}',),
    (b'{{"\\u3000"}}',),
    (b'{{"\\U0001f4a9"}}',),
    (b'{{ abcd} }',),
    (b'{ {abcd}}', e_start_struct()),
    (b'{{\'\' \'foo\'\'\'}}',),
    (b'{{\'"foo"}}',),
    (b'{{abc}de}}',),
    (b'{{"abc"}de}',),
    (b'{{ab}}',),
    (b'{{ab=}}',),
    (b'{{ab=}=}',),
    (b'{{ab===}}',),
    (b'{{====}}',),
    (b'{{abcd====}}',),
    (b'{{abc*}}',),
    (b'{foo:bar/**/baz:zar}', e_start_struct(), e_symbol(value=_st(u'bar'), field_name=_st(u'foo'))),
    (b'{foo:bar/**/baz}', e_start_struct(), e_symbol(value=_st(u'bar'), field_name=_st(u'foo'))),
    (b'[abc 123]', e_start_list(), e_symbol(value=_st(u'abc'))),
    (b'[abc/**/def]', e_start_list(), e_symbol(value=_st(u'abc'))),
    (b'{abc:}', e_start_struct()),
    (b'{abc :}', e_start_struct()),
    (b'{abc : //\n}', e_start_struct()),
    (b'[abc:]', e_start_list()),
    (b'(abc:)', e_start_sexp()),
    (b'[abc::]', e_start_list()),
    (b'(abc::)', e_start_sexp()),
    (b'{abc::}', e_start_struct()),
    (b'[abc ::]', e_start_list()),
    (b'(abc/**/::)', e_start_sexp()),
    (b'{abc//\n::}', e_start_struct()),
    (b'[abc::/**/]', e_start_list()),
    (b'(abc:: )', e_start_sexp()),
    (b'{abc:://\n}', e_start_struct()),
    (b'{foo:abc::}', e_start_struct()),
    (b'{foo:abc::/**/}', e_start_struct()),
    (b'{foo::bar}', e_start_struct()),
    (b'{foo::bar:baz}', e_start_struct()),
    (b'{foo, bar}', e_start_struct()),
    (b'{foo}', e_start_struct()),
    (b'{123}', e_start_struct()),
    (b'{42, 43}', e_start_struct()),
    (b'[abc, , 123]', e_start_list(), e_symbol(value=_st(u'abc'))),
    (b'[\'\'\'abc\'\'\'\'\']', e_start_list()),
    (b'[\'\'\'abc\'\'\'\'foo\']', e_start_list()),
    (b'[\'\'\'abc\'\'\'\'\', def]', e_start_list()),
    (b'{foo:\'\'\'abc\'\'\'\'\'}', e_start_struct()),
    (b'{foo:\'\'\'abc\'\'\'\'\', bar:def}', e_start_struct()),
    (b'[,]', e_start_list()),
    (b'(,)', e_start_sexp()),
    (b'{,}', e_start_struct()),
    (b'{foo:bar, ,}', e_start_struct(), e_symbol(value=_st(u'bar'), field_name=_st(u'foo'))),
    (b'{true:123}', e_start_struct()),
    (b'{false:123}', e_start_struct()),
    (b'{+inf:123}', e_start_struct()),
    (b'{-inf:123}', e_start_struct()),
    (b'{nan:123}', e_start_struct()),
    (b'{nan}', e_start_struct()),
    (b'{null.clob:123}', e_start_struct()),
    (b'{%:123}', e_start_struct()),
    (b'\'\'\'foo\'\'\'/\'\'\'bar\'\'\'',),  # Dangling slash at the top level.
    (b'{{\'\'\'foo\'\'\' \'\'bar\'\'\'}}',),
    (b'{\'\'\'foo\'\'\'/**/\'\'bar\'\'\':baz}', e_start_struct()),  # Missing an opening ' before "bar".
    (b'{\'\'\'foo\'\'\'/**/\'\'\'bar\'\'\'a:baz}', e_start_struct()),  # Character after field name, before colon.
    (b'{\'foo\'a:baz}', e_start_struct()),
    (b'{"foo"a:baz}', e_start_struct()),
    (b'(1..)', e_start_sexp()),
    (b'(1.a)', e_start_sexp()),
    (b'(1.23.)', e_start_sexp()),
    (b'(42/)', e_start_sexp()),
    (b'42/',),
    (b'0/',),
    (b'1.2/',),
    (b'1./',),
    (b'1.2e3/',),
    (b'1.2d3/',),
    (b'2000T/',),
    (b'/ ',),
    (b'/b',),
)
# Inputs that are grammatically valid but denote out-of-range or otherwise
# illegal values.
_BAD_VALUE = (
    (b'0000T',),  # Years must be 1..9999
    (b'2000-13T',),  # 2000 didn't have a thirteenth month.
    (b'2015-02-29T',),  # 2015 was not a leap year.
    (b'2000-01-01T24:00Z',),  # Hour is 0..23.
    (b'2000-01-01T00:60Z',),  # Minute is 0..59.
    (b'2000-01-01T00:00:60Z',),  # Second is 0..59.
    (b'2000-01-01T00:00:00.000+24:00',),  # Hour offset is 0..23.
    (b'2000-01-01T00:00:00.000+00:60',),  # Minute offset is 0..59.
    (b'"\\udbff\\u3000"',),  # Malformed surrogate pair (\u3000 is not a low surrogate).
    (b'"\\u3000\\udfff"',),  # Malformed surrogate pair (\u3000 is not a high surrogate).
)
# Valid prefixes of Ion values that are cut off mid-value. Each tuple holds
# the raw input plus any events that can be emitted before the reader must
# report the data as incomplete.
_INCOMPLETE = (
    (b'{',),  # Might be a lob.
    (b'{ ', e_start_struct()),
    (b'[', e_start_list()),
    (b'(', e_start_sexp()),
    (b'[[]', e_start_list(), e_start_list(), e_end_list()),
    (b'(()', e_start_sexp(), e_start_sexp(), e_end_sexp()),
    (b'{foo:{}', e_start_struct(), e_start_struct(field_name=_st(u'foo')), e_end_struct()),
    (b'{foo:bar', e_start_struct(),),
    (b'{foo:bar::', e_start_struct(),),
    (b'{foo:bar,', e_start_struct(), e_symbol(_st(u'bar'), field_name=_st(u'foo'))),
    (b'[[],', e_start_list(), e_start_list(), e_end_list()),
    (b'{foo:{},', e_start_struct(), e_start_struct(field_name=_st(u'foo')), e_end_struct()),
    (b'foo',),  # Might be an annotation.
    (b'\'foo\'',),  # Might be an annotation.
    (b'\'\'\'foo\'\'\'/**/',),  # Might be followed by another triple-quoted string.
    (b'\'\'\'\'',),  # Might be followed by another triple-quoted string.
    (b"'''abc''''def'", e_string(u'abc'),),
    (b'123',),  # Might have more digits.
    (b'42/',),  # The / might start a comment
    (b'0/',),
    (b'1.2/',),
    (b'1./',),
    (b'1.2e3/',),
    (b'1.2d3/',),
    (b'2000T/',),
    (b'-',),
    (b'+',),
    (b'1.2',),
    (b'1.2e',),
    (b'1.2e-',),
    (b'+inf',),  # Might be followed by more characters, making it invalid at the top level.
    (b'-inf',),
    (b'nan',),
    (b'1.2d',),
    (b'1.2d3',),
    (b'1_',),
    (b'0b',),
    (b'0x',),
    (b'2000-01',),
    (b'"abc',),
    (b'false',),  # Might be a symbol with more characters.
    (b'true',),
    (b'null.string',),  # Might be a symbol with more characters.
    (b'/',),
    (b'/*',),
    (b'//',),
    (b'foo:',),
    (b'foo::',),
    (b'foo::bar',),
    (b'foo//\n',),
    (b'{foo', e_start_struct()),
    (b'{{',),
    (b'{{"',),
    (b'(foo-', e_start_sexp(), e_symbol(_st(u'foo'))),
    (b'(-foo', e_start_sexp(), e_symbol(_st(u'-'))),
)
# Scripted (input event, expected output) sequences exercising the SKIP input.
_SKIP = (
    [(e_read(b'123 456 '), e_int(123)), (SKIP, TypeError)],  # Can't skip at top-level.
    [(e_read(b'[]'), e_start_list()), (SKIP, e_end_list()), (NEXT, END)],
    [(e_read(b'{//\n}'), e_start_struct()), (SKIP, e_end_struct()), (NEXT, END)],
    [(e_read(b'(/**/)'), e_start_sexp()), (SKIP, e_end_sexp()), (NEXT, END)],
    [(e_read(b'[a,b,c]'), e_start_list()), (NEXT, e_symbol(_st(u'a'))), (SKIP, e_end_list()), (NEXT, END)],
    [
        (e_read(b'{c:a,d:e::b}'), e_start_struct()),
        (NEXT, e_symbol(_st(u'a'), field_name=_st(u'c'))),
        (SKIP, e_end_struct()), (NEXT, END)
    ],
    [
        (e_read(b'(([{a:b}]))'), e_start_sexp()),
        (NEXT, e_start_sexp()),
        (SKIP, e_end_sexp()),
        (NEXT, e_end_sexp()), (NEXT, END)],
    [
        (e_read(b'['), e_start_list()),
        (SKIP, INC),
        (e_read(b'a,42'), INC),
        (e_read(b',]'), e_end_list()), (NEXT, END)
    ],
    [
        (e_read(b'{'), INC),
        (e_read(b'foo'), e_start_struct()),
        (SKIP, INC),
        (e_read(b':bar,baz:zar}'), e_end_struct()), (NEXT, END)
    ],
    [
        (e_read(b'('), e_start_sexp()),
        (SKIP, INC),
        (e_read(b'a+b'), INC),
        (e_read(b'//\n'), INC),
        (e_read(b')'), e_end_sexp()), (NEXT, END)
    ],
)
# Common (input, expected-output) shorthands for the flush tables below.
_NEXT_ERROR = (NEXT, IonException)
_NEXT_INC = (NEXT, INC)
_NEXT_END = (NEXT, END)
# Top-level values that are incomplete while mid-stream but produce a valid
# value event when NEXT signals the end of the data.
_GOOD_FLUSH = (
    [(e_read(b'0'), INC), (NEXT, e_int(0)), _NEXT_END],
    [(e_read(b'1'), INC), (NEXT, e_int(1)), _NEXT_END],
    [(e_read(b'-0'), INC), (NEXT, e_int(0)), _NEXT_END],
    [(e_read(b'123'), INC), (NEXT, e_int(123)), _NEXT_END],
    [(e_read(b'123.'), INC), (NEXT, e_decimal(_d(123))), _NEXT_END],
    [(e_read(b'1.23e-4'), INC), (NEXT, e_float(1.23e-4)), _NEXT_END],
    [(e_read(b'1.23d+4'), INC), (NEXT, e_decimal(_d(u'1.23e4'))), _NEXT_END],
    [(e_read(b'2000-01-01'), INC), (NEXT, e_timestamp(_ts(2000, 1, 1, precision=_tp.DAY))), _NEXT_END],
    [(e_read(b"a"), INC), (NEXT, e_symbol(_st(u'a'))), _NEXT_END],
    [(e_read(b"'abc'"), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END],
    [(e_read(b"$abc"), INC), (NEXT, e_symbol(_st(u'$abc'))), _NEXT_END],
    [(e_read(b"$"), INC), (NEXT, e_symbol(_st(u'$'))), _NEXT_END],
    [(e_read(b"$10"), INC), (NEXT, e_symbol(_sid(10))), _NEXT_END, (e_read(b'0'), INC), (NEXT, e_int(0)), _NEXT_END],
    [(e_read(b'abc'), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END, (e_read(b'def'), INC),
     (NEXT, e_symbol(_st(u'def'))), _NEXT_END],
    [(e_read(b"''"), INC), (NEXT, e_symbol(_st(u''))), _NEXT_END],
    [(e_read(b"'''abc'''"), INC), (NEXT, e_string(u'abc')), (NEXT, END), (e_read(b"'''def'''"), INC),
     (NEXT, e_string(u'def')), _NEXT_END],
    [(e_read(b"'''abc''''def'"), e_string(u'abc')), _NEXT_INC, (NEXT, e_symbol(_st(u'def'))), _NEXT_END],
    [(e_read(b"'''abc'''''"), INC), (NEXT, e_string(u'abc')), (NEXT, e_symbol(_st(u''))), _NEXT_END],
    [(e_read(b"'''abc'''//\n'def'"), e_string(u'abc')), _NEXT_INC, (NEXT, e_symbol(_st(u'def'))), _NEXT_END],
    [(e_read(b"'''abc'''/**/''"), INC), (NEXT, e_string(u'abc')), (NEXT, e_symbol(_st(u''))), _NEXT_END],
    [(e_read(b"'''abc'''//\n/**/''"), INC), (NEXT, e_string(u'abc')), (NEXT, e_symbol(_st(u''))), _NEXT_END],
    [(e_read(b'null'), INC), (NEXT, e_null()), _NEXT_END],
    [(e_read(b'null.string'), INC), (NEXT, e_string()), _NEXT_END],
    [(e_read(b'+inf'), INC), (NEXT, e_float(_POS_INF)), _NEXT_END],
    [(e_read(b'nan'), INC), (NEXT, e_float(_NAN)), _NEXT_END],
    [(e_read(b'true'), INC), (NEXT, e_bool(True)), _NEXT_END],
    [(e_read(b'//'), INC), _NEXT_END],  # Matches ion-java - termination of line comment with newline not required.
    [(e_read(b'abc//123\n'), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END],
    [(e_read(b"'abc'//123\n"), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END],
    [(e_read(b'abc//123'), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END],
    [(e_read(b"'abc'//123"), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END],
)
# Inputs that remain invalid even when flushed at end of stream.
_BAD_FLUSH = (
    [(e_read(b'$ion_1_1'), INC), _NEXT_ERROR],
    [(e_read(b'123_'), INC), _NEXT_ERROR],
    [(e_read(b'123e'), INC), _NEXT_ERROR],
    [(e_read(b'123e-'), INC), _NEXT_ERROR],
    [(e_read(b'123d+'), INC), _NEXT_ERROR],
    [(e_read(b'0x'), INC), _NEXT_ERROR],
    [(e_read(b'2000-01-'), INC), _NEXT_ERROR],
    [(e_read(b'"'), INC), _NEXT_ERROR],
    [(e_read(b'/'), INC), _NEXT_ERROR],
    [(e_read(b'abc/'), INC), _NEXT_ERROR],
    [(e_read(b'{/'), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b'/*'), INC), _NEXT_ERROR],
    [(e_read(b'abc/*'), INC), _NEXT_ERROR],
    [(e_read(b'[/*'), e_start_list()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b'(//'), e_start_sexp()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b'(//\n'), e_start_sexp()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b'+in'), INC), _NEXT_ERROR],
    [(e_read(b'null.'), INC), _NEXT_ERROR],
    [(e_read(b'null.str'), INC), _NEXT_ERROR],
    [(e_read(b'"abc'), INC), _NEXT_ERROR],
    [(e_read(b"'abc"), INC), _NEXT_ERROR],
    [(e_read(b"'''abc"), INC), _NEXT_ERROR],
    [(e_read(b"'''abc''''"), INC), _NEXT_ERROR],
    [(e_read(b"{{abc"), INC), _NEXT_ERROR],
    [(e_read(b'{{"abc"'), INC), _NEXT_ERROR],
    [(e_read(b"(abc"), e_start_sexp()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"[abc "), e_start_list()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"['abc' "), e_start_list()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc:def "), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc "), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc: "), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc://"), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc:/**/"), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc:/*"), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc//\n:"), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc/**/:"), e_start_struct()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"(abc "), e_start_sexp()), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"[abc,"), e_start_list()), (NEXT, e_symbol(_st(u'abc'))), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"[abc/**/,"), e_start_list()), (NEXT, e_symbol(_st(u'abc'))), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"[abc,//"), e_start_list()), (NEXT, e_symbol(_st(u'abc'))), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"[abc,/**/"), e_start_list()), (NEXT, e_symbol(_st(u'abc'))), _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"{abc:def,"), e_start_struct()), (NEXT, e_symbol(_st(u'def'), field_name=_st(u'abc'))),
     _NEXT_INC, _NEXT_ERROR],
    [(e_read(b"abc:"), INC), _NEXT_ERROR],
    [(e_read(b"abc/**/:"), INC), _NEXT_ERROR],
    [(e_read(b"abc//\n:"), INC), _NEXT_ERROR],
    [(e_read(b"abc::"), INC), _NEXT_ERROR],
    [(e_read(b"'abc'::"), INC), _NEXT_ERROR],
    [(e_read(b"abc:: //"), INC), _NEXT_ERROR],
    [(e_read(b"'abc' ::/**/"), INC), _NEXT_ERROR],
    [(e_read(b"abc//\n:: "), INC), _NEXT_ERROR],
    [(e_read(b"'abc'/**/::"), INC), _NEXT_ERROR],
    [(e_read(b"abc//\n::/**/"), INC), _NEXT_ERROR],
    [(e_read(b"'abc'/**/:://"), INC), _NEXT_ERROR],
    [(e_read(b'abc'), INC), (NEXT, e_symbol(_st(u'abc'))), _NEXT_END, (e_read(b'::123 '), IonException)],
    [(e_read(b'$10'), INC), (NEXT, e_symbol(_sid(10))), _NEXT_END, (e_read(b'::123 '), IonException)],
)
def _good_container(start, end, *events):
return (start(),) + events + (end(),)
# Convenience builders for the expected event sequence of each container type.
_good_sexp = partial(_good_container, e_start_sexp, e_end_sexp)
_good_struct = partial(_good_container, e_start_struct, e_end_struct)
_good_list = partial(_good_container, e_start_list, e_end_list)
# Valid byte inputs: each tuple is the raw text followed by the exact event
# sequence the reader is expected to produce.
_GOOD = (
    (b'$ion_1_0 42 ', IVM, e_int(42)),
    (b'$ion_1_0_ 42 ', e_symbol(_st(u'$ion_1_0_')), e_int(42)),
    (b'$ion_1_0a 42 ', e_symbol(_st(u'$ion_1_0a')), e_int(42)),
    (b'$ion_1_ 42 ', e_symbol(_st(u'$ion_1_')), e_int(42)),
    (b'$ion_a_b 42 ', e_symbol(_st(u'$ion_a_b')), e_int(42)),
    (b'$ion_1_b 42 ', e_symbol(_st(u'$ion_1_b')), e_int(42)),
    (b'ann::$ion_1_0 42 ', e_symbol(_st(u'$ion_1_0'), annotations=(_st(u'ann'),)), e_int(42)),
    (b'$ion_1234_1::$ion_1_0 42 ', e_symbol(_st(u'$ion_1_0'), annotations=(_st(u'$ion_1234_1'),)), e_int(42)),
    (b'$ion_1_0::$ion_1234_1 42 ', e_symbol(_st(u'$ion_1234_1'), annotations=(_st(u'$ion_1_0'),)), e_int(42)),
    (b'{$ion_1_0:abc}',) + _good_struct(e_symbol(_st(u'abc'), field_name=_st(u'$ion_1_0'))),
    (b'($ion_1_0)',) + _good_sexp(e_symbol(_st(u'$ion_1_0'))),
    (b'42[]', e_int(42)) + _good_list(),
    (b'\'foo\'123 ', e_symbol(_st(u'foo')), e_int(123)),
    (b'null()', e_null()) + _good_sexp(),
    (b'tru{}', e_symbol(_st(u'tru'))) + _good_struct(),
    (b'{{"foo"}}42{{}}', e_clob(b'foo'), e_int(42), e_blob(b'')),
    (b'+inf"bar"', e_float(_POS_INF), e_string(u'bar')),
    (b'foo\'bar\'"baz"', e_symbol(_st(u'foo')), e_symbol(_st(u'bar')), e_string(u'baz')),
    (b'\'\'\'foo\'\'\'\'\'123 ', e_string(u'foo'), e_symbol(_st(u'')), e_int(123)),
    (b'\'\'\'foo\'\'\'\'abc\'123 ', e_string(u'foo'), e_symbol(_st(u'abc')), e_int(123)),
    (b'[]',) + _good_list(),
    (b'()',) + _good_sexp(),
    (b'{}',) + _good_struct(),
    (b'{/**/}',) + _good_struct(),
    (b'(/**/)',) + _good_sexp(),
    (b'[/**/]',) + _good_list(),
    (b'{//\n}',) + _good_struct(),
    (b'(//\n)',) + _good_sexp(),
    (b'[//\n]',) + _good_list(),
    (b'{/**///\n}',) + _good_struct(),
    (b'(/**///\n)',) + _good_sexp(),
    (b'[/**///\n]',) + _good_list(),
    (b'(foo)',) + _good_sexp(e_symbol(_st(u'foo'))),
    (b'[foo]',) + _good_list(e_symbol(_st(u'foo'))),
    (b'(\'\')',) + _good_sexp(e_symbol(_st(u''))),
    (b'[\'\']',) + _good_list(e_symbol(_st(u''))),
    (b'(\'foo\')',) + _good_sexp(e_symbol(_st(u'foo'))),
    (b'[\'foo\']',) + _good_list(e_symbol(_st(u'foo'))),
    (b'/*foo*///bar\n/*baz*/',),
    (b'/*\\n*///\\u3000\n',),
    (b'\'\'::123 ', e_int(123, annotations=(_st(u''),))),
    (b'{foo:zar::[], bar: (), baz:{}}',) + _good_struct(
        e_start_list(field_name=_st(u'foo'), annotations=(_st(u'zar'),)), e_end_list(),
        e_start_sexp(field_name=_st(u'bar')), e_end_sexp(),
        e_start_struct(field_name=_st(u'baz')), e_end_struct()
    ),
    (b'[[], zar::{}, ()]',) + _good_list(
        e_start_list(), e_end_list(),
        e_start_struct(annotations=(_st(u'zar'),)), e_end_struct(),
        e_start_sexp(), e_end_sexp(),
    ),
    (b'{\'\':bar,}',) + _good_struct(e_symbol(_st(u'bar'), field_name=_st(u''))),
    (b'{\'\':bar}',) + _good_struct(e_symbol(_st(u'bar'), field_name=_st(u''))),
    (b'{\'\'\'foo\'\'\'/**/\'\'\'bar\'\'\':baz}',) + _good_struct(e_symbol(_st(u'baz'), field_name=_st(u'foobar')))
)
# Same shape as _GOOD, but the inputs are unicode text rather than bytes.
_GOOD_UNICODE = (
    (u'{foo:bar}',) + _good_struct(e_symbol(_st(u'bar'), field_name=_st(u'foo'))),
    (u'{foo:"b\xf6\u3000r"}',) + _good_struct(e_string(u'b\xf6\u3000r', field_name=_st(u'foo'))),
    (u'{\'b\xf6\u3000r\':"foo"}',) + _good_struct(e_string(u'foo', field_name=_st(u'b\xf6\u3000r'))),
    (u'\x7b\x7d',) + _good_struct(),
    (u'\u005b\u005d',) + _good_list(),
    (u'\u0028\x29',) + _good_sexp(),
    (u'\u0022\u0061\u0062\u0063\u0022', e_string(u'abc')),
    (u'{foo:"b\xf6\U0001f4a9r"}',) + _good_struct(e_string(u'b\xf6\U0001f4a9r', field_name=_st(u'foo'))),
    (u'{\'b\xf6\U0001f4a9r\':"foo"}',) + _good_struct(e_string(u'foo', field_name=_st(u'b\xf6\U0001f4a9r'))),
    (u'{"b\xf6\U0001f4a9r\":"foo"}',) + _good_struct(e_string(u'foo', field_name=_st(u'b\xf6\U0001f4a9r'))),
    (u'{\'\'\'\xf6\'\'\' \'\'\'\U0001f4a9r\'\'\':"foo"}',) + _good_struct(
        e_string(u'foo', field_name=_st(u'\xf6\U0001f4a9r'))
    ),
    (u'\'b\xf6\U0001f4a9r\'::"foo"', e_string(u'foo', annotations=(_st(u'b\xf6\U0001f4a9r'),))),
    (u'"\t\v\f\'"', e_string(u'\t\v\f\'')),
    (u"'''\t\v\f\"\n\r'''42 ", e_string(u'\t\v\f\"\n\n'), e_int(42))
)
# Unicode inputs that are invalid Ion text.
_BAD_UNICODE = (
    (u'\xf6',),  # Not an acceptable identifier symbol.
    (u'r\U0001f4a9',),
    (u'{foo:b\xf6\u3000r}', e_start_struct()),
    (u'{b\xf6\u3000:"foo"}', e_start_struct()),
    (u'{br\U0001f4a9:"foo"}', e_start_struct()),
    (u'{br\U0001f4a9r:"foo"}', e_start_struct()),
    (u'{\'\'\'\xf6\'\'\' \'\'\'\U0001f4a9r\'\'\'a:"foo"}', e_start_struct()),
    (u'b\xf6\U0001f4a9r::"foo"',),
    (u"'''\a'''",),
    (u"{{'''\xf6'''}}",),
    (u'{{"\u3000"}}',),
)
# Valid escape sequences provided as unicode input text.
_GOOD_ESCAPES_FROM_UNICODE = (
    (u'"\\xf6"', e_string(u'\xf6')),
    (u'"\\a"', e_string(u'\a')),
    (u'"a\\b"', e_string(u'a\b')),
    (u'"\\r"', e_string(u'\r')),
    (u"'\\xf6'42 ", e_symbol(_st(u'\xf6')), e_int(42)),
    (u"'\\a'42 ", e_symbol(_st(u'\a')), e_int(42)),
    (u"'a\\b'42 ", e_symbol(_st(u'a\b')), e_int(42)),
    (u"'\\r'42 ", e_symbol(_st(u'\r')), e_int(42)),
    (u"'''\\b'''42 ", e_string(u'\b'), e_int(42)),
    (u"'''a\\b'''42 ", e_string(u'a\b'), e_int(42)),
    (u'"\\u3000"', e_string(u'\u3000')),
    (u'"\\udbff\\udfff"', e_string(u'\U0010ffff')),  # Escaped surrogate pair.
    (u'["\\U0001F4a9"]',) + _good_list(e_string(u'\U0001f4a9')),
    (u'"\\t "\'\\\'\'"\\v"', e_string(u'\t '), e_symbol(_st(u'\'')), e_string(u'\v')),
    (u'(\'\\/\')',) + _good_sexp(e_symbol(_st(u'/'))),
    (u'{\'\\f\':foo,"\\?":\'\\\\\'::"\\v\\t"}',) + _good_struct(
        e_symbol(_st(u'foo'), field_name=_st(u'\f')), e_string(u'\v\t', field_name=_st(u'?'), annotations=(_st(u'\\'),))
    ),
    (u'\'\\?\\f\'::\'\\xF6\'::"\\\""', e_string(u'"', annotations=(_st(u'?\f'), _st(u'\xf6')))),
    (u"'''\\\'\\\'\\\''''\"\\\'\"", e_string(u"'''"), e_string(u"'")),
    (u"'''a''\\\'b'''\n'''\\\''''/**/''''\'c'''\"\"", e_string(u"a'''b'''c"), e_string(u'')),
    (u"'''foo''''\\U0001f4a9'42 ", e_string(u'foo'), e_symbol(_st(u'\U0001f4a9')), e_int(42)),
    (u"''''\\\r\n'''42 ", e_string(u"'"), e_int(42)),
    (u'"\\\n"', e_string(u'')),
    (u'"\\\r\n"', e_string(u'')),
    (u'"\\\r"', e_string(u'')),
    (u'"\\\r\\xf6"', e_string(u'\xf6')),
    (u'"\\\rabc"', e_string(u'abc')),
    (u"'\\\r\n'::42 ", e_int(42, annotations=(_st(u''),))),
    (u"{'''\\\rfoo\\\n\r''':bar}",) + _good_struct(e_symbol(_st(u'bar'), field_name=_st(u'foo\n'))),
    (u"{{'''\\x00''''''\\x7e'''}}", e_clob(b'\0~')),
    (u"{{'''\\xff'''}}", e_clob(b'\xff')),
    (u'{{"\\t"}}', e_clob(b'\t')),
    (u'{{"\\\n"}}', e_clob(b'')),
    (u"{{'''\\\r\n'''}}", e_clob(b'')),
)
# Valid escape sequences provided as byte input (raw br'' literals keep the
# backslashes in the data).
_GOOD_ESCAPES_FROM_BYTES = (
    (br'"\xf6"', e_string(u'\xf6')),
    (br'"\a"', e_string(u'\a')),
    (br'"a\b"', e_string(u'a\b')),
    (br'"\r"', e_string(u'\r')),
    (br"'\xf6'42 ", e_symbol(_st(u'\xf6')), e_int(42)),
    (br"'\a'42 ", e_symbol(_st(u'\a')), e_int(42)),
    (br"'a\b'42 ", e_symbol(_st(u'a\b')), e_int(42)),
    (br"'\r'42 ", e_symbol(_st(u'\r')), e_int(42)),
    (br"'''\b'''42 ", e_string(u'\b'), e_int(42)),
    (br"'''a\b'''42 ", e_string(u'a\b'), e_int(42)),
    (br'"\u3000"', e_string(u'\u3000')),
    (br'"\udbff\udfff"', e_string(u'\U0010ffff')),  # Escaped surrogate pair.
    (br'["\U0001F4a9"]',) + _good_list(e_string(u'\U0001f4a9')),
    (b'"\\t "\'\\\'\'"\\v"', e_string(u'\t '), e_symbol(_st(u'\'')), e_string(u'\v')),
    (b'(\'\\/\')',) + _good_sexp(e_symbol(_st(u'/'))),
    (b'{\'\\f\':foo,"\\?":\'\\\\\'::"\\v\\t"}',) + _good_struct(
        e_symbol(_st(u'foo'), field_name=_st(u'\f')), e_string(u'\v\t', field_name=_st(u'?'), annotations=(_st(u'\\'),))
    ),
    (b'\'\\?\\f\'::\'\\xF6\'::"\\\""', e_string(u'"', annotations=(_st(u'?\f'), _st(u'\xf6')))),
    (b"'''\\\'\\\'\\\''''\"\\\'\"", e_string(u"'''"), e_string(u"'")),
    (b"'''a''\\\'b'''\n'''\\\''''/**/''''\'c'''\"\"", e_string(u"a'''b'''c"), e_string(u'')),
    (b"'''foo''''\\U0001f4a9'42 ", e_string(u'foo'), e_symbol(_st(u'\U0001f4a9')), e_int(42)),
    (b"''''\\\r\n'''42 ", e_string(u"'"), e_int(42)),
    (b'"\\\n"', e_string(u'')),
    (b'"\\\r\n"', e_string(u'')),
    (b'"\\\r"', e_string(u'')),
    (b'"\\\r\\xf6"', e_string(u'\xf6')),
    (b'"\\\rabc"', e_string(u'abc')),
    (b"'\\\r\n'::42 ", e_int(42, annotations=(_st(u''),))),
    (b"{'''\\\rfoo\\\n\r''':bar}",) + _good_struct(e_symbol(_st(u'bar'), field_name=_st(u'foo\n'))),
    (b"{{'''\\x00''''''\\x7e'''}}", e_clob(b'\0~')),
    (b"{{'''\\xff'''}}", e_clob(b'\xff')),
    (b'{{"\\t"}}', e_clob(b'\t')),
    (b'{{"\\\n"}}', e_clob(b'')),
    (b"{{'''\\\r\n'''}}", e_clob(b'')),
)
# Escape sequences split across multiple reads: INC until the escape is
# completed by later input.
_INCOMPLETE_ESCAPES = (
    [(e_read(u'"\\'), INC), (e_read(u't"'), e_string(u'\t')), (NEXT, END)],
    [
        (e_read(u'\'\\x'), INC), (e_read(u'f'), INC), (e_read(u'6\'42 '), e_symbol(_st(u'\xf6'))),
        (NEXT, e_int(42)), (NEXT, END)
    ],
    [
        (e_read(u'{"\\U0001'), e_start_struct()), (NEXT, INC), (e_read(u'f4a9"'), INC),
        (e_read(u':bar}'), e_symbol(_st(u'bar'), field_name=_st(u'\U0001f4a9'))), (NEXT, e_end_struct()), (NEXT, END)
    ],
    [
        (e_read(u"'''\\\r"), INC), (e_read(u"\n'''//\n"), INC), (e_read(u"'''abc'''42 "), e_string(u'abc')),
        (NEXT, e_int(42)), (NEXT, END)
    ],
)
_UNICODE_SURROGATES = (
    # Note: Surrogates only allowed with UCS2.
    [(e_read(u'"\ud83d\udca9"'), e_string(u'\U0001f4a9')), (NEXT, END)],
    [(e_read(u'"\ud83d'), INC), (e_read(u'\udca9"'), e_string(u'\U0001f4a9')), (NEXT, END)],
)
# Invalid escape sequences, unicode input.
_BAD_ESCAPES_FROM_UNICODE = (
    (u'"\\g"',),
    (u'\'\\q\'',),
    (u'\\t',),
    (u'"abc"\\t', e_string(u'abc')),
    (u'\'abc\'\\n', e_symbol(_st(u'abc'))),
    (u'\'abc\'\\xf6', e_symbol(_st(u'abc'))),
    (u"'''abc'''\\U0001f4a9", e_string(u'abc')),
    (u"''\\u3000", e_symbol(_st(u''))),
    (u"'''\\u3''' '''000'''42 ",),
    (u'"\\U0001f4aQ"',),
    (u"{{'''abc'''\\n}}",),
    (u'{{"abc"\\n}}',),
    (u'{\'foo\'\\v:bar}', e_start_struct()),
    (u'{\'\'\'foo\'\'\'\\xf6:bar}', e_start_struct()),
)
# Invalid escape sequences, byte input.
_BAD_ESCAPES_FROM_BYTES = (
    (br'"\g"',),
    (br'\'\q\'',),
    (b'\\t',),
    (b'"abc"\\t', e_string(u'abc')),
    (b'\'abc\'\\n', e_symbol(_st(u'abc'))),
    (b'\'abc\'\\xf6', e_symbol(_st(u'abc'))),
    (b"'''abc'''\\U0001f4a9", e_string(u'abc')),
    (b"''\\u3000", e_symbol(_st(u''))),
    (b"'''\\u3''' '''000'''42 ",),
    (b'"\\U0001f4aQ"',),
    (b"{{'''abc'''\\n}}",),
    (b'{{"abc"\\n}}',),
    (b'{\'foo\'\\v:bar}', e_start_struct()),
    (b'{\'\'\'foo\'\'\'\\xf6:bar}', e_start_struct()),
)
# S-expressions whose values are delimited by operator characters rather than
# whitespace, plus the expected event sequences.
_UNSPACED_SEXPS = (
    (b'(a/b)',) + _good_sexp(e_symbol(_st(u'a')), e_symbol(_st(u'/')), e_symbol(_st(u'b'))),
    (b'(a+b)',) + _good_sexp(e_symbol(_st(u'a')), e_symbol(_st(u'+')), e_symbol(_st(u'b'))),
    (b'(a-b)',) + _good_sexp(e_symbol(_st(u'a')), e_symbol(_st(u'-')), e_symbol(_st(u'b'))),
    (b'(/%)',) + _good_sexp(e_symbol(_st(u'/%'))),
    (b'(foo //bar\n::baz)',) + _good_sexp(e_symbol(_st(u'baz'), annotations=(_st(u'foo'),))),
    (b'(foo/*bar*/ ::baz)',) + _good_sexp(e_symbol(_st(u'baz'), annotations=(_st(u'foo'),))),
    (b'(\'a b\' //\n::cd)',) + _good_sexp(e_symbol(_st(u'cd'), annotations=(_st(u'a b'),))),
    (b'(abc//baz\n-)',) + _good_sexp(e_symbol(_st(u'abc')), e_symbol(_st(u'-'))),
    (b'(null-100/**/)',) + _good_sexp(e_null(), e_int(-100)),
    (b'(//\nnull//\n)',) + _good_sexp(e_null()),
    (b'(abc/*baz*/123)',) + _good_sexp(e_symbol(_st(u'abc')), e_int(123)),
    (b'(abc/*baz*/-)',) + _good_sexp(e_symbol(_st(u'abc')), e_symbol(_st(u'-'))),
    (b'(abc//baz\n123)',) + _good_sexp(e_symbol(_st(u'abc')), e_int(123)),
    (b'(abc//\n/123)',) + _good_sexp(e_symbol(_st(u'abc')), e_symbol(_st(u'/')), e_int(123)),
    (b'(abc/////\n/123)',) + _good_sexp(e_symbol(_st(u'abc')), e_symbol(_st(u'/')), e_int(123)),
    (b'(abc/**//123)',) + _good_sexp(e_symbol(_st(u'abc')), e_symbol(_st(u'/')), e_int(123)),
    (b'(foo%+null-//\n)',) + _good_sexp(
        e_symbol(_st(u'foo')), e_symbol(_st(u'%+')), e_null(), e_symbol(_st(u'-//'))  # Matches java.
    ),
    (b'(null-100)',) + _good_sexp(e_null(), e_int(-100)),
    (b'(null\'a\')',) + _good_sexp(e_null(), e_symbol(_st(u'a'))),
    (b'(null\'a\'::b)',) + _good_sexp(e_null(), e_symbol(_st(u'b'), annotations=(_st(u'a'),))),
    (b'(null.string.b)',) + _good_sexp(e_string(None), e_symbol(_st(u'.')), e_symbol(_st(u'b'))),
    (b'(\'\'\'abc\'\'\'\'\')',) + _good_sexp(e_string(u'abc'), e_symbol(_st(u''))),
    (b'(\'\'\'abc\'\'\'\'foo\')',) + _good_sexp(e_string(u'abc'), e_symbol(_st(u'foo'))),
    (b'(\'\'\'abc\'\'\'\'\'42)',) + _good_sexp(e_string(u'abc'), e_symbol(_st(u'')), e_int(42)),
    (b'(42\'a\'::b)',) + _good_sexp(e_int(42), e_symbol(_st(u'b'), annotations=(_st(u'a'),))),
    (b'(1.23[])',) + _good_sexp(e_decimal(_d(u'1.23')), e_start_list(), e_end_list()),
    (b'(\'\'\'foo\'\'\'/\'\'\'bar\'\'\')',) + _good_sexp(e_string(u'foo'), e_symbol(_st(u'/')), e_string(u'bar')),
    (b'(-100)',) + _good_sexp(e_int(-100)),
    (b'(-1.23 .)',) + _good_sexp(e_decimal(_d(u'-1.23')), e_symbol(_st(u'.'))),
    (b'(1.)',) + _good_sexp(e_decimal(_d(u'1.'))),
    (b'(1. .1)',) + _good_sexp(e_decimal(_d(u'1.')), e_symbol(_st(u'.')), e_int(1)),
    (b'(2001-01-01/**/a)',) + _good_sexp(e_timestamp(_ts(2001, 1, 1, precision=_tp.DAY)), e_symbol(_st(u'a'))),
    (b'(nul)',) + _good_sexp(e_symbol(_st(u'nul'))),
    (b'(foo::%-bar)',) + _good_sexp(e_symbol(_st(u'%-'), annotations=(_st(u'foo'),)), e_symbol(_st(u'bar'))),
    (b'(true.False+)',) + _good_sexp(e_bool(True), e_symbol(_st(u'.')), e_symbol(_st(u'False')), e_symbol(_st(u'+'))),
    (b'(false)',) + _good_sexp(e_bool(False)),
    (b'(-inf)',) + _good_sexp(e_float(_NEG_INF)),
    (b'(+inf)',) + _good_sexp(e_float(_POS_INF)),
    (b'(nan)',) + _good_sexp(e_float(_NAN)),
    (b'(-inf+inf)',) + _good_sexp(e_float(_NEG_INF), e_float(_POS_INF)),
    (b'(+inf\'foo\')',) + _good_sexp(e_float(_POS_INF), e_symbol(_st(u'foo'))),
    (b'(-inf\'foo\'::bar)',) + _good_sexp(e_float(_NEG_INF), e_symbol(_st(u'bar'), annotations=(_st(u'foo'),))),
    # TODO the inf tests do not match ion-java's behavior. They should be reconciled. I believe this is more correct.
    (b'(- -inf-inf-in-infs-)',) + _good_sexp(
        e_symbol(_st(u'-')), e_float(_NEG_INF), e_float(_NEG_INF), e_symbol(_st(u'-')),
        e_symbol(_st(u'in')), e_symbol(_st(u'-')), e_symbol(_st(u'infs')), e_symbol(_st(u'-'))
    ),
    (b'(+ +inf+inf+in+infs+)',) + _good_sexp(
        e_symbol(_st(u'+')), e_float(_POS_INF), e_float(_POS_INF), e_symbol(_st(u'+')),
        e_symbol(_st(u'in')), e_symbol(_st(u'+')), e_symbol(_st(u'infs')), e_symbol(_st(u'+'))
    ),
    (b'(nan-nan+nan)',) + _good_sexp(
        e_float(_NAN), e_symbol(_st(u'-')), e_float(_NAN), e_symbol(_st(u'+')),
        e_float(_NAN)
    ),
    (b'(nans-inf+na-)',) + _good_sexp(
        e_symbol(_st(u'nans')), e_float(_NEG_INF), e_symbol(_st(u'+')),
        e_symbol(_st(u'na')), e_symbol(_st(u'-'))
    ),
    (b'({}()zar::[])',) + _good_sexp(
        e_start_struct(), e_end_struct(),
        e_start_sexp(), e_end_sexp(),
        e_start_list(annotations=(_st(u'zar'),)), e_end_list()
    ),
)
# Every supported scalar: raw text of a single value and its expected event.
# Consumed by _scalar_event_pairs / _scalar_iter below.
_GOOD_SCALARS = (
    (b'null', e_null()),
    (b'false', e_bool(False)),
    (b'true', e_bool(True)),
    (b'null.bool', e_bool()),
    (b'null.int', e_int()),
    (b'0', e_int(0)),
    (b'1_2_3', e_int(123)),
    (b'0xfe', e_int(254)),
    (b'0b101', e_int(5)),
    (b'0b10_1', e_int(5)),
    (b'-0b101', e_int(-5)),
    (b'-0b10_1', e_int(-5)),
    (b'1', e_int(1)),
    (b'-1', e_int(-1)),
    (b'0xc1c2', e_int(49602)),
    (b'0xc1_c2', e_int(49602)),
    (b'-0xc1c2', e_int(-49602)),
    (b'-0xc1_c2', e_int(-49602)),
    (b'9223372036854775808', e_int(9223372036854775808)),
    (b'-9223372036854775809', e_int(-9223372036854775809)),
    (b'null.float', e_float()),
    (b'0.0e1', e_float(0.)),
    (b'-0.0e-1', e_float(-0.)),
    (b'0.0e+1', e_float(0.)),
    (b'0.0E1', e_float(0.)),
    (b'-inf', e_float(_NEG_INF)),
    (b'+inf', e_float(_POS_INF)),
    (b'nan', e_float(_NAN)),
    (b'null.decimal', e_decimal()),
    (b'0.0', e_decimal(_d(u'0.0'))),
    (b'0.', e_decimal(_d(u'0.'))),
    (b'-0.0', e_decimal(_d(u'-0.0'))),
    (b'0d-1000', e_decimal(_d(u'0e-1000'))),
    (b'0d1000', e_decimal(_d(u'0e1000'))),
    (b'1d1', e_decimal(_d(u'1e1'))),
    (b'1D1', e_decimal(_d(u'1e1'))),
    (b'1234d-20', e_decimal(_d(u'1234e-20'))),
    (b'1234d+20', e_decimal(_d(u'1234e20'))),
    (b'1d0', e_decimal(_d(u'1e0'))),
    (b'1d-1', e_decimal(_d(u'1e-1'))),
    (b'0d-1', e_decimal(_d(u'0e-1'))),
    (b'0d1', e_decimal(_d(u'0e1'))),
    (b'-1d1', e_decimal(_d(u'-1e1'))),
    (b'-1d0', e_decimal(_d(u'-1e0'))),
    (b'-1d-1', e_decimal(_d(u'-1e-1'))),
    (b'-0d-1', e_decimal(_d(u'-0e-1'))),
    (b'-0d1', e_decimal(_d(u'-0e1'))),
    (b'null.timestamp', e_timestamp()),
    (b'2007-01T', e_timestamp(_ts(2007, 1, precision=_tp.MONTH))),
    (b'2007T', e_timestamp(_ts(2007, precision=_tp.YEAR))),
    (b'2007-01-01', e_timestamp(_ts(2007, 1, 1, precision=_tp.DAY))),
    (
        b'2000-01-01T00:00:00.0Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, 0, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=1
        ))
    ),
    (
        b'2000-01-01T00:00:00.000Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, 0, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=3
        ))
    ),
    (
        b'2000-01-01T00:00:00.999999Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, 999999, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=6
        ))
    ),
    (
        b'2000-01-01T00:00:00.99999900000Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, 999999, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=6
        ))
    ),
    (
        b'2000-01-01T00:00:00.9999999Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, None, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=None,
            fractional_seconds=Decimal('0.9999999')
        ))
    ),
    (
        b'2000-01-01T00:00:00.1234567Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, None, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=None,
            fractional_seconds=Decimal('0.1234567')
        ))
    ),
    (
        b'2000-01-01T00:00:00.1234567800Z',
        e_timestamp(_ts(
            2000, 1, 1, 0, 0, 0, None, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=None,
            fractional_seconds=Decimal('0.1234567800')
        ))
    ),
    (
        b'2000-01-01T00:00:00.000-00:00',
        e_timestamp(_ts(2000, 1, 1, 0, 0, 0, 0, precision=_tp.SECOND, fractional_precision=3))
    ),
    (
        b'2007-02-23T00:00+00:00',
        e_timestamp(_ts(2007, 2, 23, 0, 0, off_hours=0, off_minutes=0, precision=_tp.MINUTE))
    ),
    (b'2007-01-01T', e_timestamp(_ts(2007, 1, 1, precision=_tp.DAY))),
    (b'2000-01-01T00:00:00Z', e_timestamp(_ts(2000, 1, 1, 0, 0, 0, off_hours=0, off_minutes=0, precision=_tp.SECOND))),
    (
        b'2007-02-23T00:00:00-00:00',
        e_timestamp(_ts(2007, 2, 23, 0, 0, 0, precision=_tp.SECOND))
    ),
    (
        b'2007-02-23T12:14:33.079-08:00',
        e_timestamp(_ts(
            2007, 2, 23, 12, 14, 33, 79000, off_hours=-8, off_minutes=0, precision=_tp.SECOND, fractional_precision=3
        ))
    ),
    (
        b'2007-02-23T20:14:33.079Z',
        e_timestamp(_ts(
            2007, 2, 23, 20, 14, 33, 79000, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=3
        ))
    ),
    (
        b'2007-02-23T20:14:33.079+00:00',
        e_timestamp(_ts(
            2007, 2, 23, 20, 14, 33, 79000, off_hours=0, off_minutes=0, precision=_tp.SECOND, fractional_precision=3
        ))
    ),
    (b'0001T', e_timestamp(_ts(1, precision=_tp.YEAR))),
    (b'0001-01-01T00:00:00Z', e_timestamp(_ts(1, 1, 1, 0, 0, 0, off_hours=0, off_minutes=0, precision=_tp.SECOND))),
    (b'2016-02-29T', e_timestamp(_ts(2016, 2, 29, precision=_tp.DAY))),
    (b'null.symbol', e_symbol()),
    (b'nul', e_symbol(_st(u'nul'))),  # See the logic in the event generators that forces these to emit an event.
    (b'$foo', e_symbol(_st(u'$foo'))),
    (b'$10', e_symbol(_sid(10))),
    (b'$10n', e_symbol(_st(u'$10n'))),
    (b'$2', e_symbol(_sid(2))),  # Note: NOT an IVM event
    (b"'$ion_1_0'", e_symbol(_st(u'$ion_1_0'))),  # Note: NOT an IVM event
    (b'$', e_symbol(_st(u'$'))),
    (b'\'a b\'', e_symbol(_st(u'a b'))),
    (b'\'\'', e_symbol(_st(u''))),
    (b'null.string', e_string()),
    (b'" "', e_string(u' ')),
    (b'\'\'\'foo\'\'\' \'\'\'\'\'\' \'\'\'""\'\'\'', e_string(u'foo""')),
    (b'\'\'\'ab\'\'cd\'\'\'', e_string(u'ab\'\'cd')),
    (b"'''\r\n \r \n \n\r'''", e_string(u'\n \n \n \n\n')),
    (b'null.clob', e_clob()),
    (b'{{""}}', e_clob(b'')),
    (b'{{ "abcd" }}', e_clob(b'abcd')),
    (b'{{"abcd"}}', e_clob(b'abcd')),
    (b'{{"abcd"\n}}', e_clob(b'abcd')),
    (b'{{\'\'\'ab\'\'\' \'\'\'cd\'\'\'}}', e_clob(b'abcd')),
    (b'{{\'\'\'ab\'\'\'\n\'\'\'cd\'\'\'}}', e_clob(b'abcd')),
    (b'null.blob', e_blob()),
    (b'{{}}', e_blob(b'')),
    (b'{{ YW1heg== }}', e_blob(b'amaz')),
    (b'{{ YW1hem8= }}', e_blob(b'amazo')),
    (b'{{ YW1hem9u }}', e_blob(b'amazon')),
    (b'{{ YW1heg = = }}', e_blob(b'amaz')),
    (b'{{aW\n9u}}', e_blob(b'ion')),
    (b'{{aW9u}}', e_blob(b'ion')),
    (b'null.list', e_null_list()),
    (b'null.sexp', e_null_sexp()),
    (b'null.struct', e_null_struct()),
)
def _scalar_event_pairs(data, events, info):
    """Generates event pairs for all scalars.

    Each scalar is represented by a sequence whose first element is the raw data and whose following elements are the
    expected output events.

    :param data: raw Ion text for one scalar value.
    :param events: the expected output events for that scalar.
    :param info: ``(delimiter, in_container)`` tuple describing the test context.
    """
    first = True
    delimiter, in_container = info
    # A comma delimiter implies list/struct context; anything else is treated
    # as whitespace/comment ("space") delimited.
    space_delimited = not (b',' in delimiter)
    for event in events:
        input_event = NEXT
        if first:
            # The first pair feeds the raw data (plus delimiter) to the reader.
            input_event = e_read(data + delimiter)
            if space_delimited and event.value is not None \
                    and ((event.ion_type is IonType.SYMBOL) or
                         (event.ion_type is IonType.STRING and
                          six.byte2int(b'"') != six.indexbytes(data, 0))):  # triple-quoted strings
                # Because annotations and field names are symbols, a space delimiter after a symbol isn't enough to
                # generate a symbol event immediately. Similarly, triple-quoted strings may be followed by another
                # triple-quoted string if only delimited by whitespace or comments.
                yield input_event, INC
                if in_container:
                    # Within s-expressions, these types are delimited in these tests by another value - in this case,
                    # int 0 (but it could be anything).
                    yield e_read(b'0' + delimiter), event
                    input_event, event = (NEXT, e_int(0))
                else:
                    # This is a top-level value, so it may be flushed with NEXT after INCOMPLETE.
                    input_event, event = (NEXT, event)
            first = False
        yield input_event, event
# Iterator over (raw data, event pairs) for every entry in _GOOD_SCALARS.
_scalar_iter = partial(value_iter, _scalar_event_pairs, _GOOD_SCALARS)
@coroutine
def _scalar_params():
    """Yields one reader parameter per good scalar literal.

    Each parameter pairs the scalar's scripted event sequence with a trailing
    ``(NEXT, INC)`` showing the reader wants more data after the value.
    """
    while True:
        info = yield
        for raw_data, scripted_pairs in _scalar_iter(info):
            yield _P(
                desc=raw_data,
                event_pairs=scripted_pairs + [(NEXT, INC)]
            )
def _top_level_value_params(delimiter=b' ', is_delegate=False):
    """Converts the top-level tuple list into parameters with appropriate ``NEXT`` inputs.

    The expectation is starting from an end of stream top-level context.
    """
    for data, event_pairs in _scalar_iter((delimiter, False)):
        lead_event = event_pairs[0][1]
        if lead_event.event_type is IonEventType.INCOMPLETE:
            # Happens with space-delimited symbol values: the real first
            # output event is the second pair's.
            lead_event = event_pairs[1][1]
        yield _P(
            desc='TL %s - %s - %r' %
                 (lead_event.event_type.name, lead_event.ion_type.name, data),
            event_pairs=[(NEXT, END)] + event_pairs + [(NEXT, END)],
        )
    if is_delegate:
        # Delegating callers expect a trailing bare yield to regain control.
        yield
@coroutine
def _all_scalars_in_one_container_params():
    """Generates one parameter that contains all scalar events in a single container. """
    while True:
        info = yield

        @listify
        def generate_event_pairs():
            # Replays every scalar's scripted pairs back-to-back, patching the
            # symbol-value INC protocol so the stream stays consumable.
            for data, event_pairs in _scalar_iter(info):
                pairs = ((i, o) for i, o in event_pairs)
                while True:
                    try:
                        input_event, output_event = next(pairs)
                        yield input_event, output_event
                        if output_event is INC:
                            # This is a symbol value.
                            yield next(pairs)  # Input: a scalar. Output: the symbol value's event.
                            yield next(pairs)  # Input: NEXT. Output: the previous scalar's event.
                            yield (NEXT, INC)
                    except StopIteration:
                        break
        yield _P(
            desc='ALL',
            event_pairs=generate_event_pairs()
        )
def _collect_params(param_generator, info):
"""Collects all output of the given coroutine into a single list."""
params = []
while True:
param = param_generator.send(info)
if param is None:
return params
params.append(param)
# Symbol test data. The two inner tuples are index-aligned: element 0 holds
# raw Ion text for a symbol, element 1 holds the expected symbol token (_st)
# or symbol ID (_sid) parsed from the same index.
_TEST_SYMBOLS = (
    (
        b'foo',
        b'$foo',
        b'$ios',
        b'$',
        b'$10',
        b'\'a b\'',
        b'foo ',
        b'\'a b\' ',
        b'foo/*bar*/',
        b'\'a b\' //bar\r',
        b'\'\'',
        b'\'\\U0001f4a9\'',
    ),
    (
        _st(u'foo'),
        _st(u'$foo'),
        _st(u'$ios'),
        _st(u'$'),
        _sid(10),
        _st(u'a b'),
        _st(u'foo'),
        _st(u'a b'),
        _st(u'foo'),
        _st(u'a b'),
        _st(u''),
        _st(u'\U0001f4a9'),
    )
)
# Field-name test data: every symbol form above plus quoted/triple-quoted
# string forms that are legal as field names. Raw/expected stay index-aligned.
_TEST_FIELD_NAMES = (
    _TEST_SYMBOLS[0] +
    (
        b'"foo"',
        b'"foo"//bar\n',
        b'\'\'\'foo\'\'\'/*bar*/\'\'\'baz\'\'\'',
        b'//zar\n\'\'\'foo\'\'\'/*bar*/\'\'\'baz\'\'\'',
        b'\'\'\'a \'\'\'\t\'\'\'b\'\'\'',
        b'\'\'\'a \'\'\'\'\'\'b\'\'\'/*zar*/',
        b'\'\'\'\'\'\'',
        b'"\\xf6"',
        b"'''\r\n \r \n \n\r'''",
    ),
    _TEST_SYMBOLS[1] +
    (
        _st(u'foo'),
        _st(u'foo'),
        _st(u'foobaz'),
        _st(u'foobaz'),
        _st(u'a b'),
        _st(u'a b'),
        _st(u''),
        _st(u'\xf6'),
        _st(u'\n \n \n \n\n'),
    )
)
def _generate_annotations():
    """Circularly generates (raw, expected) annotation tuples.

    Yields ever-growing prefixes of _TEST_SYMBOLS.
    NOTE(review): the prefix length starts at 1 and wraps to 0, so one yield
    per cycle carries an empty annotation set -- confirm this is intentional.
    """
    assert len(_TEST_SYMBOLS[0]) == len(_TEST_SYMBOLS[1])
    symbol_count = len(_TEST_SYMBOLS[0])
    prefix_len = 1
    while True:
        yield _TEST_SYMBOLS[0][0:prefix_len], _TEST_SYMBOLS[1][0:prefix_len]
        prefix_len += 1
        if prefix_len == symbol_count:
            prefix_len = 0


# Shared, module-wide cyclic source of annotation pairs.
_annotations_generator = _generate_annotations()
@coroutine
def _annotate_params(params, is_delegate=False):
    """Adds annotation wrappers for a given iterator of parameters."""
    while True:
        info = yield
        params_list = _collect_params(params, info)
        # One annotation set is used for every parameter in this batch.
        test_annotations, expected_annotations = next(_annotations_generator)
        for param in params_list:
            @listify
            def annotated():
                pairs = ((i, o) for i, o in param.event_pairs)
                while True:
                    try:
                        input_event, output_event = next(pairs)
                        if input_event.type is ReadEventType.DATA:
                            # Prefix the raw value text with 'ann::' for each
                            # annotation before handing it to the reader.
                            data = b''
                            for test_annotation in test_annotations:
                                data += test_annotation + b'::'
                            data += input_event.data
                            input_event = read_data_event(data)
                            if output_event is INC:
                                yield input_event, output_event
                                input_event, output_event = next(pairs)
                            output_event = output_event.derive_annotations(expected_annotations)
                        yield input_event, output_event
                    except StopIteration:
                        break
            yield _P(
                desc='ANN %r on %s' % (expected_annotations, param.desc),
                event_pairs=annotated(),
            )
        if not is_delegate:
            break
def _generate_field_name():
    """Circularly generates (raw, expected) field name pairs from _TEST_FIELD_NAMES."""
    assert len(_TEST_FIELD_NAMES[0]) == len(_TEST_FIELD_NAMES[1])
    total = len(_TEST_FIELD_NAMES[0])
    position = 0
    while True:
        yield _TEST_FIELD_NAMES[0][position], _TEST_FIELD_NAMES[1][position]
        position = (position + 1) % total


# Shared, module-wide cyclic source of field name pairs.
_field_name_generator = _generate_field_name()
@coroutine
def _containerize_params(param_generator, with_skip=True, is_delegate=False, top_level=True):
    """Adds container wrappers for a given iteration of parameters."""
    while True:
        yield
        # Each info tuple is (ion_type, open text, close text, delimiter).
        for info in ((IonType.LIST, b'[', b']', b','),
                     (IonType.SEXP, b'(', b')', b' '),  # Sexps without delimiters are tested separately
                     (IonType.STRUCT, b'{ ', b'}', b','),  # Space after opening bracket for instant event.
                     (IonType.LIST, b'[/**/', b'//\n]', b'//\r,'),
                     (IonType.SEXP, b'(//\n', b'/**/)', b'/**/'),
                     (IonType.STRUCT, b'{/**/', b'//\r}', b'/**/,')):
            ion_type = info[0]
            params = _collect_params(param_generator, (info[3], True))
            for param in params:
                @listify
                def add_field_names(event_pairs):
                    # Structs require a field name before every value; prefix
                    # each DATA event and adorn the matching output event.
                    container = False
                    first = True
                    for read_event, ion_event in event_pairs:
                        if not container and read_event.type is ReadEventType.DATA:
                            field_name, expected_field_name = next(_field_name_generator)
                            data = field_name + b':' + read_event.data
                            read_event = read_data_event(data)
                            ion_event = ion_event.derive_field_name(expected_field_name)
                        if first and ion_event.event_type is IonEventType.CONTAINER_START:
                            # For containers within a struct--only the CONTAINER_START event gets adorned with a
                            # field name
                            container = True
                        first = False
                        yield read_event, ion_event
                start = []
                end = [(e_read(info[2]), e_end(ion_type))]
                if top_level:
                    start = [(NEXT, END)]
                    end += [(NEXT, END)]
                else:
                    end += [(NEXT, INC)]
                start += [
                    (e_read(info[1]), e_start(ion_type)),
                    (NEXT, INC)
                ]
                if ion_type is IonType.STRUCT:
                    mid = add_field_names(param.event_pairs)
                else:
                    mid = param.event_pairs
                desc = 'CONTAINER %s - %s' % (ion_type.name, param.desc)
                yield _P(
                    desc=desc,
                    event_pairs=start + mid + end,
                )
                if with_skip:
                    @listify
                    def only_data_inc(event_pairs):
                        # When skipping, every DATA input simply yields INC.
                        for read_event, ion_event in event_pairs:
                            if read_event.type is ReadEventType.DATA:
                                yield read_event, INC
                    start = start[:-1] + [(SKIP, INC)]
                    mid = only_data_inc(mid)
                    yield _P(
                        desc='SKIP %s' % desc,
                        event_pairs=start + mid + end,
                    )
        if not is_delegate:
            break
def _expect_event(expected_event, data, events, delimiter):
    """Generates event pairs for a stream that ends in an expected event (or
    exception), given the text and the output events preceding it.
    """
    events += (expected_event,)
    head, tail = events[0], events[1:]
    event_pairs = [(e_read(data + delimiter), head)]
    event_pairs.extend((NEXT, output) for output in tail)
    return event_pairs
@coroutine
def _basic_params(event_func, desc, delimiter, data_event_pairs, is_delegate=False, top_level=True):
    """Generates parameters from a sequence whose first element is the raw data
    and the following elements are the expected output events.
    """
    while True:
        yield
        value_pairs = list(value_iter(event_func, data_event_pairs, delimiter))
        # value_iter yields (data, event_pairs); keep only the event pairs.
        event_pair_sequences = list(zip(*value_pairs))[1]
        for param in _paired_params(event_pair_sequences, desc, top_level):
            yield param
        if not is_delegate:
            break
def _paired_params(params, desc, top_level=True):
    """Generates reader parameters from sequences of input/output event pairs."""
    for event_pairs in params:
        raw_data = event_pairs[0][0].data
        pairs = ([(NEXT, END)] + event_pairs) if top_level else event_pairs
        yield _P(
            desc='%s %s' % (desc, raw_data),
            event_pairs=pairs,
            is_unicode=isinstance(raw_data, six.text_type)
        )
# Expectation helpers: each binds _expect_event to the terminal event (or
# exception type) a stream should end with, then builds the matching
# parameter factory via _basic_params.
_ion_exception = partial(_expect_event, IonException)
_bad_grammar_params = partial(_basic_params, _ion_exception, 'BAD GRAMMAR', b' ')
_bad_unicode_params = partial(_basic_params, _ion_exception, 'BAD GRAMMAR - UNICODE', u' ')
_value_error = partial(_expect_event, ValueError)
_bad_value_params = partial(_basic_params, _value_error, 'BAD VALUE', b' ')
_incomplete = partial(_expect_event, INC)
_incomplete_params = partial(_basic_params, _incomplete, 'INC', b'')
_end = partial(_expect_event, END)
_good_params = partial(_basic_params, _end, 'GOOD', b'')
_good_unicode_params = partial(_basic_params, _end, 'GOOD - UNICODE', u'')
@parametrize(*chain(
    _good_params(_GOOD),
    _bad_grammar_params(_BAD_GRAMMAR),
    _bad_value_params(_BAD_VALUE),
    _incomplete_params(_INCOMPLETE),
    _good_unicode_params(_GOOD_UNICODE),
    _good_unicode_params(_GOOD_ESCAPES_FROM_UNICODE),
    _good_params(_GOOD_ESCAPES_FROM_BYTES),
    _bad_unicode_params(_BAD_UNICODE),
    _bad_unicode_params(_BAD_ESCAPES_FROM_UNICODE),
    _bad_grammar_params(_BAD_ESCAPES_FROM_BYTES),
    _paired_params(_INCOMPLETE_ESCAPES, 'INCOMPLETE ESCAPES'),
    _paired_params(_UNICODE_SURROGATES, 'UNICODE SURROGATES') if _NARROW_BUILD else (),
    _good_params(_UNSPACED_SEXPS),
    _paired_params(_SKIP, 'SKIP'),
    _paired_params(_GOOD_FLUSH, 'GOOD FLUSH'),
    _paired_params(_BAD_FLUSH, 'BAD FLUSH'),
    # All top-level values as individual data events, space-delimited.
    _top_level_value_params(),
    # All top-level values as one data event, space-delimited.
    all_top_level_as_one_stream_params(_scalar_iter, (b' ', False)),
    # All top-level values as one data event, block comment-delimited.
    all_top_level_as_one_stream_params(_scalar_iter, (b'/*foo*/', False)),
    # All top-level values as one data event, line comment-delimited.
    all_top_level_as_one_stream_params(_scalar_iter, (b'//foo\n', False)),
    # All annotated top-level values, space-delimited.
    _annotate_params(_top_level_value_params(is_delegate=True)),
    # All annotated top-level values, comment-delimited.
    _annotate_params(_top_level_value_params(b'//foo\n/*bar*/', is_delegate=True)),
    _annotate_params(_good_params(_UNSPACED_SEXPS, is_delegate=True)),
    # All values, each as the only value within a container.
    _containerize_params(_scalar_params()),
    _containerize_params(_containerize_params(_scalar_params(), is_delegate=True, top_level=False), with_skip=False),
    # All values, annotated, each as the only value within a container.
    _containerize_params(_annotate_params(_scalar_params(), is_delegate=True)),
    # All values within a single container.
    _containerize_params(_all_scalars_in_one_container_params()),
    # Annotated containers.
    _containerize_params(_annotate_params(_all_scalars_in_one_container_params(), is_delegate=True)),
    # All unspaced sexps, annotated, in containers.
    _containerize_params(_annotate_params(_incomplete_params(
        _UNSPACED_SEXPS, is_delegate=True, top_level=False), is_delegate=True
    )),
))
def test_raw_reader(p):
    """Drives the raw text reader through the scripted event pairs of ``p``."""
    reader_scaffold(reader(is_unicode=p.is_unicode), p.event_pairs)
|
|
# coding: utf-8
"""
This module contains extra functions/shortcuts used to render HTML.
"""
import json
import re
import sys
from django import template
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import render_to_string
def _render_to_string(request, tpl, user_context):
    """Custom rendering function.

    Just a wrapper which automatically adds a RequestContext instance
    (useful to use settings variables like STATIC_URL inside templates)
    """
    request_context = template.RequestContext(request)
    return render_to_string(tpl, user_context, context_instance=request_context)
def _render_error(request, errortpl="error", user_context=None):
    """Render an error page from the ``common`` template directory.

    :param errortpl: template basename under ``common/`` (without ``.html``)
    :param user_context: optional extra template context
    """
    context = user_context if user_context is not None else {}
    return render(request, "common/%s.html" % errortpl, context)
def render_actions(actions):
    """Render a list of action links using the ``render_link`` template tag."""
    action_template = template.Template("""{% load lib_tags %}
{% for a in actions %}{% render_link a %}{% endfor %}
""")
    context = template.Context(dict(actions=actions))
    return action_template.render(context)
def getctx(status, level=1, callback=None, **kwargs):
    """Build a standard callback context dictionary.

    :param status: value stored under the ``status`` key
    :param level: stack depth used to guess the caller's name when no
        ``callback`` is given (1 = direct caller)
    :param callback: explicit callback name; overrides stack inspection
    :param kwargs: extra key/value pairs merged into the context
    :return: dict with ``status``, ``callback`` and all extra pairs
    """
    if not callback:
        # Derive the callback name from the calling function's code object.
        callername = sys._getframe(level).f_code.co_name
    else:
        callername = callback
    ctx = {"status": status, "callback": callername}
    # dict.update replaces the Python 2-only ``iteritems`` loop: identical
    # behaviour, and it also works on Python 3.
    ctx.update(kwargs)
    return ctx
def ajax_response(request, status="ok", respmsg=None,
                  url=None, ajaxnav=False, norefresh=False,
                  template=None, **kwargs):
    """Ajax response shortcut

    Simple shortcut that sends a JSON response. If a template is
    provided, a 'content' field will be added to the response,
    containing the result of this template rendering.

    :param request: a Request object
    :param status: the response status ('ok' or 'ko')
    :param respmsg: the message that will displayed in the interface
    :param url: url to display after receiving this response
    :param ajaxnav:
    :param norefresh: do not refresh the page after receiving this response
    :param template: eventual template's path
    :param kwargs: dict used for template rendering
    """
    # dict(kwargs) replaces the Python 2-only ``iteritems`` copy loop:
    # identical behaviour, and it also works on Python 3.
    ctx = dict(kwargs)
    if template is not None:
        content = _render_to_string(request, template, ctx)
    elif "content" in kwargs:
        content = kwargs["content"]
    else:
        content = ""
    jsonctx = {"status": status, "content": content}
    if respmsg is not None:
        jsonctx["respmsg"] = respmsg
    if ajaxnav:
        jsonctx["ajaxnav"] = True
    if url is not None:
        jsonctx["url"] = url
    jsonctx["norefresh"] = norefresh
    return JsonResponse(jsonctx)
def render_to_json_response(context, **response_kwargs):
    """Simple shortcut to render a JSON response.

    :param dict context: response content
    :return: ``HttpResponse`` object
    """
    response_kwargs['content_type'] = 'application/json'
    payload = json.dumps(context)
    return HttpResponse(payload, **response_kwargs)
def static_url(path):
    """Returns the correct static url for a given file

    :param path: the targeted static media
    """
    # Strip a single leading slash so STATIC_URL is not doubled.
    relative = path[1:] if path.startswith("/") else path
    return "%s%s" % (settings.STATIC_URL, relative)
def size2integer(value):
"""Try to convert a string representing a size to an integer value
in bytes.
Supported formats:
* K|k for KB
* M|m for MB
* G|g for GB
:param value: the string to convert
:return: the corresponding integer value
"""
m = re.match("(\d+)\s*(\w+)", value)
if m is None:
if re.match("\d+", value):
return int(value)
return 0
if m.group(2)[0] in ["K", "k"]:
return int(m.group(1)) * 2 ** 10
if m.group(2)[0] in ["M", "m"]:
return int(m.group(1)) * 2 ** 20
if m.group(2)[0] in ["G", "g"]:
return int(m.group(1)) * 2 ** 30
return 0
@login_required
def topredirection(request):
    """Simple view to redirect the request when no application is specified.

    The default "top redirection" can be specified in the *Admin >
    Settings* panel. It is the application that will be
    launched. Those not allowed to access the application will be
    redirected to their preferences page.

    This feature only applies to simple users.

    :param request: a Request object
    """
    # Imported lazily to avoid circular imports at module load time.
    from modoboa.lib import parameters
    from modoboa.core.extensions import exts_pool
    if request.user.group == 'SimpleUsers':
        topredir = parameters.get_admin("DEFAULT_TOP_REDIRECTION", app="core")
        if topredir != "user":
            infos = exts_pool.get_extension_infos(topredir)
            # Fall back to the extension name when it declares no URL.
            path = infos["url"] if infos["url"] else infos["name"]
        else:
            path = reverse("core:user_index")
    else:
        # FIXME
        path = reverse("modoboa_admin:domain_list")
    return HttpResponseRedirect(path)
class NavigationParameters(object):
    """
    Just a simple object to manipulate navigation parameters.
    """

    def __init__(self, request, sessionkey):
        self.request = request
        self.sessionkey = sessionkey
        # (name, default, escape?) triples read from request.GET by store().
        self.parameters = [('pattern', '', True),
                           ('criteria', 'from_addr', False)]

    def __getitem__(self, key):
        """Retrieve an item."""
        session = self.request.session
        if self.sessionkey not in session:
            raise KeyError
        return session[self.sessionkey][key]

    def __contains__(self, key):
        """Check if key is present."""
        session = self.request.session
        return (self.sessionkey in session and
                key in session[self.sessionkey])

    def __setitem__(self, key, value):
        """Set a new item."""
        self.request.session[self.sessionkey][key] = value

    def _store_page(self):
        """Specific method to store the current page."""
        page_number = self.request.GET.get("page", 1)
        self["page"] = int(page_number)

    def store(self):
        """Store navigation parameters into session.
        """
        session = self.request.session
        if self.sessionkey not in session:
            session[self.sessionkey] = {}
        self._store_page()
        navparams = session[self.sessionkey]
        navparams["order"] = self.request.GET.get("sort_order", "-date")
        for name, default, must_escape in self.parameters:
            raw_value = self.request.GET.get(name, default)
            if raw_value is None:
                navparams.pop(name, None)
                continue
            navparams[name] = re.escape(raw_value) if must_escape else raw_value
        session.modified = True

    def get(self, param, default_value=None):
        """Retrieve a navigation parameter.

        Just a simple getter to avoid using the full key name to
        access a parameter.

        :param str param: parameter name
        :param default_value: default value if none is found
        :return: parameter's value
        """
        if self.sessionkey in self.request.session:
            return self.request.session[self.sessionkey].get(param, default_value)
        return default_value

    def remove(self, param):
        """Remove a navigation parameter from session.

        :param str param: parameter name
        """
        self.request.session[self.sessionkey].pop(param, None)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
# Older google-api-core releases do not expose gapic_v1.method._MethodDefault;
# fall back to a plain ``object`` union so annotations still resolve there.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dataproc_v1.services.job_controller import pagers
from google.cloud.dataproc_v1.types import jobs
from .transports.base import JobControllerTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import JobControllerGrpcTransport
from .transports.grpc_asyncio import JobControllerGrpcAsyncIOTransport
class JobControllerClientMeta(type):
    """Metaclass for the JobController client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Ordered so that the first entry acts as the default transport.
    _transport_registry = OrderedDict(
        [
            ("grpc", JobControllerGrpcTransport),
            ("grpc_asyncio", JobControllerGrpcAsyncIOTransport),
        ]
    )  # type: Dict[str, Type[JobControllerTransport]]

    def get_transport_class(cls, label: str = None,) -> Type[JobControllerTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        if label:
            # An explicit request: look it up (KeyError on unknown labels).
            return cls._transport_registry[label]
        # No transport requested; fall back to the first registered one.
        return next(iter(cls._transport_registry.values()))
class JobControllerClient(metaclass=JobControllerClientMeta):
"""The JobController provides methods to manage jobs."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dataproc.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobControllerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobControllerClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> JobControllerTransport:
        """Returns the transport used by the client instance.

        Returns:
            JobControllerTransport: The transport used by the client
                instance.
        """
        # Set in __init__: either the caller-provided transport instance or
        # one constructed from the registered transport classes.
        return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` if provided, use the provided one.
(2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
default mTLS endpoint; if the environment variabel is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, JobControllerTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the job controller client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, JobControllerTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Coerce dict-style options into a ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()
        api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
            client_options
        )

        # getattr: older ClientOptions versions may not have ``api_key``.
        api_key_value = getattr(client_options, "api_key", None)
        if api_key_value and credentials:
            raise ValueError(
                "client_options.api_key and credentials are mutually exclusive"
            )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, JobControllerTransport):
            # transport is a JobControllerTransport instance.
            if credentials or client_options.credentials_file or api_key_value:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            import google.auth._default  # type: ignore

            # hasattr guard: only newer google-auth releases expose
            # get_api_key_credentials.
            if api_key_value and hasattr(
                google.auth._default, "get_api_key_credentials"
            ):
                credentials = google.auth._default.get_api_key_credentials(
                    api_key_value
                )

            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
def submit_job(
    self,
    request: Union[jobs.SubmitJobRequest, dict] = None,
    *,
    project_id: str = None,
    region: str = None,
    job: jobs.Job = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
    r"""Submits a job to a cluster.

    Args:
        request (Union[google.cloud.dataproc_v1.types.SubmitJobRequest, dict]):
            The request object. A request to submit a job. Mutually
            exclusive with the flattened ``project_id``, ``region``
            and ``job`` arguments.
        project_id (str):
            Required. The ID of the Google Cloud Platform project
            that the job belongs to.
        region (str):
            Required. The Dataproc region in which to handle the
            request.
        job (google.cloud.dataproc_v1.types.Job):
            Required. The job resource.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.dataproc_v1.types.Job:
            A Dataproc job resource.

    Raises:
        ValueError: If ``request`` is combined with any flattened field.
    """
    # A fully-formed request object and the flattened per-field keyword
    # arguments are mutually exclusive ways of describing the call.
    if request is not None and any([project_id, region, job]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (and None) into a proto-plus request; an existing
    # SubmitJobRequest is reused as-is to avoid a needless copy.
    if not isinstance(request, jobs.SubmitJobRequest):
        request = jobs.SubmitJobRequest(request)
    # Fold any flattened keyword fields into the request message.
    for field, value in (
        ("project_id", project_id),
        ("region", region),
        ("job", job),
    ):
        if value is not None:
            setattr(request, field, value)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.submit_job]
    # Send the request and hand the resulting Job straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def submit_job_as_operation(
    self,
    request: Union[jobs.SubmitJobRequest, dict] = None,
    *,
    project_id: str = None,
    region: str = None,
    job: jobs.Job = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
    r"""Submits job to a cluster.

    Args:
        request (Union[google.cloud.dataproc_v1.types.SubmitJobRequest, dict]):
            The request object. A request to submit a job. Mutually
            exclusive with the flattened ``project_id``, ``region``
            and ``job`` arguments.
        project_id (str):
            Required. The ID of the Google Cloud Platform project
            that the job belongs to.
        region (str):
            Required. The Dataproc region in which to handle the
            request.
        job (google.cloud.dataproc_v1.types.Job):
            Required. The job resource.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.api_core.operation.Operation:
            An object representing a long-running operation. The result
            type for the operation will be
            :class:`google.cloud.dataproc_v1.types.Job` A Dataproc job
            resource.

    Raises:
        ValueError: If ``request`` is combined with any flattened field.
    """
    # A fully-formed request object and the flattened per-field keyword
    # arguments are mutually exclusive ways of describing the call.
    if request is not None and any([project_id, region, job]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (and None) into a proto-plus request; an existing
    # SubmitJobRequest is reused as-is to avoid a needless copy.
    if not isinstance(request, jobs.SubmitJobRequest):
        request = jobs.SubmitJobRequest(request)
    # Fold any flattened keyword fields into the request message.
    for field, value in (
        ("project_id", project_id),
        ("region", region),
        ("job", job),
    ):
        if value is not None:
            setattr(request, field, value)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.submit_job_as_operation]
    raw_response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # Wrap the raw long-running-operation message in an operation future
    # whose eventual result resolves to a jobs.Job.
    return operation.from_gapic(
        raw_response,
        self._transport.operations_client,
        jobs.Job,
        metadata_type=jobs.JobMetadata,
    )
def get_job(
    self,
    request: Union[jobs.GetJobRequest, dict] = None,
    *,
    project_id: str = None,
    region: str = None,
    job_id: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
    r"""Gets the resource representation for a job in a
    project.

    Args:
        request (Union[google.cloud.dataproc_v1.types.GetJobRequest, dict]):
            The request object. A request to get the resource
            representation for a job in a project. Mutually exclusive
            with the flattened ``project_id``, ``region`` and ``job_id``
            arguments.
        project_id (str):
            Required. The ID of the Google Cloud Platform project
            that the job belongs to.
        region (str):
            Required. The Dataproc region in which to handle the
            request.
        job_id (str):
            Required. The job ID.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.dataproc_v1.types.Job:
            A Dataproc job resource.

    Raises:
        ValueError: If ``request`` is combined with any flattened field.
    """
    # A fully-formed request object and the flattened per-field keyword
    # arguments are mutually exclusive ways of describing the call.
    if request is not None and any([project_id, region, job_id]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (and None) into a proto-plus request; an existing
    # GetJobRequest is reused as-is to avoid a needless copy.
    if not isinstance(request, jobs.GetJobRequest):
        request = jobs.GetJobRequest(request)
    # Fold any flattened keyword fields into the request message.
    for field, value in (
        ("project_id", project_id),
        ("region", region),
        ("job_id", job_id),
    ):
        if value is not None:
            setattr(request, field, value)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.get_job]
    # Send the request and hand the resulting Job straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def list_jobs(
    self,
    request: Union[jobs.ListJobsRequest, dict] = None,
    *,
    project_id: str = None,
    region: str = None,
    filter: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListJobsPager:
    r"""Lists regions/{region}/jobs in a project.

    Args:
        request (Union[google.cloud.dataproc_v1.types.ListJobsRequest, dict]):
            The request object. A request to list jobs in a project.
            Mutually exclusive with the flattened ``project_id``,
            ``region`` and ``filter`` arguments.
        project_id (str):
            Required. The ID of the Google Cloud Platform project
            that the job belongs to.
        region (str):
            Required. The Dataproc region in which to handle the
            request.
        filter (str):
            Optional. A filter constraining the jobs to list.
            Filters are case-sensitive and have the following syntax:

            [field = value] AND [field [= value]] ...

            where **field** is ``status.state`` or ``labels.[KEY]``,
            and ``[KEY]`` is a label key. **value** can be ``*`` to
            match all values. ``status.state`` can be either
            ``ACTIVE`` or ``NON_ACTIVE``. Only the logical ``AND``
            operator is supported; space-separated items are treated
            as having an implicit ``AND`` operator.

            Example filter:

            status.state = ACTIVE AND labels.env = staging AND
            labels.starred = \*
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.dataproc_v1.services.job_controller.pagers.ListJobsPager:
            A list of jobs in a project. Iterating over this object will
            yield results and resolve additional pages automatically.

    Raises:
        ValueError: If ``request`` is combined with any flattened field.
    """
    # A fully-formed request object and the flattened per-field keyword
    # arguments are mutually exclusive ways of describing the call.
    if request is not None and any([project_id, region, filter]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (and None) into a proto-plus request; an existing
    # ListJobsRequest is reused as-is to avoid a needless copy.
    if not isinstance(request, jobs.ListJobsRequest):
        request = jobs.ListJobsRequest(request)
    # Fold any flattened keyword fields into the request message.
    for field, value in (
        ("project_id", project_id),
        ("region", region),
        ("filter", filter),
    ):
        if value is not None:
            setattr(request, field, value)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.list_jobs]
    first_page = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
    # The RPC is paged: return a pager that re-issues the call lazily
    # (via `__iter__`) to resolve additional pages.
    return pagers.ListJobsPager(
        method=rpc, request=request, response=first_page, metadata=metadata,
    )
def update_job(
    self,
    request: Union[jobs.UpdateJobRequest, dict] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
    r"""Updates a job in a project.

    Args:
        request (Union[google.cloud.dataproc_v1.types.UpdateJobRequest, dict]):
            The request object. A request to update a job.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.dataproc_v1.types.Job:
            A Dataproc job resource.
    """
    # Coerce dicts (and None) into a proto-plus request; an existing
    # UpdateJobRequest is reused as-is to avoid a needless copy.
    # Unlike its siblings, this method exposes no flattened fields.
    if not isinstance(request, jobs.UpdateJobRequest):
        request = jobs.UpdateJobRequest(request)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.update_job]
    # Send the request and hand the resulting Job straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def cancel_job(
    self,
    request: Union[jobs.CancelJobRequest, dict] = None,
    *,
    project_id: str = None,
    region: str = None,
    job_id: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> jobs.Job:
    r"""Starts a job cancellation request. To access the job resource
    after cancellation, call
    `regions/{region}/jobs.list <https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list>`__
    or
    `regions/{region}/jobs.get <https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get>`__.

    Args:
        request (Union[google.cloud.dataproc_v1.types.CancelJobRequest, dict]):
            The request object. A request to cancel a job. Mutually
            exclusive with the flattened ``project_id``, ``region`` and
            ``job_id`` arguments.
        project_id (str):
            Required. The ID of the Google Cloud Platform project
            that the job belongs to.
        region (str):
            Required. The Dataproc region in which to handle the
            request.
        job_id (str):
            Required. The job ID.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.dataproc_v1.types.Job:
            A Dataproc job resource.

    Raises:
        ValueError: If ``request`` is combined with any flattened field.
    """
    # A fully-formed request object and the flattened per-field keyword
    # arguments are mutually exclusive ways of describing the call.
    if request is not None and any([project_id, region, job_id]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (and None) into a proto-plus request; an existing
    # CancelJobRequest is reused as-is to avoid a needless copy.
    if not isinstance(request, jobs.CancelJobRequest):
        request = jobs.CancelJobRequest(request)
    # Fold any flattened keyword fields into the request message.
    for field, value in (
        ("project_id", project_id),
        ("region", region),
        ("job_id", job_id),
    ):
        if value is not None:
            setattr(request, field, value)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.cancel_job]
    # Send the request and hand the resulting Job straight back.
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
def delete_job(
    self,
    request: Union[jobs.DeleteJobRequest, dict] = None,
    *,
    project_id: str = None,
    region: str = None,
    job_id: str = None,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: float = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> None:
    r"""Deletes the job from the project. If the job is active, the
    delete fails, and the response returns ``FAILED_PRECONDITION``.

    Args:
        request (Union[google.cloud.dataproc_v1.types.DeleteJobRequest, dict]):
            The request object. A request to delete a job. Mutually
            exclusive with the flattened ``project_id``, ``region`` and
            ``job_id`` arguments.
        project_id (str):
            Required. The ID of the Google Cloud Platform project
            that the job belongs to.
        region (str):
            Required. The Dataproc region in which to handle the
            request.
        job_id (str):
            Required. The job ID.
        retry (google.api_core.retry.Retry): Designation of what errors,
            if any, should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Raises:
        ValueError: If ``request`` is combined with any flattened field.
    """
    # A fully-formed request object and the flattened per-field keyword
    # arguments are mutually exclusive ways of describing the call.
    if request is not None and any([project_id, region, job_id]):
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    # Coerce dicts (and None) into a proto-plus request; an existing
    # DeleteJobRequest is reused as-is to avoid a needless copy.
    if not isinstance(request, jobs.DeleteJobRequest):
        request = jobs.DeleteJobRequest(request)
    # Fold any flattened keyword fields into the request message.
    for field, value in (
        ("project_id", project_id),
        ("region", region),
        ("job_id", job_id),
    ):
        if value is not None:
            setattr(request, field, value)
    # The transport pre-wraps each RPC with retry/timeout handling and
    # friendly error translation; dispatch through that wrapper.
    rpc = self._transport._wrapped_methods[self._transport.delete_job]
    # Send the request; a successful delete returns nothing.
    rpc(
        request, retry=retry, timeout=timeout, metadata=metadata,
    )
def __enter__(self):
    # Support "with JobControllerClient(...) as client:" usage; the
    # paired __exit__ closes the underlying transport.
    return self
def __exit__(self, type, value, traceback):
    """Releases underlying transport's resources.

    .. warning::
        ONLY use as a context manager if the transport is NOT shared
        with other clients! Exiting the with block will CLOSE the transport
        and may cause errors in other clients!
    """
    # Exceptions are not suppressed (no truthy return), so any error
    # raised inside the `with` block propagates after cleanup.
    self.transport.close()
# Advertise the installed google-cloud-dataproc version in the
# x-goog-api-client header; fall back to a blank ClientInfo when the
# distribution metadata is unavailable (e.g. running from vendored source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Public API of this module.
__all__ = ("JobControllerClient",)
|
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import pytest
from openpyxl.xml.constants import CHART_DRAWING_NS, SHEET_DRAWING_NS
from openpyxl.xml.functions import Element, fromstring, tostring
from openpyxl.tests.helper import compare_xml
from openpyxl.tests.schema import drawing_schema, chart_schema
def test_bounding_box():
    """Fitting a 90x100 drawing into an 80x80 box keeps the aspect ratio."""
    from openpyxl.drawing import bounding_box
    width, height = bounding_box(80, 80, 90, 100)
    assert (width, height) == (72, 80)
class TestDrawing(object):
    """Unit tests for openpyxl's Drawing defaults and resizing logic."""

    def setup(self):
        from openpyxl.drawing import Drawing
        self.drawing = Drawing()

    def test_ctor(self):
        # Freshly constructed drawings carry these documented defaults.
        drawing = self.drawing
        assert drawing.coordinates == ((1, 2), (16, 8))
        assert (drawing.width, drawing.height) == (21, 192)
        assert (drawing.left, drawing.top) == (0, 0)
        assert (drawing.count, drawing.rotation) == (0, 0)
        assert drawing.resize_proportional is False
        assert (drawing.description, drawing.name) == ("", "")

    def test_width(self):
        # Without proportional resizing, width and height are independent.
        drawing = self.drawing
        drawing.width = 100
        drawing.height = 50
        assert drawing.width == 100

    def test_proportional_width(self):
        # Setting the height after the width rescales the width.
        drawing = self.drawing
        drawing.resize_proportional = True
        drawing.width = 100
        drawing.height = 50
        assert (drawing.width, drawing.height) == (5, 50)

    def test_height(self):
        # Without proportional resizing, height survives a width change.
        drawing = self.drawing
        drawing.height = 50
        drawing.width = 100
        assert drawing.height == 50

    def test_proportional_height(self):
        # Setting the width after the height rescales the height.
        drawing = self.drawing
        drawing.resize_proportional = True
        drawing.height = 50
        drawing.width = 100
        assert (drawing.width, drawing.height) == (100, 1000)

    def test_set_dimension(self):
        # set_dimension fits the drawing inside the box proportionally.
        drawing = self.drawing
        drawing.resize_proportional = True
        drawing.set_dimension(100, 50)
        assert (drawing.width, drawing.height) == (6, 50)
        drawing.set_dimension(50, 500)
        assert (drawing.width, drawing.height) == (50, 417)

    def test_get_emu(self):
        # EMU conversion of the default drawing anchored at the origin.
        assert self.drawing.get_emu_dimensions() == (0, 0, 200025, 1828800)
class DummyDrawing(object):
    """Shapes need charts which need drawings"""

    # Fixed pixel dimensions consumed by the chart/shape tests below.
    width, height = 10, 20
class DummyChart(object):
    """Shapes need a chart to calculate their coordinates"""

    # Nominal chart size in pixels.
    width = height = 100

    def __init__(self):
        # Every dummy chart owns its own dummy drawing.
        self.drawing = DummyDrawing()

    def _get_margin_left(self):
        # Constant left margin used by coordinate calculations.
        return 10

    def _get_margin_top(self):
        # Constant top margin used by coordinate calculations.
        return 5

    def get_x_units(self):
        # Constant horizontal unit size.
        return 25

    def get_y_units(self):
        # Constant vertical unit size.
        return 15
class TestShape(object):
    """Unit tests for openpyxl's Shape defaults and setters."""

    def setup(self):
        from openpyxl.drawing import Shape
        self.shape = Shape(chart=DummyChart())

    def test_ctor(self):
        # A fresh shape carries these documented defaults.
        shape = self.shape
        assert shape.axis_coordinates == ((0, 0), (1, 1))
        assert shape.text is None
        assert (shape.scheme, shape.style) == ("accent1", "rect")
        assert shape.border_color == "000000"
        assert shape.color == "FFFFFF"
        assert shape.text_color == "000000"
        assert shape.border_width == 0

    def test_border_color(self):
        self.shape.border_color = "BBBBBB"
        assert self.shape.border_color == "BBBBBB"

    def test_color(self):
        self.shape.color = "000000"
        assert self.shape.color == "000000"

    def test_text_color(self):
        self.shape.text_color = "FF0000"
        assert self.shape.text_color == "FF0000"

    def test_border_width(self):
        self.shape.border_width = 50
        assert self.shape.border_width == 50

    def test_coordinates(self):
        # Axis coordinates are stored verbatim; pixel coordinates are
        # derived (and clamped) from the dummy chart.
        shape = self.shape
        shape.coordinates = ((0, 0), (60, 80))
        assert shape.axis_coordinates == ((0, 0), (60, 80))
        assert shape.coordinates == (1, 1, 1, 1)

    def test_pct(self):
        # Percentages are clamped into the closed interval [0, 1].
        shape = self.shape
        assert shape._norm_pct(10) == 1
        assert shape._norm_pct(0.5) == 0.5
        assert shape._norm_pct(-10) == 0
class TestShadow(object):
    """Unit tests for openpyxl's Shadow defaults."""

    def setup(self):
        from openpyxl.drawing import Shadow
        self.shadow = Shadow()

    def test_ctor(self):
        # A fresh shadow is invisible and bottom-right aligned.
        shadow = self.shadow
        assert shadow.visible == False
        assert (shadow.blurRadius, shadow.distance) == (6, 2)
        assert shadow.direction == 0
        assert shadow.alignment == "br"
        assert shadow.color.index == "00000000"
        assert shadow.alpha == 50
class DummySheet(object):
    """Required for images"""

    def point_pos(self, vertical, horizontal):
        # Echo the coordinates back unchanged; a real worksheet would
        # translate pixel offsets into cell positions.
        return (vertical, horizontal)
class DummyCell(object):
    """Required for images"""

    # Fixed location consumed by the image-anchoring tests.
    column, row, anchor = "A", 1, (0, 0)

    def __init__(self):
        # Cells belong to a sheet; a dummy one suffices here.
        self.parent = DummySheet()
@pytest.fixture
def Image():
    # Return the Image class itself (not an instance); the import is
    # deferred so test collection succeeds even when PIL is absent.
    from openpyxl.drawing import Image
    return Image
@pytest.fixture()
def ImageFile(datadir, Image):
    # A ready-made Image backed by the sample "plain.png" in the test
    # data directory (chdir so the relative path resolves).
    datadir.chdir()
    return Image("plain.png")
class TestImage(object):
    """Unit tests for openpyxl's Image wrapper around PIL."""

    @pytest.mark.pil_not_installed
    def test_import(self, Image, datadir):
        # Without PIL the loader must fail loudly, not silently.
        datadir.chdir()
        with pytest.raises(ImportError):
            Image._import_image("plain.png")

    @pytest.mark.pil_required
    def test_ctor(self, Image, datadir):
        # Defaults and the drawing size derived from the 118px sample.
        datadir.chdir()
        image = Image(img="plain.png")
        assert image.nochangearrowheads == True
        assert image.nochangeaspect == True
        drawing = image.drawing
        assert drawing.coordinates == ((0, 0), (1, 1))
        assert (drawing.width, drawing.height) == (118, 118)

    @pytest.mark.pil_required
    def test_anchor(self, Image, datadir):
        # Default anchoring reports the cell position and pixel size.
        datadir.chdir()
        image = Image("plain.png")
        assert image.anchor(DummyCell()) == (('A', 1), (118, 118))

    @pytest.mark.pil_required
    def test_anchor_onecell(self, Image, datadir):
        # oneCell anchoring has no explicit extent.
        datadir.chdir()
        image = Image("plain.png")
        assert image.anchor(DummyCell(), anchortype="oneCell") == ((0, 0), None)
class TestDrawingWriter(object):
    """Tests for DrawingWriter: serialising charts/images to drawing XML.

    The expected XML literals below are compared with compare_xml, so
    assertions hold up to XML equivalence rather than byte equality.
    """

    def setup(self):
        # A DrawingWriter bound to a dummy sheet with no charts/images.
        from openpyxl.writer.drawings import DrawingWriter
        sheet = DummySheet()
        sheet._charts = []
        sheet._images = []
        self.dw = DrawingWriter(sheet=sheet)

    def test_write(self):
        # An empty sheet serialises to a bare <xdr:wsDr> root element.
        xml = self.dw.write()
        expected = """<xdr:wsDr xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing">
</xdr:wsDr>"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    @pytest.mark.lxml_required
    def test_write_chart(self):
        # A chart becomes an absoluteAnchor graphicFrame referencing rId1;
        # the result must validate against the drawing schema.
        from openpyxl.drawing import Drawing
        root = Element("{%s}wsDr" % SHEET_DRAWING_NS)
        chart = DummyChart()
        drawing = Drawing()
        chart.drawing = drawing
        self.dw._write_chart(root, chart, 1)
        drawing_schema.assertValid(root)
        xml = tostring(root)
        expected = """<xdr:wsDr xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"
xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart">
<xdr:absoluteAnchor>
<xdr:pos x="0" y="0"/>
<xdr:ext cx="200025" cy="1828800"/>
<xdr:graphicFrame macro="">
<xdr:nvGraphicFramePr>
<xdr:cNvPr id="2" name="Chart 1"/>
<xdr:cNvGraphicFramePr/>
</xdr:nvGraphicFramePr>
<xdr:xfrm>
<a:off x="0" y="0"/>
<a:ext cx="0" cy="0"/>
</xdr:xfrm>
<a:graphic>
<a:graphicData uri="http://schemas.openxmlformats.org/drawingml/2006/chart">
<c:chart xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" r:id="rId1"/>
</a:graphicData>
</a:graphic>
</xdr:graphicFrame>
<xdr:clientData/>
</xdr:absoluteAnchor>
</xdr:wsDr>"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    @pytest.mark.lxml_required
    @pytest.mark.pil_required
    def test_write_images(self, ImageFile):
        # An image becomes an absoluteAnchor <xdr:pic> with a blip fill;
        # 118px maps to 1123950 EMUs.
        root = Element("{%s}wsDr" % SHEET_DRAWING_NS)
        self.dw._write_image(root, ImageFile, 1)
        drawing_schema.assertValid(root)
        xml = tostring(root)
        expected = """<xdr:wsDr xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing">
<xdr:absoluteAnchor>
<xdr:pos x="0" y="0"/>
<xdr:ext cx="1123950" cy="1123950"/>
<xdr:pic>
<xdr:nvPicPr>
<xdr:cNvPr id="2" name="Picture 1"/>
<xdr:cNvPicPr>
<a:picLocks noChangeArrowheads="1" noChangeAspect="1"/>
</xdr:cNvPicPr>
</xdr:nvPicPr>
<xdr:blipFill>
<a:blip xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" cstate="print" r:embed="rId1"/>
<a:srcRect/>
<a:stretch>
<a:fillRect/>
</a:stretch>
</xdr:blipFill>
<xdr:spPr bwMode="auto">
<a:xfrm>
<a:off x="0" y="0"/>
<a:ext cx="0" cy="0"/>
</a:xfrm>
<a:prstGeom prst="rect">
<a:avLst/>
</a:prstGeom>
<a:noFill/>
<a:ln w="1">
<a:noFill/>
<a:miter lim="800000"/>
<a:headEnd/>
<a:tailEnd len="med" type="none" w="med"/>
</a:ln>
<a:effectLst/>
</xdr:spPr>
</xdr:pic>
<xdr:clientData/>
</xdr:absoluteAnchor>
</xdr:wsDr>
"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    @pytest.mark.pil_required
    def test_write_anchor(self, ImageFile):
        # The default anchor type is absolute: position plus extent.
        drawing = ImageFile.drawing
        root = Element("test")
        self.dw._write_anchor(root, drawing)
        xml = tostring(root)
        expected = """<test><xdr:absoluteAnchor xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing"><xdr:pos x="0" y="0"/><xdr:ext cx="1123950" cy="1123950"/></xdr:absoluteAnchor></test>"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    @pytest.mark.pil_required
    def test_write_anchor_onecell(self, ImageFile):
        # oneCell anchors carry a <xdr:from> cell reference instead of a
        # fixed position.
        drawing =ImageFile.drawing
        drawing.anchortype = "oneCell"
        drawing.anchorcol = 0
        drawing.anchorrow = 0
        root = Element("test")
        self.dw._write_anchor(root, drawing)
        xml = tostring(root)
        expected = """<test><xdr:oneCellAnchor xmlns:xdr="http://schemas.openxmlformats.org/drawingml/2006/spreadsheetDrawing"><xdr:from><xdr:col>0</xdr:col><xdr:colOff>0</xdr:colOff><xdr:row>0</xdr:row><xdr:rowOff>0</xdr:rowOff></xdr:from><xdr:ext cx="1123950" cy="1123950"/></xdr:oneCellAnchor></test>"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_write_rels(self):
        # One chart and one image produce one relationship entry each.
        self.dw._sheet._charts.append(None)
        self.dw._sheet._images.append(None)
        xml = self.dw.write_rels(1, 1)
        expected = """<Relationships xmlns="http://schemas.openxmlformats.org/package/2006/relationships">
<Relationship Id="rId1" Target="../charts/chart1.xml" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/chart"/>
<Relationship Id="rId1" Target="../media/image1.png" Type="http://schemas.openxmlformats.org/officeDocument/2006/relationships/image"/>
</Relationships>
"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff
class TestShapeWriter(object):
    """Tests for ShapeWriter's serialisation of chart user shapes."""

    def setup(self):
        # Fresh fixture per test: one Shape bound to a dummy chart, and a
        # writer over that single shape.
        from openpyxl.writer.drawings import ShapeWriter
        from openpyxl.drawing import Shape
        chart = DummyChart()
        self.shape = Shape(chart=chart, text="My first chart")
        self.sw = ShapeWriter(shapes=[self.shape])

    @pytest.mark.lxml_required
    def test_write(self):
        # Full-document round trip: output must validate against the chart
        # schema and match the reference markup node-for-node.
        xml = self.sw.write(0)
        tree = fromstring(xml)
        chart_schema.assertValid(tree)
        expected = """
<c:userShapes xmlns:c="http://schemas.openxmlformats.org/drawingml/2006/chart">
<cdr:relSizeAnchor xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing">
<cdr:from>
<cdr:x>1</cdr:x>
<cdr:y>1</cdr:y>
</cdr:from>
<cdr:to>
<cdr:x>1</cdr:x>
<cdr:y>1</cdr:y>
</cdr:to>
<cdr:sp macro="" textlink="">
<cdr:nvSpPr>
<cdr:cNvPr id="0" name="shape 0" />
<cdr:cNvSpPr />
</cdr:nvSpPr>
<cdr:spPr>
<a:xfrm xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:off x="0" y="0" />
<a:ext cx="0" cy="0" />
</a:xfrm>
<a:prstGeom prst="rect" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:avLst />
</a:prstGeom>
<a:solidFill xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:srgbClr val="FFFFFF" />
</a:solidFill>
<a:ln w="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:solidFill>
<a:srgbClr val="000000" />
</a:solidFill>
</a:ln>
</cdr:spPr>
<cdr:style>
<a:lnRef idx="2" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1">
<a:shade val="50000" />
</a:schemeClr>
</a:lnRef>
<a:fillRef idx="1" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1" />
</a:fillRef>
<a:effectRef idx="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="accent1" />
</a:effectRef>
<a:fontRef idx="minor" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:schemeClr val="lt1" />
</a:fontRef>
</cdr:style>
<cdr:txBody>
<a:bodyPr vertOverflow="clip" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" />
<a:lstStyle xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" />
<a:p xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main">
<a:r>
<a:rPr lang="en-US">
<a:solidFill>
<a:srgbClr val="000000" />
</a:solidFill>
</a:rPr>
<a:t>My first chart</a:t>
</a:r>
</a:p>
</cdr:txBody>
</cdr:sp>
</cdr:relSizeAnchor>
</c:userShapes>
"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_write_text(self):
        # The txBody element alone, written into an arbitrary parent node.
        root = Element("{%s}test" % CHART_DRAWING_NS)
        self.sw._write_text(root, self.shape)
        xml = tostring(root)
        expected = """<cdr:test xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"><cdr:txBody><a:bodyPr vertOverflow="clip" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" /><a:lstStyle xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" /><a:p xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:r><a:rPr lang="en-US"><a:solidFill><a:srgbClr val="000000" /></a:solidFill></a:rPr><a:t>My first chart</a:t></a:r></a:p></cdr:txBody></cdr:test>"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff

    def test_write_style(self):
        # The style element alone: line/fill/effect/font references.
        root = Element("{%s}test" % CHART_DRAWING_NS)
        self.sw._write_style(root)
        xml = tostring(root)
        expected = """<cdr:test xmlns:cdr="http://schemas.openxmlformats.org/drawingml/2006/chartDrawing"><cdr:style><a:lnRef idx="2" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1"><a:shade val="50000" /></a:schemeClr></a:lnRef><a:fillRef idx="1" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1" /></a:fillRef><a:effectRef idx="0" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="accent1" /></a:effectRef><a:fontRef idx="minor" xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main"><a:schemeClr val="lt1" /></a:fontRef></cdr:style></cdr:test>"""
        diff = compare_xml(xml, expected)
        assert diff is None, diff
|
|
import sys
from multiprocessing import Pool
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import connection as db_connection
from django.db.models import Q
from django.template import Context, Template
from .connections import connections
from .models import Email, EmailTemplate, PRIORITY, STATUS
from .settings import (get_available_backends, get_batch_size,
get_log_level, get_sending_order)
from .utils import (get_email_template, parse_emails, parse_priority,
split_emails, create_attachments)
from .logutils import setup_loghandlers
# Prefer Django's timezone-aware clock when available; older Django
# versions without django.utils.timezone fall back to naive datetimes.
try:
    from django.utils import timezone
    now = timezone.now
except ImportError:
    import datetime
    now = datetime.datetime.now

# Module-level logger used by the queue-processing functions below.
logger = setup_loghandlers("INFO")
def create(sender, recipients=None, cc=None, bcc=None, subject='', message='',
           html_message='', context=None, scheduled_time=None, headers=None,
           template=None, priority=None, render_on_delivery=False, commit=True,
           backend=''):
    """
    Build (and by default save) an ``Email`` from keyword arguments.

    When ``render_on_delivery`` is true, the raw template and context are
    stored and rendering is deferred until dispatch time; otherwise subject,
    message and html_message are rendered immediately with Django templates.
    Returns the ``Email`` instance (unsaved when ``commit`` is False).
    """
    priority = parse_priority(priority)
    status = None if priority == PRIORITY.now else STATUS.queued

    # Normalise missing collections; only replace None (an explicitly
    # passed empty sequence is kept as-is).
    recipients = [] if recipients is None else recipients
    cc = [] if cc is None else cc
    bcc = [] if bcc is None else bcc
    context = '' if context is None else context

    if render_on_delivery:
        # Store everything needed to render later, including the template
        # reference and the raw context.
        email = Email(
            from_email=sender,
            to=recipients,
            cc=cc,
            bcc=bcc,
            scheduled_time=scheduled_time,
            headers=headers, priority=priority, status=status,
            context=context, template=template, backend_alias=backend
        )
    else:
        if template:
            subject = template.subject
            message = template.content
            html_message = template.html_content

        render_context = Context(context or {})
        subject = Template(subject).render(render_context)
        message = Template(message).render(render_context)
        html_message = Template(html_message).render(render_context)

        email = Email(
            from_email=sender,
            to=recipients,
            cc=cc,
            bcc=bcc,
            subject=subject,
            message=message,
            html_message=html_message,
            scheduled_time=scheduled_time,
            headers=headers, priority=priority, status=status,
            backend_alias=backend
        )

    if commit:
        email.save()

    return email
def send(recipients=None, sender=None, template=None, context=None, subject='',
         message='', html_message='', scheduled_time=None, headers=None,
         priority=None, attachments=None, render_on_delivery=False,
         log_level=None, commit=True, cc=None, bcc=None, language='',
         backend=''):
    """
    Validate arguments, create a single email and (when priority is 'now')
    dispatch it immediately.

    Returns the created ``Email`` instance. Raises ``ValidationError`` for
    malformed addresses and ``ValueError`` for incompatible argument
    combinations or an unknown backend alias.
    """
    try:
        recipients = parse_emails(recipients)
    except ValidationError as e:
        raise ValidationError('recipients: %s' % e.message)

    try:
        cc = parse_emails(cc)
    except ValidationError as e:
        # Bug fix: the field name was previously reported as 'c'.
        raise ValidationError('cc: %s' % e.message)

    try:
        bcc = parse_emails(bcc)
    except ValidationError as e:
        raise ValidationError('bcc: %s' % e.message)

    if sender is None:
        sender = settings.DEFAULT_FROM_EMAIL

    priority = parse_priority(priority)

    if log_level is None:
        log_level = get_log_level()

    if not commit:
        # commit=False is only used by send_many(): rows don't exist yet,
        # so immediate dispatch and attachments are both impossible.
        if priority == PRIORITY.now:
            raise ValueError("send_many() can't be used with priority = 'now'")
        if attachments:
            raise ValueError("Can't add attachments with send_many()")

    if template:
        if subject:
            raise ValueError('You can\'t specify both "template" and "subject" arguments')
        if message:
            raise ValueError('You can\'t specify both "template" and "message" arguments')
        if html_message:
            raise ValueError('You can\'t specify both "template" and "html_message" arguments')

        # template can be an EmailTemplate instance or name
        if isinstance(template, EmailTemplate):
            # If language is specified, ensure template uses the right language
            if language and template.language != language:
                template = template.translated_templates.get(language=language)
        else:
            template = get_email_template(template, language)

    if backend and backend not in get_available_backends().keys():
        raise ValueError('%s is not a valid backend alias' % backend)

    email = create(sender, recipients, cc, bcc, subject, message, html_message,
                   context, scheduled_time, headers, template, priority,
                   render_on_delivery, commit=commit, backend=backend)

    if attachments:
        attachments = create_attachments(attachments)
        email.attachments.add(*attachments)

    if priority == PRIORITY.now:
        email.dispatch(log_level=log_level)

    return email
def send_many(kwargs_list):
    """
    Similar to mail.send(), but this function accepts a list of kwargs.
    Internally, it uses Django's bulk_create command for efficiency reasons.
    Currently send_many() can't be used to send emails with priority = 'now'.
    """
    unsaved = [send(commit=False, **kwargs) for kwargs in kwargs_list]
    Email.objects.bulk_create(unsaved)
def get_queued():
    """
    Returns a queryset of emails that should be sent:
    - Status is queued
    - Has scheduled_time lower than the current time or None

    Results follow the configured sending order, are capped at the
    configured batch size, and prefetch templates and attachments.
    """
    return Email.objects.filter(status=STATUS.queued) \
        .select_related('template') \
        .filter(Q(scheduled_time__lte=now()) | Q(scheduled_time=None)) \
        .order_by(*get_sending_order()).prefetch_related('attachments')[:get_batch_size()]
def send_queued(processes=1, log_level=None):
    """
    Send all queued mails whose scheduled_time is in the past or None.

    :param processes: number of worker processes; capped at the number of
        queued emails, and multiprocessing is skipped entirely for 1.
    :param log_level: forwarded to _send_bulk in the single-process path.
    :returns: tuple ``(total_sent, total_failed)``.
    """
    queued_emails = get_queued()
    total_sent, total_failed = 0, 0
    total_email = len(queued_emails)

    logger.info('Started sending %s emails with %s processes.' %
                (total_email, processes))

    if log_level is None:
        log_level = get_log_level()

    if queued_emails:
        # Don't use more processes than number of emails
        if total_email < processes:
            processes = total_email

        if processes == 1:
            total_sent, total_failed = _send_bulk(queued_emails,
                                                  uses_multiprocessing=False,
                                                  log_level=log_level)
        else:
            email_lists = split_emails(queued_emails, processes)
            pool = Pool(processes)
            try:
                # NOTE(review): an explicit log_level argument is not
                # forwarded to workers; each worker falls back to
                # get_log_level() inside _send_bulk.
                results = pool.map(_send_bulk, email_lists)
            finally:
                # Bug fix: close and join the pool so worker processes are
                # reaped instead of leaking on every call.
                pool.close()
                pool.join()
            total_sent = sum(result[0] for result in results)
            total_failed = sum(result[1] for result in results)

    message = '%s emails attempted, %s sent, %s failed' % (
        total_email,
        total_sent,
        total_failed
    )
    logger.info(message)

    return (total_sent, total_failed)
def _send_bulk(emails, uses_multiprocessing=True, log_level=None):
    """Dispatch ``emails`` sequentially; return ``(sent_count, failed_count)``."""
    # Multiprocessing does not play well with database connection
    # Fix: Close connections on forking process
    # https://groups.google.com/forum/#!topic/django-users/eCAIY9DAfG0
    if uses_multiprocessing:
        db_connection.close()

    if log_level is None:
        log_level = get_log_level()

    sent_count, failed_count = 0, 0
    email_count = len(emails)

    logger.info('Process started, sending %s emails' % email_count)

    try:
        for email in emails:
            # disconnect_after_delivery=False keeps the SMTP connection open
            # across the batch; it is closed once below.
            status = email.dispatch(log_level=log_level,
                                    disconnect_after_delivery=False)
            if status == STATUS.sent:
                sent_count += 1
                logger.debug('Successfully sent email #%d' % email.id)
            else:
                failed_count += 1
                logger.debug('Failed to send email #%d' % email.id)
    except Exception as e:
        # Log and swallow: a crash mid-batch must not kill the whole worker;
        # the partial counts accumulated so far are still returned.
        logger.error(e, exc_info=sys.exc_info(), extra={'status_code': 500})

    # Release the SMTP connections opened during delivery.
    connections.close()

    logger.info('Process finished, %s attempted, %s sent, %s failed' %
                (email_count, sent_count, failed_count))

    return (sent_count, failed_count)
|
|
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host operations.
"""
import datetime
import os
import platform
import time
from oslo.config import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.virt.hyperv import constants
from nova.virt.hyperv import utilsfactory
CONF = cfg.CONF
CONF.import_opt('my_ip', 'nova.netconf')
LOG = logging.getLogger(__name__)
class HostOps(object):
    """Management class for Hyper-V host operations: resource reporting,
    host stats, power actions and uptime."""

    def __init__(self):
        # Stats cache filled lazily by _update_stats().
        self._stats = None
        self._hostutils = utilsfactory.get_hostutils()
        self._pathutils = utilsfactory.get_pathutils()

    def _get_cpu_info(self):
        """Get the CPU information.

        :returns: A dictionary containing the main properties
        of the central processor in the hypervisor.
        """
        cpu_info = dict()
        processors = self._hostutils.get_cpus_info()

        # Map the WMI Win32_Processor architecture code to a readable name.
        w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE
        cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'],
                                             'Unknown')
        cpu_info['model'] = processors[0]['Name']
        cpu_info['vendor'] = processors[0]['Manufacturer']

        topology = dict()
        topology['sockets'] = len(processors)
        topology['cores'] = processors[0]['NumberOfCores']
        # NOTE(review): relies on Python 2 integer division; on Python 3
        # '/' would yield a float here.
        topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] /
                               processors[0]['NumberOfCores'])
        cpu_info['topology'] = topology

        features = list()
        for fkey, fname in constants.PROCESSOR_FEATURE.items():
            if self._hostutils.is_cpu_feature_present(fkey):
                features.append(fname)
        cpu_info['features'] = features

        return cpu_info

    def _get_memory_info(self):
        # Returns (total_mb, free_mb, used_mb); hostutils reports kB.
        (total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info()
        total_mem_mb = total_mem_kb / 1024
        free_mem_mb = free_mem_kb / 1024
        return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb)

    def _get_local_hdd_info_gb(self):
        # Returns (total_gb, free_gb, used_gb) for the drive that hosts the
        # instances directory.
        drive = os.path.splitdrive(self._pathutils.get_instances_dir())[0]
        (size, free_space) = self._hostutils.get_volume_info(drive)

        total_gb = size / units.Gi
        free_gb = free_space / units.Gi
        used_gb = total_gb - free_gb
        return (total_gb, free_gb, used_gb)

    def _get_hypervisor_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)
        """
        # Version string with dots removed, e.g. '6.3' -> '63'.
        version = self._hostutils.get_windows_version().replace('.', '')
        LOG.debug('Windows version: %s ', version)
        return version

    def get_available_resource(self):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources
        """
        LOG.debug('get_available_resource called')

        (total_mem_mb,
         free_mem_mb,
         used_mem_mb) = self._get_memory_info()

        (total_hdd_gb,
         free_hdd_gb,
         used_hdd_gb) = self._get_local_hdd_info_gb()

        cpu_info = self._get_cpu_info()
        cpu_topology = cpu_info['topology']
        vcpus = (cpu_topology['sockets'] *
                 cpu_topology['cores'] *
                 cpu_topology['threads'])

        # NOTE(review): vcpus_used is reported as 0 rather than computed
        # from running instances.
        dic = {'vcpus': vcpus,
               'memory_mb': total_mem_mb,
               'memory_mb_used': used_mem_mb,
               'local_gb': total_hdd_gb,
               'local_gb_used': used_hdd_gb,
               'hypervisor_type': "hyperv",
               'hypervisor_version': self._get_hypervisor_version(),
               'hypervisor_hostname': platform.node(),
               'vcpus_used': 0,
               'cpu_info': jsonutils.dumps(cpu_info),
               'supported_instances': jsonutils.dumps(
                   [('i686', 'hyperv', 'hvm'),
                    ('x86_64', 'hyperv', 'hvm')])
               }

        return dic

    def _update_stats(self):
        # Rebuild the cached stats dictionary from current host state.
        LOG.debug("Updating host stats")

        (total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info()
        (total_hdd_gb,
         free_hdd_gb,
         used_hdd_gb) = self._get_local_hdd_info_gb()

        data = {}
        data["disk_total"] = total_hdd_gb
        data["disk_used"] = used_hdd_gb
        data["disk_available"] = free_hdd_gb
        data["host_memory_total"] = total_mem_mb
        data["host_memory_overhead"] = used_mem_mb
        data["host_memory_free"] = free_mem_mb
        data["host_memory_free_computed"] = free_mem_mb
        data["supported_instances"] = [('i686', 'hyperv', 'hvm'),
                                       ('x86_64', 'hyperv', 'hvm')]
        data["hypervisor_hostname"] = platform.node()

        self._stats = data

    def get_host_stats(self, refresh=False):
        """Return the current state of the host.

        If 'refresh' is True, run the update first.
        """
        LOG.debug("get_host_stats called")

        if refresh or not self._stats:
            self._update_stats()
        return self._stats

    def host_power_action(self, host, action):
        """Reboots, shuts down or powers up the host."""
        # Intentionally a no-op for this driver.
        pass

    def get_host_ip_addr(self):
        # Prefer the configured my_ip; fall back to the first local address.
        host_ip = CONF.my_ip
        if not host_ip:
            # Return the first available address
            host_ip = self._hostutils.get_local_ips()[0]
        LOG.debug("Host IP address is: %s", host_ip)
        return host_ip

    def get_host_uptime(self):
        """Returns the host uptime."""
        tick_count64 = self._hostutils.get_host_tick_count64()

        # format the string to match libvirt driver uptime
        # Libvirt uptime returns a combination of the following
        # - curent host time
        # - time since host is up
        # - number of logged in users
        # - cpu load
        # Since the Windows function GetTickCount64 returns only
        # the time since the host is up, returning 0s for cpu load
        # and number of logged in users.
        # This is done to ensure the format of the returned
        # value is same as in libvirt
        # NOTE(review): long() is Python-2-only; this module predates the
        # Python 3 port.
        return "%s up %s, 0 users, load average: 0, 0, 0" % (
            str(time.strftime("%H:%M:%S")),
            str(datetime.timedelta(milliseconds=long(tick_count64))))
|
|
# Lookup table indexed by byte value 0x00-0xff: every one of the 256 slots
# is intentionally the empty string for this code page.
data = ('',) * 256
|
|
from django import forms
from django.contrib import admin
from django.contrib.admin import AdminSite
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.contenttypes.admin import GenericStackedInline
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.core import checks
from django.test import SimpleTestCase, override_settings
from .models import (
Album, Author, Book, City, Influence, Song, State, TwoAlbumFKAndAnE,
)
class SongForm(forms.ModelForm):
    # Minimal ModelForm used by the admin checks below; the field list is
    # supplied by the ModelAdmin under test.
    pass
class ValidFields(admin.ModelAdmin):
    # A custom form together with an explicit 'fields' list that the admin
    # checks should accept without errors.
    form = SongForm
    fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
    # Overrides get_form(), which makes the fieldsets field-existence checks
    # skip validation — so 'name' need not be a model field.
    def get_form(self, request, obj=None, **kwargs):
        class ExtraFieldForm(SongForm):
            name = forms.CharField(max_length=50)
        return ExtraFieldForm

    fieldsets = (
        (None, {
            'fields': ('name',),
        }),
    )
class MyAdmin(admin.ModelAdmin):
    # Deliberately broken check() used to prove that run_checks() collects
    # errors reported by registered ModelAdmins.
    def check(self, **kwargs):
        return ['error!']
# Subclasses used to verify that the admin dependency checks accept
# subclasses of the required middleware/backends, not only the exact
# dotted paths.
class AuthenticationMiddlewareSubclass(AuthenticationMiddleware):
    pass


class MessageMiddlewareSubclass(MessageMiddleware):
    pass


class ModelBackendSubclass(ModelBackend):
    pass


class SessionMiddlewareSubclass(SessionMiddleware):
    pass
@override_settings(
SILENCED_SYSTEM_CHECKS=['fields.W342'], # ForeignKey(unique=True)
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'admin_checks',
],
)
class SystemChecksTestCase(SimpleTestCase):
def test_checks_are_performed(self):
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
@override_settings(INSTALLED_APPS=['django.contrib.admin'])
def test_apps_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.contenttypes' must be in "
"INSTALLED_APPS in order to use the admin application.",
id="admin.E401",
),
checks.Error(
"'django.contrib.auth' must be in INSTALLED_APPS in order "
"to use the admin application.",
id='admin.E405',
),
checks.Error(
"'django.contrib.messages' must be in INSTALLED_APPS in order "
"to use the admin application.",
id='admin.E406',
),
]
self.assertEqual(errors, expected)
@override_settings(TEMPLATES=[])
def test_no_template_engines(self):
self.assertEqual(admin.checks.check_dependencies(), [
checks.Error(
"A 'django.template.backends.django.DjangoTemplates' "
"instance must be configured in TEMPLATES in order to use "
"the admin application.",
id='admin.E403',
)
])
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [],
},
}],
)
def test_context_processor_dependencies(self):
expected = [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id='admin.E402',
),
checks.Error(
"'django.contrib.messages.context_processors.messages' must "
"be enabled in DjangoTemplates (TEMPLATES) in order to use "
"the admin application.",
id='admin.E404',
)
]
self.assertEqual(admin.checks.check_dependencies(), expected)
# The first error doesn't happen if
# 'django.contrib.auth.backends.ModelBackend' isn't in
# AUTHENTICATION_BACKENDS.
with self.settings(AUTHENTICATION_BACKENDS=[]):
self.assertEqual(admin.checks.check_dependencies(), expected[1:])
@override_settings(
AUTHENTICATION_BACKENDS=['admin_checks.tests.ModelBackendSubclass'],
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': ['django.contrib.messages.context_processors.messages'],
},
}],
)
def test_context_processor_dependencies_model_backend_subclass(self):
self.assertEqual(admin.checks.check_dependencies(), [
checks.Error(
"'django.contrib.auth.context_processors.auth' must be "
"enabled in DjangoTemplates (TEMPLATES) if using the default "
"auth backend in order to use the admin application.",
id='admin.E402',
),
])
@override_settings(
TEMPLATES=[
{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
'DIRS': [],
'APP_DIRS': True,
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
],
)
def test_several_templates_backends(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[])
def test_middleware_dependencies(self):
errors = admin.checks.check_dependencies()
expected = [
checks.Error(
"'django.contrib.auth.middleware.AuthenticationMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id='admin.E408',
),
checks.Error(
"'django.contrib.messages.middleware.MessageMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id='admin.E409',
),
checks.Error(
"'django.contrib.sessions.middleware.SessionMiddleware' "
"must be in MIDDLEWARE in order to use the admin application.",
id='admin.E410',
),
]
self.assertEqual(errors, expected)
@override_settings(MIDDLEWARE=[
'admin_checks.tests.AuthenticationMiddlewareSubclass',
'admin_checks.tests.MessageMiddlewareSubclass',
'admin_checks.tests.SessionMiddlewareSubclass',
])
def test_middleware_subclasses(self):
self.assertEqual(admin.checks.check_dependencies(), [])
@override_settings(MIDDLEWARE=[
'django.contrib.does.not.Exist',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
])
def test_admin_check_ignores_import_error_in_middleware(self):
self.assertEqual(admin.checks.check_dependencies(), [])
def test_custom_adminsite(self):
class CustomAdminSite(admin.AdminSite):
pass
custom_site = CustomAdminSite()
custom_site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
custom_site.unregister(Song)
def test_allows_checks_relying_on_other_modeladmins(self):
class MyBookAdmin(admin.ModelAdmin):
def check(self, **kwargs):
errors = super().check(**kwargs)
author_admin = self.admin_site._registry.get(Author)
if author_admin is None:
errors.append('AuthorAdmin missing!')
return errors
class MyAuthorAdmin(admin.ModelAdmin):
pass
admin.site.register(Book, MyBookAdmin)
admin.site.register(Author, MyAuthorAdmin)
try:
self.assertEqual(admin.site.check(None), [])
finally:
admin.site.unregister(Book)
admin.site.unregister(Author)
def test_field_name_not_in_list_display(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ["original_release"]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not contained in 'list_display'.",
obj=SongAdmin,
id='admin.E122',
)
]
self.assertEqual(errors, expected)
def test_list_editable_not_a_list_or_tuple(self):
class SongAdmin(admin.ModelAdmin):
list_editable = 'test'
self.assertEqual(SongAdmin(Song, AdminSite()).check(), [
checks.Error(
"The value of 'list_editable' must be a list or tuple.",
obj=SongAdmin,
id='admin.E120',
)
])
def test_list_editable_missing_field(self):
class SongAdmin(admin.ModelAdmin):
list_editable = ('test',)
self.assertEqual(SongAdmin(Song, AdminSite()).check(), [
checks.Error(
"The value of 'list_editable[0]' refers to 'test', which is "
"not an attribute of 'admin_checks.Song'.",
obj=SongAdmin,
id='admin.E121',
)
])
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin.",
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
The fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
"""
errors = ValidFormFieldsets(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_fieldsets_fields_non_tuple(self):
"""
The first fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": "title" # not a tuple
}),
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[0][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_nonfirst_fieldset(self):
"""
The second fieldset's fields must be a list/tuple.
"""
class NotATupleAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
"fields": ("title",)
}),
('foo', {
"fields": "author" # not a tuple
}),
]
errors = NotATupleAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"The value of 'fieldsets[1][1]['fields']' must be a list or tuple.",
obj=NotATupleAdmin,
id='admin.E008',
)
]
self.assertEqual(errors, expected)
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2(Book, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin(Album, AdminSite()).check()
expected = [
checks.Error(
"Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'.",
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_valid_generic_inline_model_admin(self):
"""
Regression test for #22034 - check that generic inlines don't look for
normal ForeignKey relations.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
self.assertEqual(errors, [])
def test_generic_inline_model_admin_non_generic_model(self):
"""
A model without a GenericForeignKey raises problems if it's included
in a GenericInlineModelAdmin definition.
"""
class BookInline(GenericStackedInline):
model = Book
class SongAdmin(admin.ModelAdmin):
inlines = [BookInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'admin_checks.Book' has no GenericForeignKey.",
obj=BookInline,
id='admin.E301',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_ct_field(self):
"""
A GenericInlineModelAdmin errors if the ct_field points to a
nonexistent field.
"""
class InfluenceInline(GenericStackedInline):
model = Influence
ct_field = 'nonexistent'
class SongAdmin(admin.ModelAdmin):
inlines = [InfluenceInline]
errors = SongAdmin(Song, AdminSite()).check()
expected = [
checks.Error(
"'ct_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
obj=InfluenceInline,
id='admin.E302',
)
]
self.assertEqual(errors, expected)
def test_generic_inline_model_admin_bad_fk_field(self):
    """
    A GenericInlineModelAdmin errors if the ct_fk_field points to a
    nonexistent field.
    """
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_fk_field = 'nonexistent'

    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]

    self.assertEqual(
        SongAdmin(Song, AdminSite()).check(),
        [
            checks.Error(
                "'ct_fk_field' references 'nonexistent', which is not a field on 'admin_checks.Influence'.",
                obj=InfluenceInline,
                id='admin.E303',
            )
        ],
    )
def test_generic_inline_model_admin_non_gfk_ct_field(self):
    """
    A GenericInlineModelAdmin raises problems if the ct_field points to a
    field that isn't part of a GenericForeignKey.
    """
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_field = 'name'

    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]

    self.assertEqual(
        SongAdmin(Song, AdminSite()).check(),
        [
            checks.Error(
                "'admin_checks.Influence' has no GenericForeignKey using "
                "content type field 'name' and object ID field 'object_id'.",
                obj=InfluenceInline,
                id='admin.E304',
            )
        ],
    )
def test_generic_inline_model_admin_non_gfk_fk_field(self):
    """
    A GenericInlineModelAdmin raises problems if the ct_fk_field points to
    a field that isn't part of a GenericForeignKey.
    """
    class InfluenceInline(GenericStackedInline):
        model = Influence
        ct_fk_field = 'name'

    class SongAdmin(admin.ModelAdmin):
        inlines = [InfluenceInline]

    self.assertEqual(
        SongAdmin(Song, AdminSite()).check(),
        [
            checks.Error(
                "'admin_checks.Influence' has no GenericForeignKey using "
                "content type field 'content_type' and object ID field 'name'.",
                obj=InfluenceInline,
                id='admin.E304',
            )
        ],
    )
def test_app_label_in_admin_checks(self):
    """Check messages must qualify field owners with the app label."""
    class RawIdNonexistentAdmin(admin.ModelAdmin):
        raw_id_fields = ('nonexistent',)

    self.assertEqual(
        RawIdNonexistentAdmin(Album, AdminSite()).check(),
        [
            checks.Error(
                "The value of 'raw_id_fields[0]' refers to 'nonexistent', "
                "which is not an attribute of 'admin_checks.Album'.",
                obj=RawIdNonexistentAdmin,
                id='admin.E002',
            )
        ],
    )
def test_fk_exclusion(self):
    """
    Regression test for #11709 - when testing for fk excluding (when exclude is
    given) make sure fk_name is honored or things blow up when there is more
    than one fk to the parent model.
    """
    class TwoAlbumFKAndAnEInline(admin.TabularInline):
        model = TwoAlbumFKAndAnE
        exclude = ("e",)
        fk_name = "album1"

    class MyAdmin(admin.ModelAdmin):
        inlines = [TwoAlbumFKAndAnEInline]

    # With an explicit fk_name the ambiguity is resolved: no errors.
    self.assertEqual(MyAdmin(Album, AdminSite()).check(), [])
def test_inline_self_check(self):
    """An inline with two FKs to the parent and no fk_name is an error."""
    class TwoAlbumFKAndAnEInline(admin.TabularInline):
        model = TwoAlbumFKAndAnE

    class MyAdmin(admin.ModelAdmin):
        inlines = [TwoAlbumFKAndAnEInline]

    self.assertEqual(
        MyAdmin(Album, AdminSite()).check(),
        [
            checks.Error(
                "'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
                obj=TwoAlbumFKAndAnEInline,
                id='admin.E202',
            )
        ],
    )
def test_inline_with_specified(self):
    """Specifying fk_name silences the multiple-ForeignKey error."""
    class TwoAlbumFKAndAnEInline(admin.TabularInline):
        model = TwoAlbumFKAndAnE
        fk_name = "album1"

    class MyAdmin(admin.ModelAdmin):
        inlines = [TwoAlbumFKAndAnEInline]

    self.assertEqual(MyAdmin(Album, AdminSite()).check(), [])
def test_readonly(self):
    """A model field name in readonly_fields is valid."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("title",)

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_readonly_on_method(self):
    """A plain callable in readonly_fields is valid."""
    def my_function(obj):
        pass

    class SongAdmin(admin.ModelAdmin):
        readonly_fields = (my_function,)

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_readonly_on_modeladmin(self):
    """A ModelAdmin method name in readonly_fields is valid."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("readonly_method_on_modeladmin",)

        def readonly_method_on_modeladmin(self, obj):
            pass

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_readonly_dynamic_attribute_on_modeladmin(self):
    """An attribute resolved via __getattr__ in readonly_fields is valid."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("dynamic_method",)

        def __getattr__(self, item):
            if item == "dynamic_method":
                def method(obj):
                    pass
                return method
            raise AttributeError

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_readonly_method_on_model(self):
    """A method defined on the model itself in readonly_fields is valid."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("readonly_method_on_model",)

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_nonexistent_field(self):
    """An unresolvable name in readonly_fields raises admin.E035."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = ("title", "nonexistent")

    self.assertEqual(
        SongAdmin(Song, AdminSite()).check(),
        [
            checks.Error(
                "The value of 'readonly_fields[1]' is not a callable, an attribute "
                "of 'SongAdmin', or an attribute of 'admin_checks.Song'.",
                obj=SongAdmin,
                id='admin.E035',
            )
        ],
    )
def test_nonexistent_field_on_inline(self):
    """An unresolvable readonly_fields name on an inline raises admin.E035."""
    class CityInline(admin.TabularInline):
        model = City
        readonly_fields = ['i_dont_exist']  # Missing attribute

    self.assertEqual(
        CityInline(State, AdminSite()).check(),
        [
            checks.Error(
                "The value of 'readonly_fields[0]' is not a callable, an attribute "
                "of 'CityInline', or an attribute of 'admin_checks.City'.",
                obj=CityInline,
                id='admin.E035',
            )
        ],
    )
def test_readonly_fields_not_list_or_tuple(self):
    """readonly_fields must be a list or tuple, not a bare string."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = 'test'

    errors = SongAdmin(Song, AdminSite()).check()
    expected = [
        checks.Error(
            "The value of 'readonly_fields' must be a list or tuple.",
            obj=SongAdmin,
            id='admin.E034',
        )
    ]
    self.assertEqual(errors, expected)
def test_extra(self):
    """Extra, unrelated methods on a ModelAdmin do not trigger checks."""
    class SongAdmin(admin.ModelAdmin):
        def awesome_song(self, instance):
            return "Best Ever!" if instance.title == "Born to Run" else "Status unknown."

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_readonly_lambda(self):
    """A lambda in readonly_fields is a valid callable."""
    class SongAdmin(admin.ModelAdmin):
        readonly_fields = (lambda obj: "test",)

    self.assertEqual(SongAdmin(Song, AdminSite()).check(), [])
def test_graceful_m2m_fail(self):
    """
    Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
    specifies the 'through' option is included in the 'fields' or the 'fieldsets'
    ModelAdmin options.
    """
    class BookAdmin(admin.ModelAdmin):
        fields = ['authors']

    self.assertEqual(
        BookAdmin(Book, AdminSite()).check(),
        [
            checks.Error(
                "The value of 'fields' cannot include the ManyToManyField 'authors', "
                "because that field manually specifies a relationship model.",
                obj=BookAdmin,
                id='admin.E013',
            )
        ],
    )
def test_cannot_include_through(self):
    """The same M2M-through restriction applies inside fieldsets."""
    class FieldsetBookAdmin(admin.ModelAdmin):
        fieldsets = (
            ('Header 1', {'fields': ('name',)}),
            ('Header 2', {'fields': ('authors',)}),
        )

    self.assertEqual(
        FieldsetBookAdmin(Book, AdminSite()).check(),
        [
            checks.Error(
                "The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
                "'authors', because that field manually specifies a relationship model.",
                obj=FieldsetBookAdmin,
                id='admin.E013',
            )
        ],
    )
def test_nested_fields(self):
    """Tuples inside 'fields' (fields shown on one line) are accepted."""
    class NestedFieldsAdmin(admin.ModelAdmin):
        fields = ('price', ('name', 'subtitle'))

    self.assertEqual(NestedFieldsAdmin(Book, AdminSite()).check(), [])
def test_nested_fieldsets(self):
    """Tuples inside a fieldset's 'fields' are accepted."""
    class NestedFieldsetAdmin(admin.ModelAdmin):
        fieldsets = (
            ('Main', {'fields': ('price', ('name', 'subtitle'))}),
        )

    self.assertEqual(NestedFieldsetAdmin(Book, AdminSite()).check(), [])
def test_explicit_through_override(self):
    """
    Regression test for #12209 -- If the explicitly provided through model
    is specified as a string, the admin should still be able use
    Model.m2m_field.through
    """
    class AuthorsInline(admin.TabularInline):
        model = Book.authors.through

    class BookAdmin(admin.ModelAdmin):
        inlines = [AuthorsInline]

    self.assertEqual(BookAdmin(Book, AdminSite()).check(), [])
def test_non_model_fields(self):
    """
    Regression for ensuring ModelAdmin.fields can contain non-model fields
    that broke with r11737
    """
    class SongForm(forms.ModelForm):
        extra_data = forms.CharField()

    class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
        form = SongForm
        fields = ['title', 'extra_data']

    self.assertEqual(FieldsOnFormOnlyAdmin(Song, AdminSite()).check(), [])
def test_non_model_first_field(self):
    """
    Regression for ensuring ModelAdmin.field can handle first elem being a
    non-model field (test fix for UnboundLocalError introduced with r16225).
    """
    class SongForm(forms.ModelForm):
        extra_data = forms.CharField()

        class Meta:
            model = Song
            fields = '__all__'

    class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
        form = SongForm
        fields = ['extra_data', 'title']

    self.assertEqual(FieldsOnFormOnlyAdmin(Song, AdminSite()).check(), [])
def test_check_sublists_for_duplicates(self):
    """Duplicates are detected even when hidden inside a sublist."""
    class MyModelAdmin(admin.ModelAdmin):
        fields = ['state', ['state']]

    self.assertEqual(
        MyModelAdmin(Song, AdminSite()).check(),
        [
            checks.Error(
                "The value of 'fields' contains duplicate field(s).",
                obj=MyModelAdmin,
                id='admin.E006'
            )
        ],
    )
def test_check_fieldset_sublists_for_duplicates(self):
    """Duplicates in a fieldset are detected inside nested tuples too."""
    class MyModelAdmin(admin.ModelAdmin):
        fieldsets = [
            (None, {
                'fields': ['title', 'album', ('title', 'album')]
            }),
        ]

    self.assertEqual(
        MyModelAdmin(Song, AdminSite()).check(),
        [
            checks.Error(
                "There are duplicate field(s) in 'fieldsets[0][1]'.",
                obj=MyModelAdmin,
                id='admin.E012'
            )
        ],
    )
def test_list_filter_works_on_through_field_even_when_apps_not_ready(self):
    """
    Ensure list_filter can access reverse fields even when the app registry
    is not ready; refs #24146.
    """
    class BookAdminWithListFilter(admin.ModelAdmin):
        list_filter = ['authorsbooks__featured']

    # Temporarily pretending apps are not ready yet. This issue can happen
    # if the value of 'list_filter' refers to a 'through__field'.
    Book._meta.apps.ready = False
    try:
        self.assertEqual(BookAdminWithListFilter(Book, AdminSite()).check(), [])
    finally:
        # Always restore the registry flag so other tests are unaffected.
        Book._meta.apps.ready = True
|
|
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import sys
import shutil
import zipfile
import tarfile
import struct
import subprocess
import urllib.request
from contextlib import contextmanager
from distutils import spawn
from distutils.dist import Distribution
from urllib.error import HTTPError, URLError
@contextmanager
def TemporaryPath(path):
    """Context that substitutes the system path to test for API presence or absence.

    *path* is a list of directories that is prepended to ``sys.path`` for
    the duration of the ``with`` block; the original path is restored on
    exit, even if the body raises.
    """
    saved_sys_path = list(sys.path)
    try:
        sys.path = path + sys.path
        yield
    finally:
        sys.path = saved_sys_path
class SolverInstaller(object):
    """Base class for downloading, unpacking, compiling and installing an
    SMT solver together with its Python bindings.

    Subclasses set :attr:`SOLVER` (used as the working sub-directory of
    ``install_dir``) and override the :meth:`compile`, :meth:`move` and
    :meth:`get_installed_version` hooks as needed.
    """

    # Solver name; must be overridden by concrete installer subclasses.
    SOLVER = None

    def __init__(self, install_dir, bindings_dir, solver_version,
                 archive_name=None, native_link=None, mirror_link=None):
        # Target directory for the compiled bindings and working area.
        self.bindings_dir = bindings_dir
        self.install_dir = install_dir
        self.solver_version = solver_version
        self.mirror_link = mirror_link
        # How many times the full mirror list is retried on HTTP 404.
        self.trials_404 = 3
        # Per-solver working directory, created lazily on first use.
        self.base_dir = os.path.join(self.install_dir, self.SOLVER)
        if not os.path.exists(self.base_dir):
            os.mkdir(self.base_dir)
        self.native_link = native_link
        self.archive_name = archive_name
        if self.archive_name is not None:
            self.archive_path = os.path.join(self.base_dir, self.archive_name)
            # Pre-compute the directory the archive will expand into by
            # stripping the known archive extension.
            if self.archive_path.endswith(".tar.gz"):
                self.extract_path = self.archive_path[:-7] # get rid of '.tar.gz'
            elif self.archive_path.endswith(".tar.bz2"):
                self.extract_path = self.archive_path[:-8] # get rid of '.tar.bz2'
            elif self.archive_path.endswith(".zip"):
                self.extract_path = self.archive_path[:-4] # get rid of '.zip'
            else:
                self.extract_path = None
        else:
            self.archive_path = None
            self.extract_path = None

    @property
    def os_name(self):
        """Lower-case platform name, e.g. 'linux', 'darwin', 'windows'."""
        return platform.system().lower()

    @property
    def architecture(self):
        """'x86_64' on a 64-bit interpreter, 'x86' otherwise."""
        if self.bits == 64:
            return "x86_64"
        else:
            return "x86"

    @property
    def bits(self):
        """Pointer width of the running interpreter (32 or 64)."""
        # struct.calcsize("P") is sizeof(void*) in bytes.
        return 8 * struct.calcsize("P")

    @property
    def python_version(self):
        """'major.minor' of the running interpreter, e.g. '3.11'."""
        return "%d.%d" % sys.version_info[0:2]

    def download_links(self):
        """Yield candidate download URLs, trying the mirror link first."""
        if self.mirror_link is not None:
            yield self.mirror_link.format(archive_name=self.archive_name, solver_version=self.solver_version)
        if self.native_link is not None:
            yield self.native_link.format(archive_name=self.archive_name, solver_version=self.solver_version)

    def download(self):
        """Downloads the archive from one of the mirrors"""
        # Skip entirely if a previous run already fetched the archive.
        if not os.path.exists(self.archive_path):
            for turn in range(self.trials_404):
                for i, link in enumerate(self.download_links()):
                    try:
                        return self.do_download(link, self.archive_path)
                    except HTTPError as e:
                        # 404s may be transient (e.g. mirror lag): report and
                        # try the next link / next round. Anything else aborts.
                        if e.code != 404:
                            raise
                        print("HTTP 404 while trying to get the archive using link" \
                              " '%s' (trial %d/%d)" % (link, turn+1, self.trials_404))
                    except URLError as e:
                        # Network-level failure: report and abort immediately
                        # (no retry on URLError).
                        print("Error while trying to get the archive using link" \
                              " '%s' (trial %d/%d)" % (link, turn+1, self.trials_404))
                        raise e
        # NOTE(review): returns None both when the archive already exists and
        # when every 404 retry is exhausted; callers must not rely on the
        # return value.

    def unpack(self):
        """Unpacks the archive"""
        path = self.archive_path
        # Dispatch on the archive extension; mirrors __init__'s extension list.
        if path.endswith(".zip"):
            SolverInstaller.unzip(path, directory=self.base_dir)
        elif path.endswith(".tar.bz2"):
            SolverInstaller.untar(path, directory=self.base_dir, mode='r:bz2')
        elif path.endswith(".tar.gz"):
            SolverInstaller.untar(path, directory=self.base_dir)
        else:
            raise ValueError("Unsupported archive for extraction: %s" % path)

    def compile(self):
        """Performs the compilation if needed"""
        # Hook: default is a no-op; subclasses override when a build step is
        # required.
        pass

    def move(self):
        """Moves relevant files in bindings_dir"""
        # Hook: default is a no-op; subclasses override to install artifacts.
        pass

    def install(self, force_redo=False):
        """Performs the installation of the solver"""
        # Fast path: nothing to do if the right version is already installed.
        if (not force_redo) and self.is_installed():
            return True

        if force_redo:
            SolverInstaller.clean_dir(self.base_dir)

        # Standard pipeline: fetch, extract, build, deploy.
        self.download()
        self.unpack()
        self.compile()
        self.move()
        # NOTE(review): this path returns None while the early-exit above
        # returns True — callers should not rely on the return value.
        return

    def is_installed(self):
        """Checks if the solver is installed and usable"""
        ver = self.get_installed_version()
        return (ver is not None) and (ver == self.solver_version)

    def get_installed_version(self):
        """Returns a string representing the version of the solver currently
        installed or None if the solver is not found"""
        # Hook: subclasses override to probe their solver's bindings.
        return None

    @staticmethod
    def do_download(url, file_name):
        """Downloads the given url into the given file name"""
        u = urllib.request.urlopen(url)
        # NOTE(review): neither 'u' nor 'f' is closed if an exception is
        # raised mid-transfer; consider 'with' blocks.
        f = open(file_name, 'wb')
        meta = u.info()
        if meta.get("Content-Length") and len(meta.get("Content-Length")) > 0:
            file_size = int(meta.get("Content-Length"))
            print("Downloading: %s Bytes: %s" % (file_name, file_size))

        block_sz = 8192
        count = 0
        while True:
            buff = u.read(block_sz)
            if not buff:
                break
            f.write(buff)
            # Progress meter: only when the size is known AND stdout is a
            # terminal (file_size is defined exactly when Content-Length is).
            if meta.get("Content-Length") and len(meta.get("Content-Length")) > 0 \
               and sys.stdout.isatty():
                count += len(buff)
                perc = (float(count) / float(file_size)) * 100.0
                str_perc = "%.1f%%" % perc
                sys.stdout.write('\r')
                sys.stdout.write(str_perc)
                # Pad with spaces to overwrite a previously longer percentage.
                sys.stdout.write(" " * (10 - len(str_perc)))
        print("")
        f.close()
        return True

    @staticmethod
    def run_python(script, directory=None, env_variables=None, get_output=False):
        """Executes a python script"""
        # Prefer the exact interpreter running this installer.
        interpreter = 'python'
        if sys.executable:
            interpreter = sys.executable
        cmd = '{interpreter} {script}'.format(interpreter=interpreter,
                                              script=script)
        return SolverInstaller.run(cmd, directory=directory,
                                   env_variables=env_variables,
                                   get_output=get_output)

    @staticmethod
    def run(program, directory=None, env_variables=None, get_output=False,
            suppress_stderr=False):
        """Executes an arbitrary program"""
        # Start from the current environment and overlay the extra variables.
        environment = os.environ.copy()
        if env_variables is not None:
            for k,v in env_variables.items():
                environment[k] = v

        stderr = None
        if suppress_stderr:
            stderr = open(os.devnull, 'w')

        # Accept both a command string and an argv list.
        if isinstance(program, str):
            program = program.split()

        if get_output:
            output = subprocess.check_output(program,
                                             env=environment,
                                             cwd=directory,
                                             stderr=stderr)
            if suppress_stderr:
                stderr.close()
            # NOTE(review): assumes the child only emits ASCII — TODO confirm.
            return output.decode("ascii")
        else:
            subprocess.check_call(program, env=environment,
                                  cwd=directory, stderr=stderr)
            if suppress_stderr:
                stderr.close()

    @staticmethod
    def clean_dir(path):
        """Empties a (possibly non-existent) directory"""
        if os.path.exists(path):
            shutil.rmtree(path)
        os.mkdir(path)

    @staticmethod
    def mv(source, dest):
        """Similarly to the UNIX mv command, moves / renames source_file in
        dest (if dest is a file name) otherwise moves source_file in
        the directory dest
        """
        if os.path.isdir(dest):
            dest = os.path.join(dest, os.path.basename(source))
        if os.path.isdir(source):
            # Moving a directory: replace any existing destination, then
            # copy-and-delete (works across filesystems).
            if os.path.exists(dest):
                if os.path.isdir(dest):
                    shutil.rmtree(dest, ignore_errors=True)
                else:
                    os.unlink(dest)
            shutil.copytree(source, dest, symlinks=True)
            shutil.rmtree(source, ignore_errors=True)
        else:
            shutil.copy(source, dest)
            os.unlink(source)

    @staticmethod
    def untar(fname, directory, mode='r:gz'):
        """Extracts the tarfile using the specified mode in the given directory."""
        tfile = tarfile.open(fname, mode)
        tfile.extractall(directory)

    @staticmethod
    def unzip(fname, directory):
        """Unzips the given archive into the given directory"""
        myzip = zipfile.ZipFile(fname, "r")
        myzip.extractall(directory)
        myzip.close()

    def get_installed_version_script(self, bindings_dir, package):
        """Run the shared check_version.py helper against *bindings_dir* and
        return the reported version string, or None when the package is
        missing or the probe fails."""
        check_version_script = os.path.abspath(os.path.join(
            os.path.dirname(__file__),
            "..",
            "check_version.py"))
        # Prepend the bindings directory to the search paths of the child
        # process so the probe imports the freshly installed bindings.
        env = {}
        for k in ["LD_LIBRARY_PATH", "PATH", "PYTHONPATH"]:
            if k in os.environ:
                env[k] = bindings_dir + os.pathsep + os.environ[k]
            else:
                env[k] = bindings_dir
        try:
            output = self.run_python("%s %s" % (check_version_script, package),
                                     env_variables=env,
                                     get_output=True)
            output = output.strip()
        except Exception as ex:
            # Best-effort probe: any failure is reported as "not installed".
            print("Error while checking %s" % package)
            return None
        if output == "NOT INSTALLED":
            return None
        return output

    def find_python_config(self):
        """Locate a python-config executable, preferring the versioned name
        (e.g. 'python3.11-config') and falling back to plain 'python-config'.
        Returns the full path or None when neither exists."""
        command_tplate = 'python%s-config'
        alternatives = [self.python_version, '']
        command = None
        for alt in alternatives:
            name = command_tplate % alt
            command = spawn.find_executable(name)
            if command is not None:
                break
        return command
def package_install_site(name='', user=False, plat_specific=False):
    """pip-inspired, distutils-based method for fetching the
    default install location (site-packages path).

    Returns virtual environment or system site-packages, unless
    `user=True` in which case returns user-site (typ. under `~/.local/
    on linux).

    If there's a distinction (on a particular system) between platform
    specific and pure python package locations, set `plat_specific=True`
    to retrieve the former.
    """
    dist = Distribution({'name': name})
    dist.parse_config_files()
    inst = dist.get_command_obj('install', create=True)
    # NOTE: specifying user=True will create user-site
    if user:
        inst.user = user
        inst.prefix = ""
    inst.finalize_options()

    # platform-specific site vs. purelib (platform-independent) site
    loc = inst.install_platlib if plat_specific else inst.install_purelib

    # install_lib specified in setup.cfg has highest precedence
    if 'install_lib' in dist.get_option_dict('install'):
        loc = inst.install_lib

    return loc
def running_under_virtualenv():
    """
    Return True if we're running inside a virtualenv, False otherwise.

    Note: copied from pip. Legacy virtualenv sets sys.real_prefix; PEP 405
    venvs make sys.prefix differ from sys.base_prefix.
    """
    return hasattr(sys, 'real_prefix') or \
        sys.prefix != getattr(sys, "base_prefix", sys.prefix)
def solver_install_site(plat_specific=False):
    """Determine solver's install site similarly to pip behaviour on Debian.

    Installs to the local user-site unless running inside a virtualenv or
    as root; set ``plat_specific=True`` for the platform-specific location.
    """
    # install to local user-site, unless in virtualenv or running as root
    default_user = True
    if running_under_virtualenv():
        default_user = False
    try:
        if os.geteuid() == 0:
            default_user = False
    # BUG FIX: the original bare 'except:' also swallowed KeyboardInterrupt
    # and SystemExit. A missing os.geteuid (Windows) raises AttributeError.
    except AttributeError:
        # getuid/geteuid not supported on windows
        pass
    return package_install_site(user=default_user, plat_specific=plat_specific)
|
|
"""Solvers for Ridge and LogisticRegression using SAG algorithm"""
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..exceptions import ConvergenceWarning
from ..utils import check_array
from ..utils.extmath import row_norms
from .base import make_dataset
from .sag_fast import sag
def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept):
    """Compute automatic step size for SAG solver.

    The step size is the inverse of the Lipschitz constant of the gradient:
    4 / (L + fit_intercept + 4 * alpha_scaled) for the log and multinomial
    losses and 1 / (L + fit_intercept + alpha_scaled) for the squared loss,
    where L is the max sum of squares over all samples.

    Parameters
    ----------
    max_squared_sum : float
        Maximum squared sum of X over samples.

    alpha_scaled : float
        Constant that multiplies the regularization term, scaled by
        1. / n_samples, the number of samples.

    loss : string, in {"log", "squared", "multinomial"}
        The loss function used in SAG solver.

    fit_intercept : bool
        Specifies if a constant (a.k.a. bias or intercept) will be
        added to the decision function.

    Returns
    -------
    step_size : float
        Step size used in SAG solver.

    Raises
    ------
    ValueError
        If `loss` is not one of the supported values.

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf
    """
    if loss in ('log', 'multinomial'):
        # inverse Lipschitz constant for log loss
        return 4.0 / (max_squared_sum + int(fit_intercept)
                      + 4.0 * alpha_scaled)
    elif loss == 'squared':
        # inverse Lipschitz constant for squared loss
        return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled)
    else:
        # BUG FIX: the original message omitted 'multinomial' even though it
        # is an accepted value of `loss`.
        raise ValueError("Unknown loss function for SAG solver, got %s "
                         "instead of 'log', 'squared' or 'multinomial'" % loss)
def sag_solver(X, y, sample_weight=None, loss='log', alpha=1.,
               max_iter=1000, tol=0.001, verbose=0, random_state=None,
               check_input=True, max_squared_sum=None,
               warm_start_mem=None):
    """SAG solver for Ridge and LogisticRegression

    SAG stands for Stochastic Average Gradient: the gradient of the loss is
    estimated each sample at a time and the model is updated along the way with
    a constant learning rate.

    IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the
    same scale. You can normalize the data by using
    sklearn.preprocessing.StandardScaler on your data before passing it to the
    fit method.

    This implementation works with data represented as dense numpy arrays or
    sparse scipy arrays of floating point values for the features. It will
    fit the data according to squared loss or log loss.

    The regularizer is a penalty added to the loss function that shrinks model
    parameters towards the zero vector using the squared euclidean norm L2.

    .. versionadded:: 0.17

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data

    y : numpy array, shape (n_samples,)
        Target values. With loss='multinomial', y must be label encoded
        (see preprocessing.LabelEncoder).

    sample_weight : array-like, shape (n_samples,), optional
        Weights applied to individual samples (1. for unweighted).

    loss : 'log' | 'squared' | 'multinomial'
        Loss function that will be optimized:
        -'log' is the binary logistic loss, as used in LogisticRegression.
        -'squared' is the squared loss, as used in Ridge.
        -'multinomial' is the multinomial logistic loss, as used in
         LogisticRegression.

        .. versionadded:: 0.18
           *loss='multinomial'*

    alpha : float, optional
        Constant that multiplies the regularization term. Defaults to 1.

    max_iter: int, optional
        The max number of passes over the training data if the stopping
        criterion is not reached. Defaults to 1000.

    tol: double, optional
        The stopping criterion for the weights. The iterations will stop when
        max(change in weights) / max(weights) < tol. Defaults to .001

    verbose: integer, optional
        The verbosity level.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    check_input : bool, default True
        If False, the input arrays X and y will not be checked.

    max_squared_sum : float, default None
        Maximum squared sum of X over samples. If None, it will be computed,
        going through all the samples. The value should be precomputed
        to speed up cross validation.

    warm_start_mem: dict, optional
        The initialization parameters used for warm starting. Warm starting is
        currently used in LogisticRegression but not in Ridge.
        It contains:
            - 'coef': the weight vector, with the intercept in last line
                if the intercept is fitted.
            - 'gradient_memory': the scalar gradient for all seen samples.
            - 'sum_gradient': the sum of gradient over all seen samples,
                for each feature.
            - 'intercept_sum_gradient': the sum of gradient over all seen
                samples, for the intercept.
            - 'seen': array of boolean describing the seen samples.
            - 'num_seen': the number of seen samples.

    Returns
    -------
    coef_ : array, shape (n_features)
        Weight vector.

    n_iter_ : int
        The number of full pass on all samples.

    warm_start_mem : dict
        Contains a 'coef' key with the fitted result, and possibly the
        fitted intercept at the end of the array. Contains also other keys
        used for warm starting.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> X = np.random.randn(n_samples, n_features)
    >>> y = np.random.randn(n_samples)
    >>> clf = linear_model.Ridge(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='sag', tol=0.001)

    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.LogisticRegression(solver='sag')
    >>> clf.fit(X, y)
    ... #doctest: +NORMALIZE_WHITESPACE
    LogisticRegression(C=1.0, class_weight=None, dual=False,
        fit_intercept=True, intercept_scaling=1, max_iter=100,
        multi_class='ovr', n_jobs=1, penalty='l2', random_state=None,
        solver='sag', tol=0.0001, verbose=0, warm_start=False)

    References
    ----------
    Schmidt, M., Roux, N. L., & Bach, F. (2013).
    Minimizing finite sums with the stochastic average gradient
    https://hal.inria.fr/hal-00860051/PDF/sag_journal.pdf

    See also
    --------
    Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and
    LogisticRegression, SGDClassifier, LinearSVC, Perceptron
    """
    if warm_start_mem is None:
        warm_start_mem = {}
    # Ridge default max_iter is None
    if max_iter is None:
        max_iter = 1000

    if check_input:
        # Force C-contiguous float64 (CSR for sparse) as the Cython kernel
        # requires.
        X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='C')

    n_samples, n_features = X.shape[0], X.shape[1]
    # As in SGD, the alpha is scaled by n_samples.
    alpha_scaled = float(alpha) / n_samples

    # if loss == 'multinomial', y should be label encoded.
    n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1

    # initialization
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float64, order='C')

    # Each warm-start buffer is taken from warm_start_mem when present,
    # otherwise freshly zero-initialized.
    if 'coef' in warm_start_mem.keys():
        coef_init = warm_start_mem['coef']
    else:
        # assume fit_intercept is False
        coef_init = np.zeros((n_features, n_classes), dtype=np.float64,
                             order='C')

    # coef_init contains possibly the intercept_init at the end.
    # Note that Ridge centers the data before fitting, so fit_intercept=False.
    fit_intercept = coef_init.shape[0] == (n_features + 1)
    if fit_intercept:
        # Split the stacked coef into the weight matrix and intercept row.
        intercept_init = coef_init[-1, :]
        coef_init = coef_init[:-1, :]
    else:
        intercept_init = np.zeros(n_classes, dtype=np.float64)

    if 'intercept_sum_gradient' in warm_start_mem.keys():
        intercept_sum_gradient = warm_start_mem['intercept_sum_gradient']
    else:
        intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64)

    if 'gradient_memory' in warm_start_mem.keys():
        gradient_memory_init = warm_start_mem['gradient_memory']
    else:
        gradient_memory_init = np.zeros((n_samples, n_classes),
                                        dtype=np.float64, order='C')
    if 'sum_gradient' in warm_start_mem.keys():
        sum_gradient_init = warm_start_mem['sum_gradient']
    else:
        sum_gradient_init = np.zeros((n_features, n_classes),
                                     dtype=np.float64, order='C')

    if 'seen' in warm_start_mem.keys():
        seen_init = warm_start_mem['seen']
    else:
        seen_init = np.zeros(n_samples, dtype=np.int32, order='C')

    if 'num_seen' in warm_start_mem.keys():
        num_seen_init = warm_start_mem['num_seen']
    else:
        num_seen_init = 0

    dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state)

    if max_squared_sum is None:
        max_squared_sum = row_norms(X, squared=True).max()
    step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss,
                                   fit_intercept)
    # step_size * alpha_scaled == 1 would make the SAG update degenerate
    # (division by zero inside the Cython kernel).
    if step_size * alpha_scaled == 1:
        raise ZeroDivisionError("Current sag implementation does not handle "
                                "the case step_size * alpha_scaled == 1")

    # Delegate the optimization loop to the compiled Cython routine; all the
    # *_init buffers are updated in place.
    num_seen, n_iter_ = sag(dataset, coef_init,
                            intercept_init, n_samples,
                            n_features, n_classes, tol,
                            max_iter,
                            loss,
                            step_size, alpha_scaled,
                            sum_gradient_init,
                            gradient_memory_init,
                            seen_init,
                            num_seen_init,
                            fit_intercept,
                            intercept_sum_gradient,
                            intercept_decay,
                            verbose)

    if n_iter_ == max_iter:
        warnings.warn("The max_iter was reached which means "
                      "the coef_ did not converge", ConvergenceWarning)

    if fit_intercept:
        # Re-stack the intercept as the last row for warm starting.
        coef_init = np.vstack((coef_init, intercept_init))

    warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init,
                      'intercept_sum_gradient': intercept_sum_gradient,
                      'gradient_memory': gradient_memory_init,
                      'seen': seen_init, 'num_seen': num_seen}

    if loss == 'multinomial':
        coef_ = coef_init.T
    else:
        coef_ = coef_init[:, 0]

    return coef_, n_iter_, warm_start_mem
|
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1VirtualMachinePool(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'K8sIoApimachineryPkgApisMetaV1ObjectMeta',
'spec': 'V1alpha1VirtualMachinePoolSpec',
'status': 'V1alpha1VirtualMachinePoolStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
    """
    V1alpha1VirtualMachinePool - a model defined in Swagger
    """
    # Private backing fields; public access goes through the properties below.
    self._api_version = None
    self._kind = None
    self._metadata = None
    self._spec = None
    self._status = None

    if api_version is not None:
        self.api_version = api_version
    if kind is not None:
        self.kind = kind
    if metadata is not None:
        self.metadata = metadata
    # 'spec' is required: assigned unconditionally so the property setter
    # can raise ValueError when it is None.
    self.spec = spec
    if status is not None:
        self.status = status
@property
def api_version(self):
    """
    Gets the api_version of this V1alpha1VirtualMachinePool.
    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

    :return: The api_version of this V1alpha1VirtualMachinePool.
    :rtype: str
    """
    return self._api_version

@api_version.setter
def api_version(self, api_version):
    """
    Sets the api_version of this V1alpha1VirtualMachinePool.
    APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

    :param api_version: The api_version of this V1alpha1VirtualMachinePool.
    :type: str
    """
    # Optional field: no validation is performed on assignment.
    self._api_version = api_version
@property
def kind(self):
    """
    Gets the kind of this V1alpha1VirtualMachinePool.
    Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

    :return: The kind of this V1alpha1VirtualMachinePool.
    :rtype: str
    """
    return self._kind

@kind.setter
def kind(self, kind):
    """
    Sets the kind of this V1alpha1VirtualMachinePool.
    Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

    :param kind: The kind of this V1alpha1VirtualMachinePool.
    :type: str
    """
    # Optional field: no validation is performed on assignment.
    self._kind = kind
@property
def metadata(self):
    """
    Gets the metadata of this V1alpha1VirtualMachinePool.
    :return: The metadata of this V1alpha1VirtualMachinePool.
    :rtype: K8sIoApimachineryPkgApisMetaV1ObjectMeta
    """
    return self._metadata

@metadata.setter
def metadata(self, metadata):
    """
    Sets the metadata of this V1alpha1VirtualMachinePool.
    :param metadata: The metadata of this V1alpha1VirtualMachinePool.
    :type: K8sIoApimachineryPkgApisMetaV1ObjectMeta
    """
    # Optional field: no validation; unset values remain None.
    self._metadata = metadata
@property
def spec(self):
    """
    Gets the spec of this V1alpha1VirtualMachinePool.
    :return: The spec of this V1alpha1VirtualMachinePool.
    :rtype: V1alpha1VirtualMachinePoolSpec
    """
    return self._spec

@spec.setter
def spec(self, spec):
    """
    Sets the spec of this V1alpha1VirtualMachinePool.
    :param spec: The spec of this V1alpha1VirtualMachinePool.
    :type: V1alpha1VirtualMachinePoolSpec
    :raises ValueError: if ``spec`` is None (required field).
    """
    # `spec` is the only required attribute of this model.
    if spec is None:
        raise ValueError("Invalid value for `spec`, must not be `None`")
    self._spec = spec
@property
def status(self):
    """
    Gets the status of this V1alpha1VirtualMachinePool.
    :return: The status of this V1alpha1VirtualMachinePool.
    :rtype: V1alpha1VirtualMachinePoolStatus
    """
    return self._status

@status.setter
def status(self, status):
    """
    Sets the status of this V1alpha1VirtualMachinePool.
    :param status: The status of this V1alpha1VirtualMachinePool.
    :type: V1alpha1VirtualMachinePoolStatus
    """
    # Optional field: no validation; unset values remain None.
    self._status = status
def to_dict(self):
    """
    Return the model's properties as a plain ``dict``.

    Nested model objects (anything exposing ``to_dict``) are serialized
    recursively, including models found inside list values and inside
    the values of dict attributes.
    """
    def _serialize(obj):
        # Recurse into nested swagger models; pass primitives through.
        return obj.to_dict() if hasattr(obj, "to_dict") else obj

    result = {}
    for attr, _ in iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [_serialize(item) for item in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {key: _serialize(val) for key, val in value.items()}
        else:
            result[attr] = value
    return result
def to_str(self):
    """
    Returns the string representation of the model
    """
    # Pretty-print the dict form of this model.
    return pformat(self.to_dict())
def __repr__(self):
    """
    For `print` and `pprint`
    """
    # Delegate to to_str() so repr matches the pretty-printed dict form.
    return self.to_str()
def __eq__(self, other):
    """Two pool objects are equal when the other object is the same
    model type and all of its attributes match."""
    if isinstance(other, V1alpha1VirtualMachinePool):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Logical inverse of ``__eq__``."""
    return not (self == other)
|
|
import os
import glob
import re
import shutil
import sqlalchemy
import traceback
import importlib
from rapidfuzz import fuzz
from traitlets.config import LoggingConfigurable, Config
from traitlets import Bool, List, Dict, Integer, Instance, Type, Any
from traitlets import default, validate
from textwrap import dedent
from nbconvert.exporters import Exporter, NotebookExporter
from nbconvert.writers import FilesWriter
from ..coursedir import CourseDirectory
from ..utils import find_all_files, rmtree, remove
from ..preprocessors.execute import UnresponsiveKernelError
from ..nbgraderformat import SchemaTooOldError, SchemaTooNewError
import typing
from nbconvert.exporters.exporter import ResourcesDict
class NbGraderException(Exception):
    """Raised when a conversion pass fails in a way nbgrader can report
    to the user (missing notebooks, database errors, schema mismatches,
    or per-submission conversion failures)."""
class BaseConverter(LoggingConfigurable):
    """Base class for nbgrader conversion steps.

    A converter locates assignment notebooks via the :class:`CourseDirectory`
    layout, runs each one through an nbconvert exporter (with this step's
    preprocessors registered), and writes the results into the destination
    directory. Subclasses supply ``_input_directory`` / ``_output_directory``
    and typically customize ``exporter_class`` and ``preprocessors``.
    """

    # Notebook files of the assignment currently being processed.
    notebooks = List([])
    # Maps each matched assignment directory to its list of notebook files.
    assignments = Dict({})
    writer = Instance(FilesWriter)
    exporter = Instance(Exporter)
    exporter_class = Type(NotebookExporter, klass=Exporter).tag(config=True)
    preprocessors = List([])

    force = Bool(False, help="Whether to overwrite existing assignments/submissions").tag(config=True)

    pre_convert_hook = Any(
        None,
        config=True,
        allow_none=True,
        help=dedent("""
        An optional hook function that you can implement to do some
        bootstrapping work before converting.
        This function is called before the notebooks are converted
        and should be used for specific converters such as Autograde,
        GenerateAssignment or GenerateFeedback.
        It will be called as (all arguments are passed as keywords)::
            hook(assignment=assignment, student=student, notebooks=notebooks)
        """)
    )

    post_convert_hook = Any(
        None,
        config=True,
        allow_none=True,
        help=dedent("""
        An optional hook function that you can implement to do some
        work after converting.
        This function is called after the notebooks are converted
        and should be used for specific converters such as Autograde,
        GenerateAssignment or GenerateFeedback.
        It will be called as (all arguments are passed as keywords)::
            hook(assignment=assignment, student=student, notebooks=notebooks)
        """)
    )

    permissions = Integer(
        help=dedent(
            """
            Permissions to set on files output by nbgrader. The default is
            generally read-only (444), with the exception of nbgrader
            generate_assignment and nbgrader generate_feedback, in which case
            the user also has write permission.
            """
        )
    ).tag(config=True)

    @default("permissions")
    def _permissions_default(self) -> int:
        # Permissions are expressed as the *decimal digits* of the octal mode
        # (664/444); set_permissions() converts them via int(str(...), 8).
        return 664 if self.coursedir.groupshared else 444

    @validate('pre_convert_hook')
    def _validate_pre_convert_hook(self, proposal):
        # Accept either a callable or a dotted-path string ("pkg.module.func").
        # BUGFIX: TraitError was referenced but never imported at module
        # level, so the error branch raised NameError; import it locally.
        from traitlets import TraitError
        value = proposal['value']
        if isinstance(value, str):
            module, function = value.rsplit('.', 1)
            value = getattr(importlib.import_module(module), function)
        if not callable(value):
            raise TraitError("pre_convert_hook must be callable")
        return value

    @validate('post_convert_hook')
    def _validate_post_convert_hook(self, proposal):
        # Same contract as _validate_pre_convert_hook.
        from traitlets import TraitError
        value = proposal['value']
        if isinstance(value, str):
            module, function = value.rsplit('.', 1)
            value = getattr(importlib.import_module(module), function)
        if not callable(value):
            raise TraitError("post_convert_hook must be callable")
        return value

    coursedir = Instance(CourseDirectory, allow_none=True)

    def __init__(self, coursedir: CourseDirectory = None, **kwargs: typing.Any) -> None:
        self.coursedir = coursedir
        super(BaseConverter, self).__init__(**kwargs)
        if self.parent and hasattr(self.parent, "logfile"):
            self.logfile = self.parent.logfile
        else:
            self.logfile = None
        # Disable nbconvert's default preprocessors; each converter registers
        # exactly the preprocessors it needs in start().
        c = Config()
        c.Exporter.default_preprocessors = []
        self.update_config(c)

    def start(self) -> None:
        """Run one full conversion pass from the course directory root."""
        self.init_notebooks()
        self.writer = FilesWriter(parent=self, config=self.config)
        self.exporter = self.exporter_class(parent=self, config=self.config)
        for pp in self.preprocessors:
            self.exporter.register_preprocessor(pp)
        currdir = os.getcwd()
        os.chdir(self.coursedir.root)
        try:
            self.convert_notebooks()
        finally:
            # Always restore the working directory, even on failure.
            os.chdir(currdir)

    @default("classes")
    def _classes_default(self):
        # Expose the writer, exporter, and any configurable preprocessors in
        # the generated configuration help.
        classes = super(BaseConverter, self)._classes_default()
        classes.append(FilesWriter)
        classes.append(Exporter)
        for pp in self.preprocessors:
            if len(pp.class_traits(config=True)) > 0:
                classes.append(pp)
        return classes

    @property
    def _input_directory(self):
        # Subclasses define where submissions are read from.
        raise NotImplementedError

    @property
    def _output_directory(self):
        # Subclasses define where converted output is written to.
        raise NotImplementedError

    def _format_source(self, assignment_id: str, student_id: str, escape: bool = False) -> str:
        """Path (or glob/regex when escape=True) of the source assignment."""
        return self.coursedir.format_path(self._input_directory, student_id, assignment_id, escape=escape)

    def _format_dest(self, assignment_id: str, student_id: str, escape: bool = False) -> str:
        """Path (or glob/regex when escape=True) of the destination assignment."""
        return self.coursedir.format_path(self._output_directory, student_id, assignment_id, escape=escape)

    def init_notebooks(self) -> None:
        """Populate ``self.assignments`` (assignment dir -> notebook files).

        Raises NbGraderException if nothing matches, suggesting the
        closest-matching assignment name when one exists.
        """
        self.assignments = {}
        self.notebooks = []
        assignment_glob = self._format_source(self.coursedir.assignment_id, self.coursedir.student_id)
        for assignment in glob.glob(assignment_glob):
            notebook_glob = os.path.join(assignment, self.coursedir.notebook_id + ".ipynb")
            found = glob.glob(notebook_glob)
            if len(found) == 0:
                self.log.warning("No notebooks were matched by '%s'", notebook_glob)
                continue
            self.assignments[assignment] = found

        if len(self.assignments) == 0:
            msg = "No notebooks were matched by '%s'" % assignment_glob
            self.log.error(msg)
            # Fuzzy-match against all assignments to suggest a likely typo fix.
            assignment_glob2 = self._format_source("*", self.coursedir.student_id)
            found = glob.glob(assignment_glob2)
            if found:
                scores = sorted([(fuzz.ratio(assignment_glob, x), x) for x in found])
                self.log.error("Did you mean: %s", scores[-1][1])
            raise NbGraderException(msg)

    def init_single_notebook_resources(self, notebook_filename: str) -> typing.Dict[str, typing.Any]:
        """Build the nbconvert resources dict for one notebook.

        Parses the student/assignment/notebook ids out of the filename by
        matching it against the course directory layout.
        """
        regexp = re.escape(os.path.sep).join([
            self._format_source("(?P<assignment_id>.*)", "(?P<student_id>.*)", escape=True),
            "(?P<notebook_id>.*).ipynb"
        ])
        m = re.match(regexp, notebook_filename)
        if m is None:
            msg = "Could not match '%s' with regexp '%s'" % (notebook_filename, regexp)
            self.log.error(msg)
            raise NbGraderException(msg)

        gd = m.groupdict()
        self.log.debug("Student: %s", gd['student_id'])
        self.log.debug("Assignment: %s", gd['assignment_id'])
        self.log.debug("Notebook: %s", gd['notebook_id'])

        resources = {}
        resources['unique_key'] = gd['notebook_id']
        resources['output_files_dir'] = '%s_files' % gd['notebook_id']
        resources['nbgrader'] = {}
        resources['nbgrader']['student'] = gd['student_id']
        resources['nbgrader']['assignment'] = gd['assignment_id']
        resources['nbgrader']['notebook'] = gd['notebook_id']
        resources['nbgrader']['db_url'] = self.coursedir.db_url
        return resources

    def write_single_notebook(self, output: str, resources: ResourcesDict) -> None:
        """Write one exported notebook into the destination directory."""
        # configure the writer build directory
        self.writer.build_directory = self._format_dest(
            resources['nbgrader']['assignment'], resources['nbgrader']['student'])
        # write out the results
        self.writer.write(output, resources, notebook_name=resources['unique_key'])

    def init_destination(self, assignment_id: str, student_id: str) -> bool:
        """Initialize the destination for an assignment. Returns whether the
        assignment should actually be processed or not (i.e. whether the
        initialization was successful).
        """
        if self.coursedir.student_id_exclude:
            exclude_ids = self.coursedir.student_id_exclude.split(',')
            if student_id in exclude_ids:
                return False

        dest = os.path.normpath(self._format_dest(assignment_id, student_id))

        # the destination doesn't exist, so we haven't processed it
        if self.coursedir.notebook_id == "*":
            if not os.path.exists(dest):
                return True
        else:
            # if any of the notebooks don't exist, then we want to process them
            for notebook in self.notebooks:
                filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                path = os.path.join(dest, filename)
                if not os.path.exists(path):
                    return True

        # if we have specified --force, then always remove existing stuff
        if self.force:
            if self.coursedir.notebook_id == "*":
                self.log.warning("Removing existing assignment: {}".format(dest))
                rmtree(dest)
            else:
                for notebook in self.notebooks:
                    filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                    path = os.path.join(dest, filename)
                    if os.path.exists(path):
                        self.log.warning("Removing existing notebook: {}".format(path))
                        remove(path)
            return True

        src = self._format_source(assignment_id, student_id)
        new_timestamp = self.coursedir.get_existing_timestamp(src)
        old_timestamp = self.coursedir.get_existing_timestamp(dest)

        # if --force hasn't been specified, but the source assignment is newer,
        # then we want to overwrite it
        if new_timestamp is not None and old_timestamp is not None and new_timestamp > old_timestamp:
            if self.coursedir.notebook_id == "*":
                self.log.warning("Updating existing assignment: {}".format(dest))
                rmtree(dest)
            else:
                for notebook in self.notebooks:
                    filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                    path = os.path.join(dest, filename)
                    if os.path.exists(path):
                        self.log.warning("Updating existing notebook: {}".format(path))
                        remove(path)
            return True

        # otherwise, we should skip the assignment
        self.log.info("Skipping existing assignment: {}".format(dest))
        return False

    def init_assignment(self, assignment_id: str, student_id: str) -> None:
        """Initializes resources/dependencies/etc. that are common to all
        notebooks in an assignment.
        """
        source = self._format_source(assignment_id, student_id)
        dest = self._format_dest(assignment_id, student_id)

        # detect other files in the source directory
        for filename in find_all_files(source, self.coursedir.ignore + ["*.ipynb"]):
            # Make sure folder exists.
            path = os.path.join(dest, os.path.relpath(filename, source))
            if not os.path.exists(os.path.dirname(path)):
                os.makedirs(os.path.dirname(path))
            if os.path.exists(path):
                remove(path)
            self.log.info("Copying %s -> %s", filename, path)
            shutil.copy(filename, path)

    def set_permissions(self, assignment_id: str, student_id: str) -> None:
        """Apply ``self.permissions`` to every file written for an assignment."""
        self.log.info("Setting destination file permissions to %s", self.permissions)
        dest = os.path.normpath(self._format_dest(assignment_id, student_id))
        # self.permissions holds the octal digits as a decimal int (e.g. 444).
        permissions = int(str(self.permissions), 8)
        for dirname, _, filenames in os.walk(dest):
            for filename in filenames:
                os.chmod(os.path.join(dirname, filename), permissions)
            # If groupshared, set dir permissions - see comment below.
            st_mode = os.stat(dirname).st_mode
            if self.coursedir.groupshared and st_mode & 0o2770 != 0o2770:
                try:
                    os.chmod(dirname, (st_mode | 0o2770) & 0o2777)
                except PermissionError:
                    self.log.warning("Could not update permissions of %s to make it groupshared", dirname)
        # If groupshared, set write permissions on directories. Directories
        # are created within ipython_genutils.path.ensure_dir_exists via
        # nbconvert.writer, (unless there are supplementary files) with a
        # default mode of 755 and there is no way to pass the mode arguments
        # all the way to there! So we have to walk and fix.
        if self.coursedir.groupshared:
            # Root may be created in this step, and is not included above.
            rootdir = self.coursedir.format_path(self._output_directory, '.', '.')
            # Add 2770 to existing dir permissions (don't unconditionally override)
            st_mode = os.stat(rootdir).st_mode
            if st_mode & 0o2770 != 0o2770:
                try:
                    os.chmod(rootdir, (st_mode | 0o2770) & 0o2777)
                except PermissionError:
                    self.log.warning("Could not update permissions of %s to make it groupshared", rootdir)

    def convert_single_notebook(self, notebook_filename: str) -> None:
        """
        Convert a single notebook.
        Performs the following steps:
            1. Initialize notebook resources
            2. Export the notebook to a particular format
            3. Write the exported notebook to file
        """
        self.log.info("Converting notebook %s", notebook_filename)
        resources = self.init_single_notebook_resources(notebook_filename)
        output, resources = self.exporter.from_filename(notebook_filename, resources=resources)
        self.write_single_notebook(output, resources)

    def convert_notebooks(self) -> None:
        """Convert every matched assignment, cleaning up failed output and
        reporting per-submission errors at the end."""
        errors = []

        def _handle_failure(gd: typing.Dict[str, str]) -> None:
            # Remove partial output so a failed submission isn't mistaken
            # for a processed one on the next run.
            dest = os.path.normpath(self._format_dest(gd['assignment_id'], gd['student_id']))
            if self.coursedir.notebook_id == "*":
                if os.path.exists(dest):
                    self.log.warning("Removing failed assignment: {}".format(dest))
                    rmtree(dest)
            else:
                for notebook in self.notebooks:
                    filename = os.path.splitext(os.path.basename(notebook))[0] + self.exporter.file_extension
                    path = os.path.join(dest, filename)
                    if os.path.exists(path):
                        self.log.warning("Removing failed notebook: {}".format(path))
                        remove(path)

        for assignment in sorted(self.assignments.keys()):
            # initialize the list of notebooks and the exporter
            self.notebooks = sorted(self.assignments[assignment])

            # parse out the assignment and student ids
            regexp = self._format_source("(?P<assignment_id>.*)", "(?P<student_id>.*)", escape=True)
            m = re.match(regexp, assignment)
            if m is None:
                msg = "Could not match '%s' with regexp '%s'" % (assignment, regexp)
                self.log.error(msg)
                raise NbGraderException(msg)
            gd = m.groupdict()

            try:
                # determine whether we actually even want to process this submission
                should_process = self.init_destination(gd['assignment_id'], gd['student_id'])
                if not should_process:
                    continue
                self.run_pre_convert_hook()

                # initialize the destination
                self.init_assignment(gd['assignment_id'], gd['student_id'])

                # convert all the notebooks
                for notebook_filename in self.notebooks:
                    self.convert_single_notebook(notebook_filename)

                # set assignment permissions
                self.set_permissions(gd['assignment_id'], gd['student_id'])
                self.run_post_convert_hook()

            except UnresponsiveKernelError:
                # Record the failure but keep going with other submissions.
                self.log.error(
                    "While processing assignment %s, the kernel became "
                    "unresponsive and we could not interrupt it. This probably "
                    "means that the students' code has an infinite loop that "
                    "consumes a lot of memory or something similar. nbgrader "
                    "doesn't know how to deal with this problem, so you will "
                    "have to manually edit the students' code (for example, to "
                    "just throw an error rather than enter an infinite loop). ",
                    assignment)
                errors.append((gd['assignment_id'], gd['student_id']))
                _handle_failure(gd)

            except sqlalchemy.exc.OperationalError:
                _handle_failure(gd)
                self.log.error(traceback.format_exc())
                msg = (
                    "There was an error accessing the nbgrader database. This "
                    "may occur if you recently upgraded nbgrader. To resolve "
                    "the issue, first BACK UP your database and then run the "
                    "command `nbgrader db upgrade`."
                )
                self.log.error(msg)
                raise NbGraderException(msg)

            except SchemaTooOldError:
                _handle_failure(gd)
                msg = (
                    "One or more notebooks in the assignment use an old version \n"
                    "of the nbgrader metadata format. Please **back up your class files \n"
                    "directory** and then update the metadata using:\n\nnbgrader update .\n"
                )
                self.log.error(msg)
                raise NbGraderException(msg)

            except SchemaTooNewError:
                _handle_failure(gd)
                msg = (
                    "One or more notebooks in the assignment use an newer version \n"
                    "of the nbgrader metadata format. Please update your version of \n"
                    "nbgrader to the latest version to be able to use this notebook.\n"
                )
                self.log.error(msg)
                raise NbGraderException(msg)

            except KeyboardInterrupt:
                _handle_failure(gd)
                self.log.error("Canceled")
                raise

            except Exception:
                self.log.error("There was an error processing assignment: %s", assignment)
                self.log.error(traceback.format_exc())
                errors.append((gd['assignment_id'], gd['student_id']))
                _handle_failure(gd)

        if len(errors) > 0:
            for assignment_id, student_id in errors:
                self.log.error(
                    "There was an error processing assignment '{}' for student '{}'".format(
                        assignment_id, student_id))
            if self.logfile:
                msg = (
                    "Please see the error log ({}) for details on the specific "
                    "errors on the above failures.".format(self.logfile))
            else:
                msg = (
                    "Please see the the above traceback for details on the specific "
                    "errors on the above failures.")
            self.log.error(msg)
            raise NbGraderException(msg)

    def run_pre_convert_hook(self):
        """Invoke the configured pre-convert hook, if any; failures are
        logged but never abort the conversion."""
        if self.pre_convert_hook:
            self.log.info('Running pre-convert hook')
            try:
                self.pre_convert_hook(
                    assignment=self.coursedir.assignment_id,
                    student=self.coursedir.student_id,
                    notebooks=self.notebooks)
            except Exception:
                self.log.info('Pre-convert hook failed', exc_info=True)

    def run_post_convert_hook(self):
        """Invoke the configured post-convert hook, if any; failures are
        logged but never abort the conversion."""
        if self.post_convert_hook:
            self.log.info('Running post-convert hook')
            try:
                self.post_convert_hook(
                    assignment=self.coursedir.assignment_id,
                    student=self.coursedir.student_id,
                    notebooks=self.notebooks)
            except Exception:
                self.log.info('Post-convert hook failed', exc_info=True)
|
|
from nltk.tree import Tree
from np_shallow_neural_classifier import ShallowNeuralClassifier
from np_rnn_classifier import RNNClassifier
from np_autoencoder import Autoencoder
from np_tree_nn import TreeNN
import numpy as np
import pytest
import utils
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Spring 2021"
utils.fix_random_seeds()
class GradientCheckError(Exception):
    """Raised when a numerical gradient check fails (backprop gradient
    disagrees with the finite-difference estimate)."""
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_shallow_neural_classifier_gradients(hidden_activation, d_hidden_activation):
    """Gradient-check ShallowNeuralClassifier for each activation pair."""
    clf = ShallowNeuralClassifier(
        max_iter=10,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    # Fit on a tiny random dataset just to initialize all parameters:
    X = utils.randmatrix(5, 2)
    y = np.random.choice((0, 1), 5)
    clf.fit(X, y)
    # Gradient-check against the first example:
    ex = X[0]
    label = clf._onehot_encode([y[0]])[0]
    hidden, pred = clf.forward_propagation(ex)
    grads = clf.backward_propagation(hidden, pred, ex, label)
    # backward_propagation returns (d_W_hy, d_b_hy, d_W_xh, d_b_xh):
    param_names = ('W_hy', 'b_hy', 'W_xh', 'b_xh')
    gradient_check(tuple(zip(param_names, grads)), clf, ex, label)
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_rnn_classifier(hidden_activation, d_hidden_activation):
    """Gradient-check RNNClassifier for each activation pair."""
    # A tiny dataset so that `fit` sets all the model parameters:
    vocab = ['a', 'b', '$UNK']
    sequences = [list('ab'), list('aab'), list('abb')]
    labels = ['good', 'good', 'good']
    clf = RNNClassifier(
        vocab,
        max_iter=10,
        hidden_dim=2,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    clf.fit(sequences, labels)
    # Gradient-check against the first example:
    ex = sequences[0]
    label = clf._onehot_encode([labels[0]])[0]
    hidden, pred = clf.forward_propagation(ex)
    d_W_hy, d_b, d_W_hh, d_W_xh = clf.backward_propagation(
        hidden, pred, ex, label)
    param_pairs = (
        ('W_xh', d_W_xh),
        ('W_hh', d_W_hh),
        ('W_hy', d_W_hy),
        ('b', d_b)
    )
    gradient_check(param_pairs, clf, ex, label)
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_autoencoder(hidden_activation, d_hidden_activation):
    """Gradient-check Autoencoder for each activation pair."""
    ae = Autoencoder(
        max_iter=10,
        hidden_dim=2,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    # Fit on a tiny random dataset just to initialize all parameters:
    X = utils.randmatrix(5, 5)
    ae.fit(X)
    # Autoencoder targets are the inputs themselves:
    ex = X[0]
    label = X[0]
    hidden, pred = ae.forward_propagation(ex)
    grads = ae.backward_propagation(hidden, pred, ex, label)
    # backward_propagation returns (d_W_hy, d_b_hy, d_W_xh, d_b_xh):
    param_names = ('W_hy', 'b_hy', 'W_xh', 'b_xh')
    gradient_check(tuple(zip(param_names, grads)), ae, ex, label)
@pytest.mark.parametrize("hidden_activation, d_hidden_activation", [
    [np.tanh, utils.d_tanh],
    [utils.relu, utils.d_relu]
])
def test_np_tree_nn(hidden_activation, d_hidden_activation):
    """Gradient-check TreeNN for each activation pair."""
    # A tiny dataset so that `fit` sets all the model parameters:
    vocab = ["1", "+", "2"]
    trees = [Tree.fromstring(s) for s in (
        "(even (odd 1) (neutral (neutral +) (odd 1)))",
        "(odd (odd 1) (neutral (neutral +) (even 2)))")]
    labels = [t.label() for t in trees]
    net = TreeNN(
        vocab,
        max_iter=10,
        hidden_dim=5,
        hidden_activation=hidden_activation,
        d_hidden_activation=d_hidden_activation)
    net.fit(trees, labels)
    # Gradient-check against the first example:
    ex = trees[0]
    label = net._onehot_encode([ex.label()])[0]
    hidden, pred = net.forward_propagation(ex)
    grads = net.backward_propagation(hidden, pred, ex, label)
    # backward_propagation returns (d_W_hy, d_b_y, d_W, d_b):
    param_names = ('W_hy', 'b_y', 'W', 'b')
    gradient_check(tuple(zip(param_names, grads)), net, ex, label)
def gradient_check(param_pairs, model, ex, label, epsilon=0.0001, threshold=0.001):
    """
    Numerical gradient check following the method described here:
    http://ufldl.stanford.edu/wiki/index.php/Gradient_checking_and_advanced_optimization

    Parameters
    ----------
    param_pairs : list of (str, np.array) pairs
        In each pair, the first is the name of the parameter to check,
        and the second is its purported derivatives. We use the name
        as the first pair so that we can raise an informative error
        message in the case of a failure.
    model : trained model instance
        This should have attributes for all of the parameters named in
        `param_pairs`, and it must have methods `forward_propagation`
        and `get_error`.
    ex : an example that `model` can process
    label : a label vector that `model` can learn from directly
    epsilon : float
        The small constant by which the parameter values are changed.
    threshold : float
        Tolerance for raising an error.

    Raises
    ------
    GradientCheckError
    """
    for param_name, d_params in param_pairs:
        params = getattr(model, param_name)
        # nditer lets us walk every cell of an array of any dimension:
        iterator = np.nditer(params, flags=['multi_index'], op_flags=['readwrite'])
        while not iterator.finished:
            idx = iterator.multi_index
            actual = params[idx]
            # Central finite-difference estimate of d(error)/d(param):
            params[idx] = actual + epsilon
            _, pred = model.forward_propagation(ex)
            grad_pos = model.get_error(pred, label)
            params[idx] = actual - epsilon
            _, pred = model.forward_propagation(ex)
            grad_neg = model.get_error(pred, label)
            grad_est = (grad_pos - grad_neg) / (epsilon * 2.0)
            # Restore the parameter before comparing:
            params[idx] = actual
            grad_bp = d_params[idx]
            # Relative error to control for differences in proportion
            # across parameter values. BUGFIX: when both gradients are
            # exactly zero, the original expression divided 0/0 and
            # produced nan (plus a NumPy RuntimeWarning); two zero
            # gradients trivially agree, so treat that case as zero error.
            denom = np.abs(grad_bp) + np.abs(grad_est)
            err = 0.0 if denom == 0 else np.abs(grad_bp - grad_est) / denom
            if err >= threshold:
                raise GradientCheckError(
                    "Gradient check error for {} at {}: error is {}".format(
                        param_name, idx, err))
            iterator.iternext()
|
|
import copy
import operator
from functools import wraps
import sys
import warnings
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import copyreg
# You can't trivially replace this with `functools.partial` because this binds
# to classes and returns bound instances, whereas functools.partial (on
# CPython) is a type and its instances don't bind.
def curry(_curried_func, *args, **kwargs):
    """Pre-bind positional and keyword arguments to ``_curried_func``.

    The returned callable appends any further positional arguments and
    merges further keyword arguments (later ones win).
    """
    def _inner(*more_args, **more_kwargs):
        merged_kwargs = dict(kwargs, **more_kwargs)
        return _curried_func(*(args + more_args), **merged_kwargs)
    return _inner
def memoize(func, cache, num_args):
    """
    Wrap a function so that results for any argument tuple are stored in
    'cache'. Only the first num_args positional arguments are used to
    build the key, so they must be usable as dictionary keys.

    Deprecated: emits RemovedInDjango19Warning at wrap time.
    """
    warnings.warn("memoize wrapper is deprecated and will be removed in "
                  "Django 1.9. Use django.utils.lru_cache instead.",
                  RemovedInDjango19Warning, stacklevel=2)

    @wraps(func)
    def wrapper(*args):
        key = args[:num_args]
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]
    return wrapper
class cached_property(object):
    """
    Decorator turning a method with a single ``self`` argument into a
    property whose value is computed once and then cached on the instance.

    Because the value is stored in the instance ``__dict__`` under the
    same name, subsequent lookups bypass this descriptor entirely.

    The optional ``name`` argument allows caching other methods, e.g.
    ``url = cached_property(get_absolute_url, name='url')``.
    """
    def __init__(self, func, name=None):
        self.func = func
        self.name = name or func.__name__

    def __get__(self, instance, type=None):
        # Access through the class returns the descriptor itself.
        if instance is None:
            return self
        # Compute once, then shadow the descriptor on the instance.
        value = self.func(instance)
        instance.__dict__[self.name] = value
        return value
class Promise(object):
    """
    Base class for the proxy class created in the closure of the
    :func:`lazy` function. It exists only so code can recognize lazily
    evaluated values via ``isinstance(obj, Promise)``; it defines no
    behavior of its own.
    """
    pass
def lazy(func, *resultclasses):
    """
    Turns any callable into a lazy evaluated callable. You need to give result
    classes or types -- at least one is needed so that the automatic forcing of
    the lazy evaluation code is triggered. Results are not memoized; the
    function is evaluated on every access.
    """
    # BUGFIX: total_ordering is applied to __proxy__ below but is never
    # imported at module level (only `wraps` is imported from functools),
    # so every call to lazy() raised NameError. Import it locally.
    from functools import total_ordering

    @total_ordering
    class __proxy__(Promise):
        """
        Encapsulate a function call and act as a proxy for methods that are
        called on the result of that function. The function is not evaluated
        until one of the methods on the result is called.
        """
        __dispatch = None

        def __init__(self, args, kw):
            self.__args = args
            self.__kw = kw
            # Build the per-class dispatch table lazily, on first use.
            if self.__dispatch is None:
                self.__prepare_class__()

        def __reduce__(self):
            return (
                _lazy_proxy_unpickle,
                (func, self.__args, self.__kw) + resultclasses
            )

        @classmethod
        def __prepare_class__(cls):
            cls.__dispatch = {}
            for resultclass in resultclasses:
                cls.__dispatch[resultclass] = {}
                for type_ in reversed(resultclass.mro()):
                    for (k, v) in type_.__dict__.items():
                        # All __promise__ return the same wrapper method, but
                        # they also do setup, inserting the method into the
                        # dispatch dict.
                        meth = cls.__promise__(resultclass, k, v)
                        if hasattr(cls, k):
                            continue
                        setattr(cls, k, meth)
            cls._delegate_bytes = bytes in resultclasses
            cls._delegate_text = six.text_type in resultclasses
            assert not (cls._delegate_bytes and cls._delegate_text), "Cannot call lazy() with both bytes and text return types."
            if cls._delegate_text:
                if six.PY3:
                    cls.__str__ = cls.__text_cast
                else:
                    cls.__unicode__ = cls.__text_cast
            elif cls._delegate_bytes:
                if six.PY3:
                    cls.__bytes__ = cls.__bytes_cast
                else:
                    cls.__str__ = cls.__bytes_cast

        @classmethod
        def __promise__(cls, klass, funcname, method):
            # Builds a wrapper around some magic method and registers that
            # magic method for the given type and method name.
            def __wrapper__(self, *args, **kw):
                # Automatically triggers the evaluation of a lazy value and
                # applies the given magic method of the result type.
                res = func(*self.__args, **self.__kw)
                for t in type(res).mro():
                    if t in self.__dispatch:
                        return self.__dispatch[t][funcname](res, *args, **kw)
                raise TypeError("Lazy object returned unexpected type.")

            if klass not in cls.__dispatch:
                cls.__dispatch[klass] = {}
            cls.__dispatch[klass][funcname] = method
            return __wrapper__

        def __text_cast(self):
            return func(*self.__args, **self.__kw)

        def __bytes_cast(self):
            return bytes(func(*self.__args, **self.__kw))

        def __cast(self):
            # Force evaluation, preferring the declared result type.
            if self._delegate_bytes:
                return self.__bytes_cast()
            elif self._delegate_text:
                return self.__text_cast()
            else:
                return func(*self.__args, **self.__kw)

        def __ne__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() != other

        def __eq__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() == other

        def __lt__(self, other):
            if isinstance(other, Promise):
                other = other.__cast()
            return self.__cast() < other

        def __hash__(self):
            return hash(self.__cast())

        def __mod__(self, rhs):
            if self._delegate_bytes and six.PY2:
                return bytes(self) % rhs
            elif self._delegate_text:
                return six.text_type(self) % rhs
            return self.__cast() % rhs

        def __deepcopy__(self, memo):
            # Instances of this class are effectively immutable. It's just a
            # collection of functions. So we don't need to do anything
            # complicated for copying.
            memo[id(self)] = self
            return self

    @wraps(func)
    def __wrapper__(*args, **kw):
        # Creates the proxy object, instead of the actual value.
        return __proxy__(args, kw)

    return __wrapper__
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
    """Rebuild a lazy proxy during unpickling (see __proxy__.__reduce__)."""
    proxy_factory = lazy(func, *resultclasses)
    return proxy_factory(*args, **kwargs)
def allow_lazy(func, *resultclasses):
    """
    A decorator that allows a function to be called with one or more lazy
    arguments. If none of the args are lazy, the function is evaluated
    immediately, otherwise a __proxy__ is returned that will evaluate the
    function when needed.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        has_lazy_arg = any(
            isinstance(arg, Promise)
            for arg in list(args) + list(six.itervalues(kwargs)))
        if has_lazy_arg:
            return lazy(func, *resultclasses)(*args, **kwargs)
        return func(*args, **kwargs)
    return wrapper
empty = object()
def new_method_proxy(func):
    """Build a method that forwards ``func`` to the wrapped object,
    initializing the wrapper on first use."""
    def inner(self, *args):
        wrapped = self._wrapped
        if wrapped is empty:
            self._setup()
            wrapped = self._wrapped
        return func(wrapped, *args)
    return inner
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
# Avoid infinite recursion when tracing __init__ (#19456).
_wrapped = None
def __init__(self):
    # "empty" marks the wrapped object as uninitialized; _setup() will
    # replace it on first attribute access (see new_method_proxy).
    self._wrapped = empty
__getattr__ = new_method_proxy(getattr)
def __setattr__(self, name, value):
    """Forward attribute writes to the wrapped object, except for the
    ``_wrapped`` slot itself."""
    if name == "_wrapped":
        # Assign to __dict__ to avoid infinite __setattr__ loops.
        self.__dict__["_wrapped"] = value
        return
    if self._wrapped is empty:
        self._setup()
    setattr(self._wrapped, name, value)
def __delattr__(self, name):
    """Forward attribute deletion to the wrapped object; the ``_wrapped``
    slot itself is protected."""
    if name == "_wrapped":
        raise TypeError("can't delete _wrapped.")
    wrapped = self._wrapped
    if wrapped is empty:
        self._setup()
        wrapped = self._wrapped
    delattr(wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialize the wrapped object.
"""
raise NotImplementedError('subclasses of LazyObject must provide a _setup() method')
# Because we have messed with __class__ below, we confuse pickle as to what
# class we are pickling. It also appears to stop __reduce__ from being
# called. So, we define __getstate__ in a way that cooperates with the way
# that pickle interprets this class. This fails when the wrapped class is
# a builtin, but it is better than nothing.
def __getstate__(self):
if self._wrapped is empty:
self._setup()
return self._wrapped.__dict__
# Python 3.3 will call __reduce__ when pickling; this method is needed
# to serialize and deserialize correctly.
@classmethod
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def __reduce_ex__(self, proto):
if proto >= 2:
# On Py3, since the default protocol is 3, pickle uses the
# ``__newobj__`` method (& more efficient opcodes) for writing.
return (self.__newobj__, (self.__class__,), self.__getstate__())
else:
# On Py2, the default protocol is 0 (for back-compat) & the above
# code fails miserably (see regression test). Instead, we return
# exactly what's returned if there's no ``__reduce__`` method at
# all.
return (copyreg._reconstructor, (self.__class__, object, None), self.__getstate__())
def __deepcopy__(self, memo):
if self._wrapped is empty:
# We have to use type(self), not self.__class__, because the
# latter is proxied.
result = type(self)()
memo[id(self)] = result
return result
return copy.deepcopy(self._wrapped, memo)
if six.PY3:
__bytes__ = new_method_proxy(bytes)
__str__ = new_method_proxy(str)
__bool__ = new_method_proxy(bool)
else:
__str__ = new_method_proxy(str)
__unicode__ = new_method_proxy(unicode)
__nonzero__ = new_method_proxy(bool)
# Introspection support
__dir__ = new_method_proxy(dir)
# Need to pretend to be the wrapped class, for the sake of objects that
# care about this (especially in equality tests)
__class__ = property(new_method_proxy(operator.attrgetter("__class__")))
__eq__ = new_method_proxy(operator.eq)
__ne__ = new_method_proxy(operator.ne)
__hash__ = new_method_proxy(hash)
# Dictionary methods support
__getitem__ = new_method_proxy(operator.getitem)
__setitem__ = new_method_proxy(operator.setitem)
__delitem__ = new_method_proxy(operator.delitem)
__len__ = new_method_proxy(len)
__contains__ = new_method_proxy(operator.contains)
# Workaround for http://bugs.python.org/issue12370
# NOTE(review): presumably captured at module level so methods below can call
# ``super`` while ``__class__`` is proxied — confirm against the linked issue.
_super = super
class SimpleLazyObject(LazyObject):
    """
    A lazy object initialized from any function.

    Designed for compound objects of unknown type. For builtins or objects of
    known type, use django.utils.functional.lazy.
    """

    def __init__(self, func):
        """
        Pass in a callable that returns the object to be wrapped.

        If copies are made of the resulting SimpleLazyObject, which can happen
        in various circumstances within Django, then you must ensure that the
        callable can be safely run more than once and will return the same
        value.
        """
        # Store via __dict__ so LazyObject.__setattr__ does not trigger _setup().
        self.__dict__['_setupfunc'] = func
        _super(SimpleLazyObject, self).__init__()

    def _setup(self):
        # Evaluate the factory exactly when the wrapped value is first needed.
        self._wrapped = self._setupfunc()

    # Return a meaningful representation of the lazy object for debugging
    # without evaluating the wrapped object.
    def __repr__(self):
        if self._wrapped is empty:
            repr_attr = self._setupfunc
        else:
            repr_attr = self._wrapped
        return '<%s: %r>' % (type(self).__name__, repr_attr)

    def __deepcopy__(self, memo):
        if self._wrapped is empty:
            # We have to use SimpleLazyObject, not self.__class__, because the
            # latter is proxied.
            result = SimpleLazyObject(self._setupfunc)
            memo[id(self)] = result
            return result
        return copy.deepcopy(self._wrapped, memo)
class lazy_property(property):
    """
    A property that works with subclasses by wrapping the decorated
    functions of the base class.
    """

    def __new__(cls, fget=None, fset=None, fdel=None, doc=None):
        # Each accessor is replaced by a shim that looks the method up *by
        # name* on the instance at call time, so a subclass override of the
        # decorated method is used even though the property object itself was
        # created on the base class. The original function's name is frozen
        # into the shim via the ``name=...`` default argument.
        if fget is not None:
            @wraps(fget)
            def fget(instance, instance_type=None, name=fget.__name__):
                return getattr(instance, name)()
        if fset is not None:
            @wraps(fset)
            def fset(instance, value, name=fset.__name__):
                return getattr(instance, name)(value)
        if fdel is not None:
            @wraps(fdel)
            def fdel(instance, name=fdel.__name__):
                return getattr(instance, name)()
        # Returns a plain ``property`` (not a lazy_property instance).
        return property(fget, fset, fdel, doc)
def partition(predicate, values):
    """
    Split *values* into two lists according to *predicate*.

    Returns a (falsy, truthy) pair of lists, e.g.:

    >>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])
    """
    rejected, accepted = [], []
    buckets = (rejected, accepted)
    for value in values:
        # The predicate's boolean result indexes the bucket pair directly.
        buckets[predicate(value)].append(value)
    return buckets
# Use the stdlib total_ordering where it is trustworthy; otherwise fall back
# to a vendored copy (see comment below).
if sys.version_info >= (2, 7, 2):
    from functools import total_ordering
else:
    # For Python < 2.7.2. total_ordering in versions prior to 2.7.2 is buggy.
    # See http://bugs.python.org/issue10042 for details. For these versions use
    # code borrowed from Python 2.7.3.
    def total_ordering(cls):
        """Class decorator that fills in missing ordering methods"""
        # For each "root" comparison the class may define, list how to derive
        # the other three rich comparisons from it (plus __eq__).
        convert = {
            '__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
                       ('__le__', lambda self, other: self < other or self == other),
                       ('__ge__', lambda self, other: not self < other)],
            '__le__': [('__ge__', lambda self, other: not self <= other or self == other),
                       ('__lt__', lambda self, other: self <= other and not self == other),
                       ('__gt__', lambda self, other: not self <= other)],
            '__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
                       ('__ge__', lambda self, other: self > other or self == other),
                       ('__le__', lambda self, other: not self > other)],
            '__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
                       ('__gt__', lambda self, other: self >= other and not self == other),
                       ('__lt__', lambda self, other: not self >= other)]
        }
        roots = set(dir(cls)) & set(convert)
        if not roots:
            raise ValueError('must define at least one ordering operation: < > <= >=')
        root = max(roots)       # prefer __lt__ to __le__ to __gt__ to __ge__
        for opname, opfunc in convert[root]:
            # Only synthesize operations the class did not define itself.
            if opname not in roots:
                opfunc.__name__ = opname
                opfunc.__doc__ = getattr(int, opname).__doc__
                setattr(cls, opname, opfunc)
        return cls
|
|
"""
Takes in the source of samples (e.g., superformular variables or glassware images),
and gets the xy coordinates of their contours.
data : shape representation using xy coordinates of the contours
data_l : shape representation after dimensionality reduction of data
Usage: python parametric_space.py
Author(s): Wei Chen (wchen459@umd.edu)
"""
import glob
import os
import sys
import ConfigParser
import numpy as np
from superformula import superformula, get_sf_parameters
import random
from data_processing import preprocess_input
from data_processing import divide_input
from util import create_dir, reduce_dim
def get_glass_xy(image_paths, n_samples, n_points, n_control_points):
    """Extract flattened xy contour coordinates from the first *n_samples*
    glassware images in *image_paths*."""
    # Imported lazily so the glass-image dependency is only needed for this source.
    from glass import process_image
    contours = []
    for i in range(n_samples):
        path = image_paths[i]
        print('Processing: ' + os.path.basename(path))
        contours.append(process_image(path, n_control_points, n_points))
    return contours
def get_superformula_xy(source_dir, n_samples, n_points):
    """Generate xy contour coordinates for n_samples superformula shapes.

    Variables are loaded from <source_dir>/variables.npy when it exists;
    otherwise they are sampled via source_sf() and saved there first.
    Returns a list of flattened [x0, y0, x1, y1, ...] coordinate arrays.
    """
    x_plots = []
    config = ConfigParser.ConfigParser()
    config.read('config.ini')
    orig_space_min = config.getfloat('Superformula', 'orig_space_min')
    orig_space_max = config.getfloat('Superformula', 'orig_space_max')
    alpha = config.getfloat('Superformula', 'nonlinearity')
    beta = config.getint('Superformula', 'n_clusters')
    fname = source_dir+'variables.npy'
    if not os.path.isfile(fname):  # Input .npy does not exist in source directory
        variables = source_sf(n_samples, alpha, beta, orig_space_min, orig_space_max)
        create_dir(source_dir)
        np.save(fname, variables)
        print 'Superformula variables saved in %s.' % fname
    else:
        print 'Using the existing variables.'
        variables = np.load(fname)
    # Map sampled (s, t) variables to superformula parameters, then trace shapes.
    parameters = get_sf_parameters(variables, alpha, beta)
    for index in range(n_samples):
        print(str(index+1) + ' - Processing: ' + str(parameters[index]))
        x, y = superformula(*parameters[index], num_points=n_points)
        # Interleave x and y into a single flat coordinate vector.
        xy = np.concatenate((x.reshape(-1,1), y.reshape(-1,1)), axis=1).flatten()
        x_plots.append(xy)
    return x_plots
def source_sf(n_samples, alpha, beta, orig_space_min, orig_space_max):
    """Sample n_samples (s, t) superformula variable pairs uniformly from
    [orig_space_min, orig_space_max].

    alpha and beta are accepted for interface compatibility but unused here.
    Returns an (n_samples, 2) numpy array.
    """
    return np.array([[random.uniform(orig_space_min, orig_space_max),
                      random.uniform(orig_space_min, orig_space_max)]
                     for _ in range(n_samples)])
def add_noise(data, noise_scale=0):
    """Perturb *data* in place with seeded Gaussian noise and return it.

    noise_scale expresses the desired noise-to-signal variance ratio; 0
    disables noise entirely. The noise standard deviation is absolute,
    scaled by the mean magnitude of the data.
    """
    if noise_scale == 0:
        return data
    np.random.seed(0)  # deterministic noise across runs
    ratio = np.sqrt(noise_scale / (1.0 - noise_scale))
    sigma = np.mean(np.abs(data)) * ratio
    data += np.random.normal(loc=0.0, scale=sigma, size=data.shape)
    return data
def initialize(verbose=0, raw_data=0):
    """Load (or build) the parametric shape dataset described by config.ini.

    Returns the raw coordinate array when raw_data is truthy; otherwise
    returns (data_list, source, sname, n_samples, n_points, noise_scale,
    source_dir), where data_list holds one array per cluster (or a single
    array when clustering is disabled).
    """
    config = ConfigParser.ConfigParser()
    config.read('config.ini')
    SOURCE_DIR = config.get('Global', 'SOURCE_DIR')
    source = config.get('Global', 'source')
    n_points = config.getint('Global', 'n_points')
    noise_scale = config.getfloat('Global', 'noise_scale')
    n_samples = config.getint('Global', 'n_samples')
    # Resolve the dataset name (sname) and directory from the configured source.
    if source == 'sf':
        alpha = config.getfloat('Superformula', 'nonlinearity')
        beta = config.getint('Superformula', 'n_clusters')
        sname = source + '-' + str(beta) + '-' + str(alpha)
        orig_space_min = config.getfloat('Superformula', 'orig_space_min')
        orig_space_max = config.getfloat('Superformula', 'orig_space_max')
        source_dir = SOURCE_DIR + 'sf-' + str(orig_space_min) + '-' + str(orig_space_max) + '/'
    elif source == 'glass' or source[:3] == 'sf-':
        sname = source
        source_dir = SOURCE_DIR + source + '/'
    else:
        print 'Wrong source!'
        sys.exit(0)
    create_dir(source_dir)
    # Get parametric data (reuse the cached .npy when available).
    fname = source_dir+'raw_parametric_%s.npy' % sname
    if os.path.isfile(fname):
        data = np.load(fname)
        n_samples = min(n_samples, data.shape[0])
        data = data[:n_samples]
    else:
        if source == 'glass':
            n_control_points = config.getint('Glass', 'n_control_points')
            image_paths = glob.glob(source_dir+"*.*")
            # Exclude any cached .npy file from the image list.
            image_paths.remove(*glob.glob(source_dir+"*.npy"))
            n_samples = min(n_samples, len(image_paths))
            x_plots = get_glass_xy(image_paths, n_samples, n_points, n_control_points)
        elif source == 'sf':
            x_plots = get_superformula_xy(source_dir, n_samples, n_points)
        else:
            print 'No source called %s!' % source
            sys.exit(0)
        # One row per sample: 2*n_points flattened xy coordinates.
        data = np.zeros((n_samples, 2*n_points))
        for index in range(n_samples):
            data[index,:] = x_plots[index].flatten()
        # Shuffle
        np.random.shuffle(data)
        # Centering (glass shapes keep their x position; others are centered in x too)
        if source == 'glass':
            data = preprocess_input(data, center_x=False)
        else:
            data = preprocess_input(data, center_x=True)
        np.save(fname, data)
        print 'Parametric data saved in %s.' % fname
    print('Source: '+sname+' | Points: '+str(n_points)+' | Samples: '+str(n_samples)+' | Noise: '+str(noise_scale))
    data = add_noise(data, noise_scale) # Add noise
    if raw_data:
        return data
    data_l, dim_increase = reduce_dim(data, plot=False) # reduce dimensionality
    data_list = []
    f0name = SOURCE_DIR+'raw_parametric_%s_%.4f_0.npy' % (sname, noise_scale)
    if config.getboolean('Global', 'cluster'):
        if not os.path.isfile(f0name):
            # Clustering
            print 'Clustering ...'
            cluster_indices = divide_input(data_l, verbose=verbose)
            # Divide .npy file: one cache file per cluster.
            c = 0
            for ci in cluster_indices:
                print 'Cluster ', c
                print 'Sample size: ', len(ci)
                fcname = SOURCE_DIR+'raw_parametric_%s_%.4f_%d.npy' % (sname, noise_scale, c)
                np.save(fcname, data[ci])
                print 'Parametric data saved in %s.' % fcname
                data_list.append(data[ci])
                c += 1
        else:
            # Directly load data of each cluster from a corresponding .npy file
            c = 0
            fcname = f0name
            while os.path.isfile(fcname):
                data_list.append(np.load(fcname))
                if verbose:
                    print 'Cluster ', c
                    print 'Sample size: ', len(data_list[c])
                c += 1
                fcname = SOURCE_DIR+'raw_parametric_%s_%.4f_%d.npy' % (sname, noise_scale, c)
    else:
        data_list.append(data)
    return data_list, source, sname, n_samples, n_points, noise_scale, source_dir
if __name__ == "__main__":
initialize(verbose=1)
|
|
#
# Handle the special case of the first scenario
#
self.notebook.switchScenario(0,scenarioType="Powder")
#
# Main tab: calculation program and input/output file names
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'vasp'
tab.settings['Output file name'] = 'OUTCAR'
tab.settings['Excel file name'] = 'application_note_mie.xlsx'
tab.settings['Script file name'] = 'application_note_mie.py'
tab.settings['QM program'] = 'vasp'
#
# Settings tab: phonon/IR options; all twelve mode widths set to 5 cm-1
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = True
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'average'
tab.settings['Optical permittivity edited'] = False
tab.sigmas_cm1 = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
#
# Scenario 0: Maxwell-Garnett effective medium, 10% volume fraction
#
tab = self.notebook.scenarios[0]
tab.settings['Legend'] = 'MG vf=10%'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 1: Maxwell-Garnett, 5% volume fraction
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[1]
tab.settings['Legend'] = 'MG vf=5%'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.11513919351635903
tab.settings['Volume fraction'] = 0.05000000000000002
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 2: Maxwell-Garnett, 1% volume fraction
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[2]
tab.settings['Legend'] = 'MG vf=1%'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.024364320548406322
tab.settings['Volume fraction'] = 0.01
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 3: Mie scattering, 10% volume fraction (compare against MG scenario 0)
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[3]
tab.settings['Legend'] = 'Mie vf=10%'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 4: Mie scattering, 5% volume fraction
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[4]
tab.settings['Legend'] = 'Mie vf=5%'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.11513919351635903
tab.settings['Volume fraction'] = 0.05000000000000002
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 5: Mie scattering, 1% volume fraction
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[5]
tab.settings['Legend'] = 'Mie vf=1%'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.024364320548406322
tab.settings['Volume fraction'] = 0.01
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenarios 6-12: Mie at fixed 10% volume fraction, sweeping particle size
# (0.1, 1.0, 1.5, 2.0, 3.0, 4.0, 5.0 microns)
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[6]
tab.settings['Legend'] = '0.1 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 0.1
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 7: 1.0 micron particles
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[7]
tab.settings['Legend'] = '1.0 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 1.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 8: 1.5 micron particles
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[8]
tab.settings['Legend'] = '1.5 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 1.5
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 9: 2.0 micron particles
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[9]
tab.settings['Legend'] = '2.0 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 2.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 10: 3.0 micron particles
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[10]
tab.settings['Legend'] = '3.0 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 3.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 11: 4.0 micron particles
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[11]
tab.settings['Legend'] = '4.0 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 4.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Scenario 12: 5.0 micron particles
#
self.notebook.addScenario(scenarioType="Powder")
tab = self.notebook.scenarios[12]
tab.settings['Legend'] = '5.0 mu'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'ptfe'
tab.settings['Matrix density'] = 2.2
tab.settings['Matrix permittivity'] = 2.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.21550194207682977
tab.settings['Volume fraction'] = 0.09999999999999999
tab.settings['Particle size(mu)'] = 5.0
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Mie'
tab.settings['Particle shape'] = 'Sphere'
#
# Analysis tab: frequency window and bond/connectivity parameters
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
# Viewer tab: 3D display colours, radii and animation settings
#
tab = self.notebook.viewerTab
tab.settings['Atom scaling'] = 0.5
tab.settings['Maximum displacement'] = 1.0
tab.settings['Bond colour'] = [80, 80, 80, 255]
tab.settings['Bond radius'] = 0.1
tab.settings['Cell colour'] = [255, 0, 0, 255]
tab.settings['Cell radius'] = 0.1
tab.settings['Background colour'] = [120, 120, 120, 255]
tab.settings['Arrow colour'] = [0, 255, 0, 255]
tab.settings['Arrow radius'] = 0.07
tab.settings['Number of phase steps'] = 41
tab.settings['Super Cell'] = [1, 1, 1]
#
# Fitter tab: experimental-vs-calculated spectrum fitting options
#
tab = self.notebook.fitterTab
tab.settings['Excel file name'] = ''
tab.settings['Plot title'] = 'Experimental and Calculated Spectral Comparison'
tab.settings['Fitting type'] = 'Minimise x-correlation'
tab.settings['Number of iterations'] = 20
tab.settings['Frequency scaling factor'] = 1.0
tab.settings['Optimise frequency scaling'] = False
tab.settings['Spectrum scaling'] = False
tab.settings['Spectrum scaling factor'] = 1.0
tab.settings['Independent y-axes'] = True
tab.settings['Spectral difference threshold'] = 0.05
tab.settings['HPFilter lambda'] = 7.0
tab.settings['Baseline removal'] = False
tab.settings['Scenario index'] = 0
#
# Plotting tab: output spectrum range, resolution and units
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 0
tab.settings['Maximum frequency'] = 500
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Number of atoms'] = 1
tab.settings['Plot type'] = 'Powder Molar Absorption'
tab.settings['concentration'] = 33.41796705902045
tab.settings['cell concentration'] = 33.41796705902045
|
|
from stat import S_IFREG
from functools import partial
import errno
import traceback
import sys
import os
from os.path import realpath
from threading import Lock
import subprocess
import yaml
from xattr import xattr
from fuse import FUSE
from fuse import LoggingMixIn, Operations, FuseOSError
# Python 2 compatibility: alias ``bytes`` to ``str`` when the builtin is
# missing so the rest of the module can use ``bytes`` uniformly.
if not hasattr(__builtins__, 'bytes'):
    bytes = str
class File(object):
    """
    I correspond to a real file or path.

    Each FUSE operation is implemented by delegating to the matching ``os``
    call on the real path stored in ``self.root``.
    """

    def __init__(self, fs, root):
        self.fs = fs  # owning filesystem object
        self.root = realpath(root)  # canonical on-disk path backing this node
        self.rwlock = Lock()  # serializes seek+read / seek+write pairs

    def __repr__(self):
        return '<%s 0x%x %r>' % (self.__class__.__name__, id(self), self.root)

    def child(self, segment):
        """Return the File for path segment *segment* under this node."""
        return File(self.fs, os.path.join(self.root, segment))

    def access(self, mode):
        if not os.access(self.root, mode):
            raise FuseOSError(errno.EACCES)

    def chmod(self, *args, **kwargs):
        os.chmod(self.root, *args, **kwargs)

    def chown(self, *args, **kwargs):
        os.chown(self.root, *args, **kwargs)

    def create(self, mode):
        return os.open(self.root, os.O_WRONLY | os.O_CREAT, mode)

    def flush(self, fh):
        return os.fsync(fh)

    def fsync(self, datasync, fh):
        # datasync is ignored; a full fsync is always performed.
        return os.fsync(fh)

    def getattr(self, fh=None):
        # lstat so symlinks report their own attributes, not their target's.
        st = os.lstat(self.root)
        return dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime',
            'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))

    def getxattr(self, name, position=0):
        try:
            return xattr(self.root).get(name)
        except IOError:
            # A missing attribute (or unsupported fs) is reported as empty.
            return ''

    def link(self, target, source):
        return os.link(source, target)

    def listxattr(self):
        return xattr(self.root).list()

    def mkdir(self, *args, **kwargs):
        return os.mkdir(self.root, *args, **kwargs)

    def mknod(self, *args, **kwargs):
        return os.mknod(self.root, *args, **kwargs)

    def open(self, *args, **kwargs):
        return os.open(self.root, *args, **kwargs)

    def read(self, size, offset, fh):
        # Lock so a concurrent write cannot interleave between seek and read.
        with self.rwlock:
            os.lseek(fh, offset, 0)
            return os.read(fh, size)

    def listRealChildren(self):
        """Names of the actual on-disk entries of this directory."""
        return os.listdir(self.root)

    def readdir(self, fh):
        return ['.', '..'] + self.listRealChildren()

    def readLink(self, *args, **kwargs):
        return os.readlink(self.root, *args, **kwargs)

    def release(self, fh):
        return os.close(fh)

    def rename(self, old, new):
        # NOTE(review): *new* is appended to self.root unchanged, which
        # presumes it is a path relative to this node's root — confirm
        # against the FUSE caller's argument convention.
        return os.rename(old, self.root + new)

    def rmdir(self, *args, **kwargs):
        return os.rmdir(self.root, *args, **kwargs)

    def setxattr(self, name, value, options, position=0):
        return xattr(self.root).set(name, value, options)

    def statfs(self):
        stv = os.statvfs(self.root)
        return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
            'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
            'f_frsize', 'f_namemax'))

    def symlink(self, target, source):
        return os.symlink(source, target)

    def truncate(self, length, fh=None):
        # Truncate by path (fh is ignored), via Python file semantics.
        with open(self.root, 'r+') as f:
            f.truncate(length)

    def unlink(self, *args, **kwargs):
        return os.unlink(self.root, *args, **kwargs)

    def utimens(self, *args, **kwargs):
        return os.utime(self.root, *args, **kwargs)

    def write(self, data, offset, fh):
        with self.rwlock:
            os.lseek(fh, offset, 0)
            return os.write(fh, data)
class DynamicAwareFile(File):
    """A File that augments its real directory contents with dynamic entries
    declared in a '.config.yml' file inside the directory."""

    def child(self, segment):
        """Return the child for *segment*: the real on-disk entry if it
        exists, otherwise the dynamic (script-backed) file from the config."""
        if segment in self.listRealChildren():
            return DynamicAwareFile(self.fs, os.path.join(self.root, segment))
        else:
            return self.dynamicSettings().getFile(self.fs, segment)

    def dynamicSettings(self):
        """Parse this directory's .config.yml (if any) into DynamicSettings.

        Fix: the config file handle was previously opened and never closed;
        a ``with`` block now releases it deterministically.
        """
        config_file = os.path.join(self.root, '.config.yml')
        data = []
        if os.path.exists(config_file):
            # safe_load refuses arbitrary YAML tags (no object construction).
            with open(config_file, 'rb') as fh:
                data = yaml.safe_load(fh)
        return DynamicSettings(config_file, data)

    # ------- fuse stuff
    def readdir(self, fh):
        """List real directory entries plus the declared dynamic filenames."""
        static = File.readdir(self, fh)
        return static + self.dynamicSettings().listFiles()
class DynamicSettings(object):
    """Dynamic-file declarations parsed from a directory's .config.yml."""

    def __init__(self, config_file, data):
        # ``data`` may be None (missing/empty config); normalize to a list.
        self.data = data or []
        self.config_file = config_file

    def listFiles(self):
        """Names of every declared dynamic file."""
        return [entry['filename'] for entry in self.data]

    def fullfilename(self, filename):
        """Path of *filename* resolved next to the config file."""
        return os.path.join(os.path.dirname(self.config_file), filename)

    def getFile(self, fs, filename):
        """Build the ScriptFile declared for *filename*; None if undeclared."""
        for entry in self.data:
            if entry['filename'] != filename:
                continue
            cacher = fs.getCacher(self.fullfilename(filename),
                                  entry.get('cache'))
            workdir = entry.get('workdir', '') or os.path.dirname(self.config_file)
            return ScriptFile(fs=fs,
                              workdir=workdir,
                              out_script=entry['out_script'],
                              cacher=cacher,
                              env=entry.get('env', None))
class NoCacher(object):
    """Pass-through cacher: invokes the wrapped function on every call."""

    def __call__(self, func, *args, **kwargs):
        return func(*args, **kwargs)
class StatCacher(object):
    """Memoize a single call result, invalidated by the mtime of *path*.

    With recurse=True the newest mtime among all directories under *path*
    is used, so a change anywhere in the tree invalidates the cache.
    """

    def __init__(self, path, recurse=False):
        self._cached_mtime = None
        self._cached_val = None
        self.path = os.path.abspath(path)
        self.recurse = recurse

    def _directories(self):
        # Yield every directory path in the tree, starting with self.path.
        for dirpath, _dirnames, _filenames in os.walk(self.path):
            yield dirpath

    def get_mtime(self):
        if not self.recurse:
            return os.stat(self.path).st_mtime
        return max(os.stat(d).st_mtime for d in self._directories())

    def __call__(self, func, *args, **kwargs):
        current = self.get_mtime()
        if current != self._cached_mtime:
            # Stale (or first use): recompute and remember the new mtime.
            self._cached_val = func(*args, **kwargs)
            self._cached_mtime = current
        return self._cached_val
class ScriptFile(object):
    """A virtual, read-only file whose contents are a shell script's stdout.

    Content generation may be memoized through a *cacher* callable.  All
    mutating FUSE operations are rejected with EACCES.
    """

    def __init__(self, fs, workdir, out_script, cacher=None,
                 env=None):
        self.out_script = out_script
        self.workdir = workdir
        self.env = env or {}
        self.fs = fs
        # Route content generation through the cacher (if any) so the
        # script only re-runs when the cacher decides output is stale.
        if cacher:
            self.getContents = partial(cacher, self._runOutputScript)
        else:
            self.getContents = self._runOutputScript

    def _runOutputScript(self):
        """Run out_script in workdir and return its stdout (bytes).

        Failures are rendered into the file body as a traceback instead of
        being propagated, so a broken script still yields readable output.
        """
        try:
            args = self.out_script
            env = os.environ.copy()
            env.update(self.env)
            env['ROOT'] = self.fs.mountpoint
            p = subprocess.Popen(args,
                                 shell=True,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env,
                                 cwd=self.workdir)
            # BUG FIX: use b'' (valid py2 and py3); the script gets empty
            # stdin and its pipes are drained to avoid deadlock.
            out, err = p.communicate(b'')
            return out
        except Exception:
            # BUG FIX: traceback.format_exc() takes no exception argument;
            # the old format_exc(e) treated the exception as the 'limit'
            # parameter and raised on py3.
            return traceback.format_exc()

    def get_size(self):
        """Current size of the (possibly cached) script output."""
        return len(self.getContents())

    def access(self, mode):
        pass

    def chmod(self, *args, **kwargs):
        # BUG FIX: removed unreachable os.chmod() that followed the raise
        # (it also referenced a nonexistent self.root attribute).
        raise FuseOSError(errno.EACCES)

    def chown(self, *args, **kwargs):
        # BUG FIX: removed unreachable os.chown() after the raise.
        raise FuseOSError(errno.EACCES)

    def create(self, mode):
        raise FuseOSError(errno.EACCES)

    def flush(self, fh):
        return os.fsync(fh)

    def fsync(self, datasync, fh):
        return os.fsync(fh)

    def getattr(self, fh=None):
        # Read-only regular file (mode 0o440); timestamps fixed at epoch.
        # 0o440 replaces the py2-only literal 0440.
        return dict(
            st_mode=(S_IFREG | 0o440),
            st_nlink=1,
            st_size=self.get_size(),
            st_ctime=0,
            st_mtime=0,
            st_atime=0)

    getxattr = None

    def link(self, target, source):
        raise FuseOSError(errno.EACCES)

    listxattr = None

    def mkdir(self, *args, **kwargs):
        raise FuseOSError(errno.EACCES)

    def mknod(self, *args, **kwargs):
        raise FuseOSError(errno.EACCES)

    def open(self, *args, **kwargs):
        # No per-open state is kept; 0 is a valid dummy handle.
        return 0

    def read(self, size, offset, fh):
        return self.getContents()[offset:offset + size]

    def readdir(self, fh):
        raise FuseOSError(errno.EACCES)

    def readLink(self, *args, **kwargs):
        raise FuseOSError(errno.EACCES)

    def release(self, fh):
        raise FuseOSError(errno.EACCES)

    def rename(self, old, new):
        raise FuseOSError(errno.EACCES)

    def rmdir(self, *args, **kwargs):
        raise FuseOSError(errno.EACCES)

    def symlink(self, target, source):
        raise FuseOSError(errno.EACCES)

    def truncate(self, length, fh=None):
        raise FuseOSError(errno.EACCES)

    def unlink(self, *args, **kwargs):
        raise FuseOSError(errno.EACCES)

    def utimens(self, *args, **kwargs):
        raise FuseOSError(errno.EACCES)

    def write(self, data, offset, fh):
        raise FuseOSError(errno.EACCES)
class FileSystem(LoggingMixIn, Operations):
    """FUSE operations object that delegates every call to a resource.

    Each FUSE operation resolves the target path to a resource object and
    invokes the resource's method of the same name.
    """

    def __init__(self, root, mountpoint):
        self.root = realpath(root)
        self.mountpoint = realpath(mountpoint)
        # BUG FIX: print() function form works on both Python 2 and 3
        # (the old bare print statement is a SyntaxError on py3).
        print('root %r -> mount %r' % (self.root, self.mountpoint))
        self.rwlock = Lock()
        self._caches = {}

    def onresource(name):
        """Build a dispatcher forwarding the named op to the path's resource."""
        def func(self, path, *args, **kwargs):
            resource = self.resource(path)
            method = getattr(resource, name)
            # Resources disable an operation by setting the attribute to a
            # falsy value (e.g. ``getxattr = None`` on ScriptFile).
            if not method:
                raise FuseOSError(errno.EACCES)
            # NOTE: removed a no-op try/except that only re-raised.
            return method(*args, **kwargs)
        return func

    def getCacher(self, filename, cache_settings):
        """Return (and memoize per filename) the configured cacher."""
        if filename not in self._caches:
            if cache_settings is None:
                self._caches[filename] = NoCacher()
            else:
                kwargs = cache_settings.copy()
                # 'method' selects the cacher type; presumably only the
                # stat-based cacher exists today -- TODO confirm.
                kwargs.pop('method')
                kwargs['path'] = os.path.join(os.path.dirname(filename), kwargs['path'])
                self._caches[filename] = StatCacher(**kwargs)
        return self._caches[filename]

    def __call__(self, op, path, *args):
        return super(FileSystem, self).__call__(op, path, *args)

    def resource(self, path):
        """Resolve *path* segment by segment from the root resource."""
        segments = path.lstrip('/').split('/')
        node = DynamicAwareFile(self, self.root)
        for segment in segments:
            if not segment:
                continue
            node = node.child(segment)
            if not node:
                # Unknown name: fall back to a plain on-disk resource.
                return DynamicAwareFile(self, os.path.join(self.root, path.lstrip('/')))
        return node

    access = onresource('access')
    chmod = onresource('chmod')
    chown = onresource('chown')
    create = onresource('create')
    flush = onresource('flush')
    fsync = onresource('fsync')
    getattr = onresource('getattr')
    getxattr = onresource('getxattr')
    link = onresource('link')
    listxattr = onresource('listxattr')
    mkdir = onresource('mkdir')
    mknod = onresource('mknod')
    open = onresource('open')
    read = onresource('read')
    readdir = onresource('readdir')
    readlink = onresource('readlink')
    release = onresource('release')
    rename = onresource('rename')
    rmdir = onresource('rmdir')
    statfs = onresource('statfs')
    symlink = onresource('symlink')
    truncate = onresource('truncate')
    unlink = onresource('unlink')
    utimens = onresource('utimens')
    write = onresource('write')
def run():
    """CLI entry point: ``prog <basedir> <mountpoint>``.

    Ensures the mountpoint exists, then mounts the dynamic filesystem in
    the foreground with direct I/O (virtual file sizes are dynamic, so
    kernel caching of sizes must be bypassed).
    """
    # BUG FIX: fail with a usage message instead of an IndexError when
    # arguments are missing.
    if len(sys.argv) < 3:
        sys.exit('usage: %s <basedir> <mountpoint>' % sys.argv[0])
    basedir = sys.argv[1]
    mountpoint = sys.argv[2]
    # BUG FIX: create the mountpoint before constructing the filesystem,
    # so realpath() in FileSystem.__init__ sees the real directory.
    if not os.path.exists(mountpoint):
        os.makedirs(mountpoint)
    fs = FileSystem(basedir, mountpoint)
    FUSE(fs, mountpoint, direct_io=True, foreground=True)
# Script entry point: mount the dynamic filesystem described on argv.
if __name__ == '__main__':
    run()
|
|
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import hashlib
import importlib
import os
import os.path
import socket
import struct
import tempfile
import eventlet
import mock
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_context import context as common_context
from oslo_context import fixture as context_fixture
from oslo_utils import encodeutils
from oslo_utils import timeutils
from oslo_utils import units
import six
import nova
from nova import context
from nova import exception
from nova import test
from nova import utils
CONF = cfg.CONF
class GenericUtilsTestCase(test.NoDBTestCase):
    """Tests for assorted helpers in nova.utils."""

    def test_parse_server_string(self):
        result = utils.parse_server_string('::1')
        self.assertEqual(('::1', ''), result)
        result = utils.parse_server_string('[::1]:8773')
        self.assertEqual(('::1', '8773'), result)
        result = utils.parse_server_string('2001:db8::192.168.1.1')
        self.assertEqual(('2001:db8::192.168.1.1', ''), result)
        result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773')
        self.assertEqual(('2001:db8::192.168.1.1', '8773'), result)
        result = utils.parse_server_string('192.168.1.1')
        self.assertEqual(('192.168.1.1', ''), result)
        result = utils.parse_server_string('192.168.1.2:8773')
        self.assertEqual(('192.168.1.2', '8773'), result)
        result = utils.parse_server_string('192.168.1.3')
        self.assertEqual(('192.168.1.3', ''), result)
        result = utils.parse_server_string('www.example.com:8443')
        self.assertEqual(('www.example.com', '8443'), result)
        result = utils.parse_server_string('www.example.com')
        self.assertEqual(('www.example.com', ''), result)
        # error case
        result = utils.parse_server_string('www.exa:mple.com:8443')
        self.assertEqual(('', ''), result)
        result = utils.parse_server_string('')
        self.assertEqual(('', ''), result)

    def test_hostname_unicode_sanitization(self):
        hostname = u"\u7684.test.example.com"
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_periods(self):
        hostname = "....test.example.com..."
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_dashes(self):
        hostname = "----test.example.com---"
        self.assertEqual("test.example.com",
                         utils.sanitize_hostname(hostname))

    def test_hostname_sanitize_characters(self):
        hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
        self.assertEqual("91----test-host.example.com-0",
                         utils.sanitize_hostname(hostname))

    def test_hostname_translate(self):
        hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
        self.assertEqual("hello", utils.sanitize_hostname(hostname))

    def test_generate_password(self):
        password = utils.generate_password()
        self.assertTrue([c for c in password if c in '0123456789'])
        self.assertTrue([c for c in password
                         if c in 'abcdefghijklmnopqrstuvwxyz'])
        self.assertTrue([c for c in password
                         if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])

    def test_read_file_as_root(self):
        def fake_execute(*args, **kwargs):
            if args[1] == 'bad':
                raise processutils.ProcessExecutionError()
            return 'fakecontents', None

        self.stubs.Set(utils, 'execute', fake_execute)
        contents = utils.read_file_as_root('good')
        self.assertEqual(contents, 'fakecontents')
        self.assertRaises(exception.FileNotFound,
                          utils.read_file_as_root, 'bad')

    def test_temporary_chown(self):
        def fake_execute(*args, **kwargs):
            if args[0] == 'chown':
                fake_execute.uid = args[1]
        self.stubs.Set(utils, 'execute', fake_execute)

        with tempfile.NamedTemporaryFile() as f:
            with utils.temporary_chown(f.name, owner_uid=2):
                self.assertEqual(fake_execute.uid, 2)
            self.assertEqual(fake_execute.uid, os.getuid())

    def test_xhtml_escape(self):
        # BUG FIX: the expected values had their HTML entities decoded by a
        # bad extraction (''foo'' was not even valid syntax); restored the
        # escaped forms xhtml_escape actually returns.
        self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
        self.assertEqual('&#39;foo&#39;', utils.xhtml_escape("'foo'"))
        self.assertEqual('&amp;', utils.xhtml_escape('&'))
        self.assertEqual('&gt;', utils.xhtml_escape('>'))
        self.assertEqual('&lt;', utils.xhtml_escape('<'))
        self.assertEqual('&lt;foo&gt;', utils.xhtml_escape('<foo>'))

    def test_is_valid_ipv6_cidr(self):
        self.assertTrue(utils.is_valid_ipv6_cidr("2600::/64"))
        self.assertTrue(utils.is_valid_ipv6_cidr(
            "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254/48"))
        self.assertTrue(utils.is_valid_ipv6_cidr(
            "0000:0000:0000:0000:0000:0000:0000:0001/32"))
        self.assertTrue(utils.is_valid_ipv6_cidr(
            "0000:0000:0000:0000:0000:0000:0000:0001"))
        self.assertFalse(utils.is_valid_ipv6_cidr("foo"))
        self.assertFalse(utils.is_valid_ipv6_cidr("127.0.0.1"))

    def test_get_shortened_ipv6(self):
        self.assertEqual("abcd:ef01:2345:6789:abcd:ef01:c0a8:fefe",
                         utils.get_shortened_ipv6(
                             "abcd:ef01:2345:6789:abcd:ef01:192.168.254.254"))
        self.assertEqual("::1", utils.get_shortened_ipv6(
            "0000:0000:0000:0000:0000:0000:0000:0001"))
        self.assertEqual("caca::caca:0:babe:201:102",
                         utils.get_shortened_ipv6(
                             "caca:0000:0000:caca:0000:babe:0201:0102"))
        self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
                          "127.0.0.1")
        self.assertRaises(netaddr.AddrFormatError, utils.get_shortened_ipv6,
                          "failure")

    def test_get_shortened_ipv6_cidr(self):
        self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
            "2600:0000:0000:0000:0000:0000:0000:0000/64"))
        self.assertEqual("2600::/64", utils.get_shortened_ipv6_cidr(
            "2600::1/64"))
        self.assertRaises(netaddr.AddrFormatError,
                          utils.get_shortened_ipv6_cidr,
                          "127.0.0.1")
        self.assertRaises(netaddr.AddrFormatError,
                          utils.get_shortened_ipv6_cidr,
                          "failure")

    def test_safe_ip_format(self):
        self.assertEqual("[::1]", utils.safe_ip_format("::1"))
        self.assertEqual("127.0.0.1", utils.safe_ip_format("127.0.0.1"))
        self.assertEqual("[::ffff:127.0.0.1]", utils.safe_ip_format(
            "::ffff:127.0.0.1"))
        self.assertEqual("localhost", utils.safe_ip_format("localhost"))

    def test_get_hash_str(self):
        base_str = b"foo"
        base_unicode = u"foo"
        value = hashlib.md5(base_str).hexdigest()
        self.assertEqual(
            value, utils.get_hash_str(base_str))
        self.assertEqual(
            value, utils.get_hash_str(base_unicode))

    def test_use_rootwrap(self):
        self.flags(disable_rootwrap=False, group='workarounds')
        self.flags(rootwrap_config='foo')
        cmd = utils._get_root_helper()
        self.assertEqual('sudo nova-rootwrap foo', cmd)

    def test_use_sudo(self):
        self.flags(disable_rootwrap=True, group='workarounds')
        cmd = utils._get_root_helper()
        self.assertEqual('sudo', cmd)

    def test_ssh_execute(self):
        expected_args = ('ssh', '-o', 'BatchMode=yes',
                         'remotehost', 'ls', '-l')
        with mock.patch('nova.utils.execute') as mock_method:
            utils.ssh_execute('remotehost', 'ls', '-l')
        mock_method.assert_called_once_with(*expected_args)
class TestCachedFile(test.NoDBTestCase):
    """Tests for utils.read_cached_file/delete_cached_file."""

    @mock.patch('os.path.getmtime', return_value=1)
    def test_read_cached_file(self, getmtime):
        # mtime matches the cache entry, so the cached data is returned.
        utils._FILE_CACHE = {
            '/this/is/a/fake': {"data": 1123, "mtime": 1}
        }
        fresh, data = utils.read_cached_file("/this/is/a/fake")
        fdata = utils._FILE_CACHE['/this/is/a/fake']["data"]
        self.assertEqual(fdata, data)

    @mock.patch('os.path.getmtime', return_value=2)
    def test_read_modified_cached_file(self, getmtime):
        # mtime is newer than the cache entry, so the file is re-read.
        utils._FILE_CACHE = {
            '/this/is/a/fake': {"data": 1123, "mtime": 1}
        }
        fake_contents = "lorem ipsum"
        with mock.patch('six.moves.builtins.open',
                        mock.mock_open(read_data=fake_contents)):
            fresh, data = utils.read_cached_file("/this/is/a/fake")
        self.assertEqual(data, fake_contents)
        self.assertTrue(fresh)

    def test_delete_cached_file(self):
        filename = '/this/is/a/fake/deletion/of/cached/file'
        utils._FILE_CACHE = {
            filename: {"data": 1123, "mtime": 1}
        }
        self.assertIn(filename, utils._FILE_CACHE)
        utils.delete_cached_file(filename)
        self.assertNotIn(filename, utils._FILE_CACHE)

    def test_delete_cached_file_not_exist(self):
        # We expect that if cached file does not exist no Exception raised.
        filename = '/this/is/a/fake/deletion/attempt/of/not/cached/file'
        self.assertNotIn(filename, utils._FILE_CACHE)
        utils.delete_cached_file(filename)
        self.assertNotIn(filename, utils._FILE_CACHE)
class VPNPingTestCase(test.NoDBTestCase):
    """Unit tests for utils.vpn_ping()."""
    def setUp(self):
        super(VPNPingTestCase, self).setUp()
        self.port = 'fake'
        self.address = 'fake'
        self.session_id = 0x1234
        # Wire format of the ping reply built by fake_reply_packet().
        self.fmt = '!BQxxxxxQxxxx'

    def fake_reply_packet(self, pkt_id=0x40):
        # Build a server reply carrying pkt_id and our session id.
        return struct.pack(self.fmt, pkt_id, 0x0, self.session_id)

    def setup_socket(self, mock_socket, return_value, side_effect=None):
        # Configure the mocked socket so recv() returns or raises as asked.
        socket_obj = mock.MagicMock()
        if side_effect is not None:
            socket_obj.recv.side_effect = side_effect
        else:
            socket_obj.recv.return_value = return_value
        mock_socket.return_value = socket_obj

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_timeout(self, mock_socket):
        """Server doesn't reply within timeout."""
        self.setup_socket(mock_socket, None, socket.timeout)
        rc = utils.vpn_ping(self.address, self.port,
                            session_id=self.session_id)
        self.assertFalse(rc)

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_bad_len(self, mock_socket):
        """Test a short/invalid server reply."""
        self.setup_socket(mock_socket, 'fake_reply')
        rc = utils.vpn_ping(self.address, self.port,
                            session_id=self.session_id)
        self.assertFalse(rc)

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_bad_id(self, mock_socket):
        """Server sends an unknown packet ID."""
        self.setup_socket(mock_socket, self.fake_reply_packet(pkt_id=0x41))
        rc = utils.vpn_ping(self.address, self.port,
                            session_id=self.session_id)
        self.assertFalse(rc)

    @mock.patch.object(socket, 'socket')
    def test_vpn_ping_ok(self, mock_socket):
        # Happy path: well-formed reply with the expected id and session.
        self.setup_socket(mock_socket, self.fake_reply_packet())
        rc = utils.vpn_ping(self.address, self.port,
                            session_id=self.session_id)
        self.assertTrue(rc)
class MonkeyPatchTestCase(test.NoDBTestCase):
    """Unit test for utils.monkey_patch()."""
    def setUp(self):
        super(MonkeyPatchTestCase, self).setUp()
        self.example_package = 'nova.tests.unit.monkey_patch_example.'
        # Patch only example_a with the example decorator; example_b stays
        # untouched so we can assert the decorator's selectivity below.
        self.flags(
            monkey_patch=True,
            monkey_patch_modules=[self.example_package + 'example_a' + ':'
                                  + self.example_package + 'example_decorator'])

    def test_monkey_patch(self):
        utils.monkey_patch()
        nova.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
        from nova.tests.unit.monkey_patch_example import example_a
        from nova.tests.unit.monkey_patch_example import example_b

        self.assertEqual('Example function', example_a.example_function_a())
        exampleA = example_a.ExampleClassA()
        exampleA.example_method()
        ret_a = exampleA.example_method_add(3, 5)
        self.assertEqual(ret_a, 8)

        self.assertEqual('Example function', example_b.example_function_b())
        exampleB = example_b.ExampleClassB()
        exampleB.example_method()
        ret_b = exampleB.example_method_add(3, 5)
        self.assertEqual(ret_b, 8)
        package_a = self.example_package + 'example_a.'
        # example_a's functions/methods must have been recorded...
        self.assertIn(package_a + 'example_function_a',
                      nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)

        self.assertIn(package_a + 'ExampleClassA.example_method',
                      nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        self.assertIn(package_a + 'ExampleClassA.example_method_add',
                      nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        package_b = self.example_package + 'example_b.'
        # ...while example_b's must not (it was not listed for patching).
        self.assertNotIn(package_b + 'example_function_b',
                         nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        self.assertNotIn(package_b + 'ExampleClassB.example_method',
                         nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
        self.assertNotIn(package_b + 'ExampleClassB.example_method_add',
                         nova.tests.unit.monkey_patch_example.CALLED_FUNCTION)
class MonkeyPatchDefaultTestCase(test.NoDBTestCase):
    """Unit test for default monkey_patch_modules value."""
    def setUp(self):
        super(MonkeyPatchDefaultTestCase, self).setUp()
        self.flags(
            monkey_patch=True)

    def test_monkey_patch_default_mod(self):
        # monkey_patch_modules is defined to be
        # <module_to_patch>:<decorator_to_patch_with>
        # Here we check that both parts of the default values are
        # valid
        for module in CONF.monkey_patch_modules:
            m = module.split(':', 1)
            # Check we can import the module to be patched
            importlib.import_module(m[0])
            # check the decorator is valid
            decorator_name = m[1].rsplit('.', 1)
            decorator_module = importlib.import_module(decorator_name[0])
            getattr(decorator_module, decorator_name[1])
class AuditPeriodTest(test.NoDBTestCase):
    """Tests for utils.last_completed_audit_period with every unit and
    offset form ('hour', 'hour@MM', 'day', 'day@HH', 'month', 'month@DD',
    'year', 'year@MM'), pinned to a fixed "now" of 2012-03-05 08:12:23.
    """

    def setUp(self):
        super(AuditPeriodTest, self).setUp()
        # a fairly random time to test with
        self.test_time = datetime.datetime(second=23,
                                           minute=12,
                                           hour=8,
                                           day=5,
                                           month=3,
                                           year=2012)
        timeutils.set_time_override(override_time=self.test_time)

    def tearDown(self):
        timeutils.clear_time_override()
        super(AuditPeriodTest, self).tearDown()

    def test_hour(self):
        begin, end = utils.last_completed_audit_period(unit='hour')
        self.assertEqual(begin, datetime.datetime(
                                           hour=7,
                                           day=5,
                                           month=3,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           hour=8,
                                           day=5,
                                           month=3,
                                           year=2012))

    def test_hour_with_offset_before_current(self):
        # Offset minute (10) is before now's minute (12): current hour's
        # offset period has started, so the previous one is complete.
        begin, end = utils.last_completed_audit_period(unit='hour@10')
        self.assertEqual(begin, datetime.datetime(
                                           minute=10,
                                           hour=7,
                                           day=5,
                                           month=3,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           minute=10,
                                           hour=8,
                                           day=5,
                                           month=3,
                                           year=2012))

    def test_hour_with_offset_after_current(self):
        # Offset minute (30) is after now's minute (12): the period
        # starting this hour has not begun yet, so go back one more hour.
        begin, end = utils.last_completed_audit_period(unit='hour@30')
        self.assertEqual(begin, datetime.datetime(
                                           minute=30,
                                           hour=6,
                                           day=5,
                                           month=3,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           minute=30,
                                           hour=7,
                                           day=5,
                                           month=3,
                                           year=2012))

    def test_day(self):
        begin, end = utils.last_completed_audit_period(unit='day')
        self.assertEqual(begin, datetime.datetime(
                                           day=4,
                                           month=3,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           day=5,
                                           month=3,
                                           year=2012))

    def test_day_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='day@6')
        self.assertEqual(begin, datetime.datetime(
                                           hour=6,
                                           day=4,
                                           month=3,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           hour=6,
                                           day=5,
                                           month=3,
                                           year=2012))

    def test_day_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='day@10')
        self.assertEqual(begin, datetime.datetime(
                                           hour=10,
                                           day=3,
                                           month=3,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           hour=10,
                                           day=4,
                                           month=3,
                                           year=2012))

    def test_month(self):
        begin, end = utils.last_completed_audit_period(unit='month')
        self.assertEqual(begin, datetime.datetime(
                                           day=1,
                                           month=2,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           day=1,
                                           month=3,
                                           year=2012))

    def test_month_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='month@2')
        self.assertEqual(begin, datetime.datetime(
                                           day=2,
                                           month=2,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           day=2,
                                           month=3,
                                           year=2012))

    def test_month_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='month@15')
        self.assertEqual(begin, datetime.datetime(
                                           day=15,
                                           month=1,
                                           year=2012))
        self.assertEqual(end, datetime.datetime(
                                           day=15,
                                           month=2,
                                           year=2012))

    def test_year(self):
        begin, end = utils.last_completed_audit_period(unit='year')
        self.assertEqual(begin, datetime.datetime(
                                           day=1,
                                           month=1,
                                           year=2011))
        self.assertEqual(end, datetime.datetime(
                                           day=1,
                                           month=1,
                                           year=2012))

    def test_year_with_offset_before_current(self):
        begin, end = utils.last_completed_audit_period(unit='year@2')
        self.assertEqual(begin, datetime.datetime(
                                           day=1,
                                           month=2,
                                           year=2011))
        self.assertEqual(end, datetime.datetime(
                                           day=1,
                                           month=2,
                                           year=2012))

    def test_year_with_offset_after_current(self):
        begin, end = utils.last_completed_audit_period(unit='year@6')
        self.assertEqual(begin, datetime.datetime(
                                           day=1,
                                           month=6,
                                           year=2010))
        self.assertEqual(end, datetime.datetime(
                                           day=1,
                                           month=6,
                                           year=2011))
class MkfsTestCase(test.NoDBTestCase):
    """Tests for utils.mkfs command construction (uses mox expectations;
    the recorded execute() calls must match in order).
    """

    def test_mkfs(self):
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
                      run_as_root=False)
        utils.execute('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
                      run_as_root=False)
        utils.execute('mkswap', '/my/swap/block/dev',
                      run_as_root=False)
        self.mox.ReplayAll()

        utils.mkfs('ext4', '/my/block/dev')
        utils.mkfs('msdos', '/my/msdos/block/dev')
        utils.mkfs('swap', '/my/swap/block/dev')

    def test_mkfs_with_label(self):
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F',
                      '-L', 'ext4-vol', '/my/block/dev', run_as_root=False)
        utils.execute('mkfs', '-t', 'msdos',
                      '-n', 'msdos-vol', '/my/msdos/block/dev',
                      run_as_root=False)
        utils.execute('mkswap', '-L', 'swap-vol', '/my/swap/block/dev',
                      run_as_root=False)
        self.mox.ReplayAll()

        utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
        utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
        utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')
class LastBytesTestCase(test.NoDBTestCase):
    """Test the last_bytes() utility method."""
    def setUp(self):
        super(LastBytesTestCase, self).setUp()
        self.f = six.BytesIO(b'1234567890')

    def test_truncated(self):
        self.f.seek(0, os.SEEK_SET)
        out, remaining = utils.last_bytes(self.f, 5)
        self.assertEqual(out, b'67890')
        self.assertTrue(remaining > 0)

    def test_read_all(self):
        self.f.seek(0, os.SEEK_SET)
        out, remaining = utils.last_bytes(self.f, 1000)
        self.assertEqual(out, b'1234567890')
        self.assertFalse(remaining > 0)

    def test_seek_too_far_real_file(self):
        # StringIO doesn't raise IOError if you seek past the start of the
        # file; a real file does, which last_bytes must handle.
        with tempfile.TemporaryFile() as flo:
            content = b'1234567890'
            flo.write(content)
            self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
class MetadataToDictTestCase(test.NoDBTestCase):
    """Round-trip tests for metadata list <-> dict conversion helpers."""

    def test_metadata_to_dict(self):
        self.assertEqual(utils.metadata_to_dict(
                [{'key': 'foo1', 'value': 'bar'},
                 {'key': 'foo2', 'value': 'baz'}]),
                {'foo1': 'bar', 'foo2': 'baz'})

    def test_metadata_to_dict_empty(self):
        self.assertEqual(utils.metadata_to_dict([]), {})

    def test_dict_to_metadata(self):
        def sort_key(adict):
            return sorted(adict.items())

        # dict ordering is not guaranteed, so compare sorted lists.
        metadata = utils.dict_to_metadata(dict(foo1='bar1', foo2='bar2'))
        expected = [{'key': 'foo1', 'value': 'bar1'},
                    {'key': 'foo2', 'value': 'bar2'}]
        self.assertEqual(sorted(metadata, key=sort_key),
                         sorted(expected, key=sort_key))

    def test_dict_to_metadata_empty(self):
        self.assertEqual(utils.dict_to_metadata({}), [])
class WrappedCodeTestCase(test.NoDBTestCase):
    """Test the get_wrapped_function utility method."""
    def _wrapper(self, function):
        # A functools.wraps-style decorator used to bury the original
        # function under one or more layers of wrapping.
        @functools.wraps(function)
        def decorated_function(self, *args, **kwargs):
            function(self, *args, **kwargs)
        return decorated_function

    def test_single_wrapped(self):
        @self._wrapper
        def wrapped(self, instance, red=None, blue=None):
            pass

        func = utils.get_wrapped_function(wrapped)
        func_code = func.__code__
        self.assertEqual(4, len(func_code.co_varnames))
        self.assertIn('self', func_code.co_varnames)
        self.assertIn('instance', func_code.co_varnames)
        self.assertIn('red', func_code.co_varnames)
        self.assertIn('blue', func_code.co_varnames)

    def test_double_wrapped(self):
        @self._wrapper
        @self._wrapper
        def wrapped(self, instance, red=None, blue=None):
            pass

        func = utils.get_wrapped_function(wrapped)
        func_code = func.__code__
        self.assertEqual(4, len(func_code.co_varnames))
        self.assertIn('self', func_code.co_varnames)
        self.assertIn('instance', func_code.co_varnames)
        self.assertIn('red', func_code.co_varnames)
        self.assertIn('blue', func_code.co_varnames)

    def test_triple_wrapped(self):
        @self._wrapper
        @self._wrapper
        @self._wrapper
        def wrapped(self, instance, red=None, blue=None):
            pass

        func = utils.get_wrapped_function(wrapped)
        func_code = func.__code__
        self.assertEqual(4, len(func_code.co_varnames))
        self.assertIn('self', func_code.co_varnames)
        self.assertIn('instance', func_code.co_varnames)
        self.assertIn('red', func_code.co_varnames)
        self.assertIn('blue', func_code.co_varnames)
class ExpectedArgsTestCase(test.NoDBTestCase):
    """Tests for the utils.expects_func_args decorator factory."""

    def test_passes(self):
        @utils.expects_func_args('foo', 'baz')
        def dec(f):
            return f

        @dec
        def func(foo, bar, baz="lol"):
            pass

    def test_raises(self):
        @utils.expects_func_args('foo', 'baz')
        def dec(f):
            return f

        def func(bar, baz):
            pass

        # 'foo' is missing from func's signature.
        self.assertRaises(TypeError, dec, func)

    def test_var_no_of_args(self):
        @utils.expects_func_args('foo')
        def dec(f):
            return f

        # *args/**kwargs can absorb any expected argument name.
        @dec
        def func(bar, *args, **kwargs):
            pass

    def test_more_layers(self):
        @utils.expects_func_args('foo', 'baz')
        def dec(f):
            return f

        def dec_2(f):
            def inner_f(*a, **k):
                return f()

            return inner_f

        # Even through an extra non-wraps layer, the missing 'foo'/'baz'
        # must still be detected.
        @dec_2
        def func(bar, baz):
            pass

        self.assertRaises(TypeError, dec, func)
class StringLengthTestCase(test.NoDBTestCase):
    """Tests for utils.check_string_length bounds and type validation."""

    def test_check_string_length(self):
        self.assertIsNone(utils.check_string_length(
                          'test', 'name', max_length=255))
        self.assertRaises(exception.InvalidInput,
                          utils.check_string_length,
                          11, 'name', max_length=255)
        self.assertRaises(exception.InvalidInput,
                          utils.check_string_length,
                          '', 'name', min_length=1)
        self.assertRaises(exception.InvalidInput,
                          utils.check_string_length,
                          'a' * 256, 'name', max_length=255)

    def test_check_string_length_noname(self):
        # Same checks, exercising the default (unnamed) error path.
        self.assertIsNone(utils.check_string_length(
                          'test', max_length=255))
        self.assertRaises(exception.InvalidInput,
                          utils.check_string_length,
                          11, max_length=255)
        self.assertRaises(exception.InvalidInput,
                          utils.check_string_length,
                          '', min_length=1)
        self.assertRaises(exception.InvalidInput,
                          utils.check_string_length,
                          'a' * 256, max_length=255)
class ValidateIntegerTestCase(test.NoDBTestCase):
    """Tests for utils.validate_integer (type coercion plus bounds)."""

    def test_valid_inputs(self):
        self.assertEqual(
            utils.validate_integer(42, "answer"), 42)
        self.assertEqual(
            utils.validate_integer("42", "answer"), 42)
        self.assertEqual(
            utils.validate_integer(
                "7", "lucky", min_value=7, max_value=8), 7)
        self.assertEqual(
            utils.validate_integer(
                7, "lucky", min_value=6, max_value=7), 7)
        self.assertEqual(
            utils.validate_integer(
                300, "Spartaaa!!!", min_value=300), 300)
        self.assertEqual(
            utils.validate_integer(
                "300", "Spartaaa!!!", max_value=300), 300)

    def test_invalid_inputs(self):
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          "im-not-an-int", "not-an-int")
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          3.14, "Pie")
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          "299", "Sparta no-show",
                          min_value=300, max_value=300)
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          55, "doing 55 in a 54",
                          max_value=54)
        # Non-ASCII digit-like input must fail, not raise UnicodeError.
        self.assertRaises(exception.InvalidInput,
                          utils.validate_integer,
                          six.unichr(129), "UnicodeError",
                          max_value=1000)
class ValidateNeutronConfiguration(test.NoDBTestCase):
    """utils.is_neutron() detection based on the configured network API."""

    def test_nova_network(self):
        # Default network_api_class is nova-network, not neutron.
        self.assertFalse(utils.is_neutron())

    def test_neutron(self):
        self.flags(network_api_class='nova.network.neutronv2.api.API')
        self.assertTrue(utils.is_neutron())

    def test_quantum(self):
        # Legacy "quantum" naming must still be recognized as neutron.
        self.flags(network_api_class='nova.network.quantumv2.api.API')
        self.assertTrue(utils.is_neutron())
class AutoDiskConfigUtilTestCase(test.NoDBTestCase):
    """utils.is_auto_disk_config_disabled string-matching behavior."""

    def test_is_auto_disk_config_disabled(self):
        # "Disabled " matches case-insensitively with surrounding whitespace.
        self.assertTrue(utils.is_auto_disk_config_disabled("Disabled "))

    def test_is_auto_disk_config_disabled_none(self):
        self.assertFalse(utils.is_auto_disk_config_disabled(None))

    def test_is_auto_disk_config_disabled_false(self):
        self.assertFalse(utils.is_auto_disk_config_disabled("false"))
class GetSystemMetadataFromImageTestCase(test.NoDBTestCase):
    """Tests for utils.get_system_metadata_from_image key propagation."""

    def get_image(self):
        # Minimal image dict carrying every SM_INHERITABLE_KEYS entry.
        image_meta = {
            "id": "fake-image",
            "name": "fake-name",
            "min_ram": 1,
            "min_disk": 1,
            "disk_format": "raw",
            "container_format": "bare",
        }

        return image_meta

    def get_flavor(self):
        flavor = {
            "id": "fake.flavor",
            "root_gb": 10,
        }

        return flavor

    def test_base_image_properties(self):
        image = self.get_image()

        # Verify that we inherit all the needed keys
        sys_meta = utils.get_system_metadata_from_image(image)
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image[key], sys_meta.get(sys_key))

        # Verify that everything else is ignored
        self.assertEqual(len(sys_meta), len(utils.SM_INHERITABLE_KEYS))

    def test_inherit_image_properties(self):
        image = self.get_image()
        image["properties"] = {"foo1": "bar", "foo2": "baz"}

        sys_meta = utils.get_system_metadata_from_image(image)

        # Verify that we inherit all the image properties
        for key, expected in six.iteritems(image["properties"]):
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(sys_meta[sys_key], expected)

    def test_skip_image_properties(self):
        image = self.get_image()
        image["properties"] = {
            "foo1": "bar", "foo2": "baz",
            "mappings": "wizz", "img_block_device_mapping": "eek",
        }

        sys_meta = utils.get_system_metadata_from_image(image)

        # Verify that we inherit all the image properties
        for key, expected in six.iteritems(image["properties"]):
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)

            if key in utils.SM_SKIP_KEYS:
                self.assertNotIn(sys_key, sys_meta)
            else:
                self.assertEqual(sys_meta[sys_key], expected)

    def test_vhd_min_disk_image(self):
        image = self.get_image()
        flavor = self.get_flavor()

        image["disk_format"] = "vhd"

        sys_meta = utils.get_system_metadata_from_image(image, flavor)

        # Verify that the min_disk property is taken from
        # flavor's root_gb when using vhd disk format
        sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, "min_disk")
        self.assertEqual(sys_meta[sys_key], flavor["root_gb"])

    def test_dont_inherit_empty_values(self):
        image = self.get_image()

        for key in utils.SM_INHERITABLE_KEYS:
            image[key] = None

        sys_meta = utils.get_system_metadata_from_image(image)

        # Verify that the empty properties have not been inherited
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertNotIn(sys_key, sys_meta)
class GetImageFromSystemMetadataTestCase(test.NoDBTestCase):
    """Tests for utils.get_image_from_system_metadata (reverse mapping)."""

    def get_system_metadata(self):
        sys_meta = {
            "image_min_ram": 1,
            "image_min_disk": 1,
            "image_disk_format": "raw",
            "image_container_format": "bare",
        }

        return sys_meta

    def test_image_from_system_metadata(self):
        sys_meta = self.get_system_metadata()
        # NOTE(review): the "%soo1" keys look like a typo for "%sfoo1" but
        # they simply produce properties named "oo1"/"oo2"; the assertions
        # below iterate whatever properties come back, so it still holds.
        sys_meta["%soo1" % utils.SM_IMAGE_PROP_PREFIX] = "bar"
        sys_meta["%soo2" % utils.SM_IMAGE_PROP_PREFIX] = "baz"
        sys_meta["%simg_block_device_mapping" %
                 utils.SM_IMAGE_PROP_PREFIX] = "eek"

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that we inherit all the needed keys
        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image[key], sys_meta.get(sys_key))

        # Verify that we inherit the rest of metadata as properties
        self.assertIn("properties", image)

        for key, value in six.iteritems(image["properties"]):
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            self.assertEqual(image["properties"][key], sys_meta[sys_key])

        self.assertNotIn("img_block_device_mapping", image["properties"])

    def test_dont_inherit_empty_values(self):
        sys_meta = self.get_system_metadata()

        for key in utils.SM_INHERITABLE_KEYS:
            sys_key = "%s%s" % (utils.SM_IMAGE_PROP_PREFIX, key)
            sys_meta[sys_key] = None

        image = utils.get_image_from_system_metadata(sys_meta)

        # Verify that the empty properties have not been inherited
        for key in utils.SM_INHERITABLE_KEYS:
            self.assertNotIn(key, image)
class GetImageMetadataFromVolumeTestCase(test.NoDBTestCase):
    """Tests for utils.get_image_metadata_from_volume."""

    def test_inherit_image_properties(self):
        properties = {"fake_prop": "fake_value"}
        volume = {"volume_image_metadata": properties}
        image_meta = utils.get_image_metadata_from_volume(volume)
        self.assertEqual(properties, image_meta["properties"])

    def test_image_size(self):
        # Volume size is in GiB; image size is expressed in bytes.
        volume = {"size": 10}
        image_meta = utils.get_image_metadata_from_volume(volume)
        self.assertEqual(10 * units.Gi, image_meta["size"])

    def test_image_status(self):
        volume = {}
        image_meta = utils.get_image_metadata_from_volume(volume)
        self.assertEqual("active", image_meta["status"])

    def test_values_conversion(self):
        # String metadata values must be coerced to ints where expected.
        properties = {"min_ram": "5", "min_disk": "7"}
        volume = {"volume_image_metadata": properties}
        image_meta = utils.get_image_metadata_from_volume(volume)
        self.assertEqual(5, image_meta["min_ram"])
        self.assertEqual(7, image_meta["min_disk"])
class VersionTestCase(test.NoDBTestCase):
    """Tests for the version string/int/tuple conversion helpers."""

    def test_convert_version_to_int(self):
        self.assertEqual(6002000, utils.convert_version_to_int('6.2.0'))
        self.assertEqual(6004003, utils.convert_version_to_int((6, 4, 3)))
        self.assertEqual(5, utils.convert_version_to_int((5, )))
        # Non-numeric components are rejected.
        self.assertRaises(exception.NovaException,
                          utils.convert_version_to_int, '5a.6b')

    def test_convert_version_to_string(self):
        self.assertEqual('6.7.0', utils.convert_version_to_str(6007000))
        self.assertEqual('4', utils.convert_version_to_str(4))

    def test_convert_version_to_tuple(self):
        self.assertEqual((6, 7, 0), utils.convert_version_to_tuple('6.7.0'))
class ConstantTimeCompareTestCase(test.NoDBTestCase):
    """Sanity checks for utils.constant_time_compare."""

    def test_constant_time_compare(self):
        self.assertTrue(utils.constant_time_compare("abcd1234", "abcd1234"))
        # Mismatches of different lengths and of equal length both fail.
        for other in ("a", "ABCD234"):
            self.assertFalse(utils.constant_time_compare("abcd1234", other))
class ResourceFilterTestCase(test.NoDBTestCase):
    """Tests for utils.filter_and_format_resource_metadata."""

    def _assert_filtering(self, res_list, filts, expected_tags):
        """Run the filter over ``res_list`` and compare with ``expected_tags``."""
        actual_tags = utils.filter_and_format_resource_metadata('instance',
                res_list, filts, 'metadata')
        self.assertJsonEqual(expected_tags, actual_tags)

    def test_filter_and_format_resource_metadata(self):
        # Create some tags
        # One overlapping pair, and one different key value pair
        # i1 : foo=bar, bax=wibble
        # i2 : foo=bar, baz=quux
        # resources
        i1 = {
            'uuid': '1',
            'metadata': {'foo': 'bar', 'bax': 'wibble'},
        }
        i2 = {
            'uuid': '2',
            'metadata': {'foo': 'bar', 'baz': 'quux'},
        }
        # Resources list
        rl = [i1, i2]
        # tags
        i11 = {'instance_id': '1', 'key': 'foo', 'value': 'bar'}
        i12 = {'instance_id': '1', 'key': 'bax', 'value': 'wibble'}
        i21 = {'instance_id': '2', 'key': 'foo', 'value': 'bar'}
        i22 = {'instance_id': '2', 'key': 'baz', 'value': 'quux'}
        # No filter
        self._assert_filtering(rl, [], [i11, i12, i21, i22])
        self._assert_filtering(rl, {}, [i11, i12, i21, i22])
        # Key search
        # Both should have tags with key 'foo' and value 'bar'
        self._assert_filtering(rl, {'key': 'foo', 'value': 'bar'}, [i11, i21])
        # Both should have tags with key 'foo'
        self._assert_filtering(rl, {'key': 'foo'}, [i11, i21])
        # Only i2 should have tags with key 'baz' and value 'quux'
        self._assert_filtering(rl, {'key': 'baz', 'value': 'quux'}, [i22])
        # Only i2 should have tags with value 'quux'
        self._assert_filtering(rl, {'value': 'quux'}, [i22])
        # Empty list should be returned when no tags match
        self._assert_filtering(rl, {'key': 'split', 'value': 'banana'}, [])
        # Multiple values
        # Only i2 should have tags with key 'baz' and values in the set
        # ['quux', 'wibble']
        self._assert_filtering(rl, {'key': 'baz', 'value': ['quux', 'wibble']},
                [i22])
        # But when specified as two different filters, no tags should be
        # returned. This is because, the filter will mean "return tags which
        # have (key=baz AND value=quux) AND (key=baz AND value=wibble)
        self._assert_filtering(rl, [{'key': 'baz', 'value': 'quux'},
            {'key': 'baz', 'value': 'wibble'}], [])
        # Test for regex
        # BUG FIX: global inline flags such as (?s) must appear at the start
        # of the pattern; a trailing position is deprecated and raises
        # re.error on Python 3.11+.  Same semantics, flag moved to the front.
        self._assert_filtering(rl, {'value': '(?s)\\Aqu..*\\Z'}, [i22])
        # Make sure bug #1365887 is fixed
        i1['metadata']['key3'] = 'a'
        self._assert_filtering(rl, {'value': 'banana'}, [])
class SafeTruncateTestCase(test.NoDBTestCase):
    """Verify utils.safe_truncate honours a byte budget on multi-byte text."""

    def test_exception_to_dict_with_long_message_3_bytes(self):
        # 100 Chinese characters, 3 UTF-8 bytes each (300 bytes total).
        # 255 lands exactly on a character boundary, so the truncated
        # encoding is exactly 255 bytes.
        msg = u'\u8d75' * 100
        truncated_msg = utils.safe_truncate(msg, 255)
        byte_message = encodeutils.safe_encode(truncated_msg)
        self.assertEqual(255, len(byte_message))

    def test_exception_to_dict_with_long_message_2_bytes(self):
        # BUG FIX: the original passed a text literal, so safe_decode was a
        # no-op on U+00D0/U+0092 characters instead of decoding UTF-8 bytes
        # into the Cyrillic text the comment promised.  Use a bytes literal:
        # 150 Cyrillic 'В' characters, 2 UTF-8 bytes each (300 bytes).
        # 255 falls mid-character, so truncation backs off to 254 bytes.
        msg = encodeutils.safe_decode(b'\xd0\x92' * 150)
        truncated_msg = utils.safe_truncate(msg, 255)
        byte_message = encodeutils.safe_encode(truncated_msg)
        self.assertEqual(254, len(byte_message))
class SpawnNTestCase(test.NoDBTestCase):
    """Verify that utils.spawn_n propagates the request context correctly."""

    def setUp(self):
        super(SpawnNTestCase, self).setUp()
        self.useFixture(context_fixture.ClearRequestContext())
        # Subclasses override this to exercise utils.spawn instead.
        self.spawn_name = 'spawn_n'

    def test_spawn_n_no_context(self):
        self.assertIsNone(common_context.get_current())

        def _spawn_stub(func, *args, **kwargs):
            # Run the target inline so any error surfaces in the test.
            func(*args, **kwargs)
            self.assertEqual('test', args[0])

        def _target(arg):
            pass

        with mock.patch.object(eventlet, self.spawn_name, _spawn_stub):
            getattr(utils, self.spawn_name)(_target, 'test')
        # No context existed before, so none may exist afterwards.
        self.assertIsNone(common_context.get_current())

    def test_spawn_n_context(self):
        self.assertIsNone(common_context.get_current())
        ctxt = context.RequestContext('user', 'project')

        def _spawn_stub(func, *args, **kwargs):
            func(*args, **kwargs)
            self.assertEqual(ctxt, args[0])
            self.assertEqual('test', kwargs['kwarg1'])

        def _target(context, kwarg1=None):
            pass

        with mock.patch.object(eventlet, self.spawn_name, _spawn_stub):
            getattr(utils, self.spawn_name)(_target, ctxt, kwarg1='test')
        # The spawning context must still be current afterwards.
        self.assertEqual(ctxt, common_context.get_current())

    def test_spawn_n_context_different_from_passed(self):
        self.assertIsNone(common_context.get_current())
        ctxt = context.RequestContext('user', 'project')
        ctxt_passed = context.RequestContext('user', 'project',
                                             overwrite=False)
        # Creating ctxt (default overwrite=True) made it current;
        # ctxt_passed must not have replaced it.
        self.assertEqual(ctxt, common_context.get_current())

        def _spawn_stub(func, *args, **kwargs):
            func(*args, **kwargs)
            self.assertEqual(ctxt_passed, args[0])
            self.assertEqual('test', kwargs['kwarg1'])

        def _target(context, kwarg1=None):
            pass

        with mock.patch.object(eventlet, self.spawn_name, _spawn_stub):
            getattr(utils, self.spawn_name)(_target, ctxt_passed,
                                            kwarg1='test')
        self.assertEqual(ctxt, common_context.get_current())
class SpawnTestCase(SpawnNTestCase):
    """Re-run the SpawnNTestCase suite against utils.spawn."""

    def setUp(self):
        super(SpawnTestCase, self).setUp()
        self.spawn_name = 'spawn'
|
|
import json
import mock
import netaddr
from neutron_lib import exceptions as n_exc
from quark import exceptions as qexceptions
from quark.tests.functional.plugin_modules import test_floating_ips
class TestScalingIP(test_floating_ips.BaseFloatingIPTest):
    """Functional tests for the scaling-IP extension of the quark plugin.

    A scaling IP is a shared address mapped onto one or more ports.  The
    plugin mirrors every change to the external "unicorn" service, so each
    test also asserts the HTTP verb (post/put/delete) issued against it.
    """

    def setUp(self):
        super(TestScalingIP, self).setUp()
        # Scaling IPs are allocated from the base fixture's floating network.
        self.scaling_network = self.floating_network

    @staticmethod
    def _port_ip_map(scip):
        """Return {port_id: fixed_ip_address} for a scaling IP's ports."""
        return {scip_port['port_id']: scip_port['fixed_ip_address']
                for scip_port in scip['ports']}

    def test_create_scaling_ip(self):
        """Creating with two ports POSTs the mapping to unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        self.assertIn(netaddr.IPAddress(scip['scaling_ip_address']),
                      list(netaddr.IPNetwork(self.pub_net_cidr)))
        self.assertEqual(self.scaling_network['id'],
                         scip['scaling_network_id'])
        self.assertEqual(2, len(scip['ports']))
        scip_ports = self._port_ip_map(scip)
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        port2_fixed_ip = self.user_port2['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(self.user_port2['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        self.assertIn(port2_fixed_ip, scip_ports.values())
        self.mock_requests.post.assert_called_once_with(
            self.FAKE_UNICORN_URL, data=mock.ANY, timeout=2
        )
        actual_body = json.loads(self.mock_requests.post.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1, self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")
        get_scip = self.plugin.get_scalingip(self.context, scip['id'])
        self.assertEqual(scip['scaling_ip_address'],
                         get_scip['scaling_ip_address'])

    def test_create_with_invalid_scaling_network_id(self):
        """An unknown scaling network id raises NetworkNotFound."""
        scaling_ip = dict(
            scaling_network_id='some-wrong-network-id',
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        self.assertRaises(n_exc.NetworkNotFound,
                          self.plugin.create_scalingip,
                          self.context, {"scalingip": scaling_ip})

    def test_create_with_scaling_network_invalid_segment(self):
        """A network without the right segment cannot allocate an address."""
        scaling_ip = dict(
            scaling_network_id=self.user_network['id'],
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        self.assertRaises(n_exc.IpAddressGenerationFailure,
                          self.plugin.create_scalingip,
                          self.context, {"scalingip": scaling_ip})

    def test_update_scaling_ip_add_port(self):
        """Adding a port to an existing scaling IP PUTs, not POSTs."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[dict(port_id=self.user_port1['id']),
                                 dict(port_id=self.user_port2['id'])])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(2, len(updated_scip['ports']))
        scip_ports = self._port_ip_map(updated_scip)
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        port2_fixed_ip = self.user_port2['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(self.user_port2['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        self.assertIn(port2_fixed_ip, scip_ports.values())
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.delete.called)
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.mock_requests.put.assert_called_once_with(
            expected_url, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.put.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1, self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")

    def test_update_scaling_ip_remove_port_with_remaining_ports(self):
        """Removing one of several ports PUTs the reduced mapping."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[dict(port_id=self.user_port1['id'])])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(1, len(updated_scip['ports']))
        scip_ports = self._port_ip_map(updated_scip)
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.delete.called)
        self.mock_requests.put.assert_called_once_with(
            expected_url, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.put.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")

    def test_update_scaling_ip_clear_ports(self):
        """Clearing every port DELETEs the mapping from unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(0, len(updated_scip['ports']))
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.put.called)
        self.mock_requests.delete.assert_called_once_with(
            expected_url, timeout=2)

    def test_update_scaling_ip_add_ports_from_none(self):
        """Adding the first ports to an empty scaling IP POSTs anew."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[]
        )
        scaling_ip = {'scalingip': scaling_ip}
        scip = self.plugin.create_scalingip(self.context, scaling_ip)
        self.mock_requests.reset_mock()
        scaling_ip = dict(ports=[dict(port_id=self.user_port1['id']),
                                 dict(port_id=self.user_port2['id'])])
        updated_scip = self.plugin.update_scalingip(
            self.context, scip['id'], {"scalingip": scaling_ip})
        self.assertEqual(self.scaling_network['id'],
                         updated_scip['scaling_network_id'])
        self.assertEqual(updated_scip['scaling_ip_address'],
                         scip['scaling_ip_address'])
        self.assertEqual(2, len(updated_scip['ports']))
        scip_ports = self._port_ip_map(updated_scip)
        port1_fixed_ip = self.user_port1['fixed_ips'][0]['ip_address']
        port2_fixed_ip = self.user_port2['fixed_ips'][0]['ip_address']
        self.assertIn(self.user_port1['id'], scip_ports)
        self.assertIn(self.user_port2['id'], scip_ports)
        self.assertIn(port1_fixed_ip, scip_ports.values())
        self.assertIn(port2_fixed_ip, scip_ports.values())
        self.assertFalse(self.mock_requests.put.called)
        self.assertFalse(self.mock_requests.delete.called)
        self.mock_requests.post.assert_called_once_with(
            self.FAKE_UNICORN_URL, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.post.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port1, self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")

    # BUG FIX: mock.patch decorators are applied bottom-up, so the innermost
    # patch (build_payload) is the FIRST mock argument after ``self``.  The
    # parameter names were swapped; harmless only because neither mock was
    # referenced by name in the body.
    @mock.patch('quark.billing.notify')
    @mock.patch('quark.billing.build_payload', return_value={})
    def test_delete_scaling_ip(self, build_payload, notify):
        """Deleting a scaling IP removes it and DELETEs it from unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scip = self.plugin.create_scalingip(
            self.context, {"scalingip": scaling_ip})
        self.plugin.delete_scalingip(self.context, scip['id'])
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.mock_requests.delete.assert_called_once_with(
            expected_url, timeout=2
        )
        self.assertRaises(qexceptions.ScalingIpNotFound,
                          self.plugin.get_scalingip, self.context, scip['id'])
        scips = self.plugin.get_scalingips(self.context)
        self.assertEqual(0, len(scips))

    def test_scaling_ip_not_in_floating_ip_list(self):
        """Scaling IPs must not leak into the floating IP listing."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id'])]
        )
        scaling_ip = {'scalingip': scaling_ip}
        self.plugin.create_scalingip(self.context, scaling_ip)
        flips = self.plugin.get_floatingips(self.context)
        self.assertEqual(0, len(flips))

    def test_floating_ip_not_in_scaling_ip_list(self):
        """Floating IPs must not leak into the scaling IP listing."""
        floating_ip = dict(
            floating_network_id=self.scaling_network.id,
            port_id=self.user_port1['id']
        )
        floating_ip = {'floatingip': floating_ip}
        self.plugin.create_floatingip(self.context, floating_ip)
        scips = self.plugin.get_scalingips(self.context)
        self.assertEqual(0, len(scips))

    def test_delete_port_associated_with_scip_with_multiple_ports(self):
        """Deleting one of several ports PUTs the reduced mapping."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id']),
                   dict(port_id=self.user_port2['id'])]
        )
        scip = self.plugin.create_scalingip(
            self.context, {"scalingip": scaling_ip})
        self.mock_requests.reset_mock()
        self.context.session.expire_all()
        self.plugin.delete_port(self.context, self.user_port1['id'])
        after_scip = self.plugin.get_scalingip(self.context, scip['id'])
        self.assertEqual(1, len(after_scip['ports']))
        self.assertEqual(self.user_port2['id'],
                         after_scip['ports'][0]['port_id'])
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.delete.called)
        self.mock_requests.put.assert_called_once_with(
            expected_url, data=mock.ANY, timeout=2)
        actual_body = json.loads(self.mock_requests.put.call_args[1]['data'])
        unicorn_body = self._build_expected_unicorn_request_body(
            scip['scaling_ip_address'], [self.user_port2],
            actual_body=actual_body
        )
        self.assertEqual(unicorn_body, actual_body,
                         msg="Request to the unicorn API is not what is "
                             "expected.")

    def test_delete_port_associated_with_scip_with_one_port(self):
        """Deleting the only port DELETEs the mapping from unicorn."""
        scaling_ip = dict(
            scaling_network_id=self.scaling_network.id,
            ports=[dict(port_id=self.user_port1['id'])]
        )
        scip = self.plugin.create_scalingip(
            self.context, {"scalingip": scaling_ip})
        self.mock_requests.reset_mock()
        self.context.session.expire_all()
        self.plugin.delete_port(self.context, self.user_port1['id'])
        after_scip = self.plugin.get_scalingip(self.context, scip['id'])
        self.assertEqual(0, len(after_scip['ports']))
        expected_url = '/'.join([self.FAKE_UNICORN_URL,
                                 scip['scaling_ip_address']])
        self.assertFalse(self.mock_requests.post.called)
        self.assertFalse(self.mock_requests.put.called)
        self.mock_requests.delete.assert_called_once_with(
            expected_url, timeout=2)
|
|
from __future__ import unicode_literals
import csv
import pytz
from furl import furl
from datetime import datetime, timedelta
from django.db.models import Q
from django.views.defaults import page_not_found
from django.views.generic import FormView, DeleteView, ListView, TemplateView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from osf.models.user import OSFUser
from osf.models.node import Node, NodeLog
from osf.models.spam import SpamStatus
from framework.auth import get_user
from framework.auth.utils import impute_names
from framework.auth.core import generate_verification_key
from website.mailchimp_utils import subscribe_on_confirm
from website import search
from admin.base.views import GuidView
from osf.models.admin_log_entry import (
update_admin_log,
USER_2_FACTOR,
USER_EMAILED,
USER_REMOVED,
USER_RESTORED,
CONFIRM_SPAM,
REINDEX_ELASTIC,
)
from admin.users.serializers import serialize_user
from admin.users.forms import EmailResetForm, WorkshopForm, UserSearchForm, MergeUserForm
from admin.users.templatetags.user_extras import reverse_user
from website.settings import DOMAIN, OSF_SUPPORT_EMAIL
class UserDeleteView(PermissionRequiredMixin, DeleteView):
    """ Allow authorised admin user to remove/restore user
    Interface with OSF database. No admin models.
    """
    template_name = 'users/remove_user.html'
    context_object_name = 'user'
    object = None
    permission_required = 'osf.change_osfuser'
    raise_exception = True
    def delete(self, request, *args, **kwargs):
        """Toggle the account: disable an active user, re-enable a disabled one.

        ``kwargs['is_spam']`` (set by subclasses) forces the disable branch
        and additionally confirms the account as spam.  Raises Http404 when
        no user with the requested guid exists.
        """
        try:
            user = self.get_object()
            # A user with no date_disabled is active -> disable it; an
            # is_spam request re-runs the disable branch even if already
            # disabled so the spam tags get applied.
            if user.date_disabled is None or kwargs.get('is_spam'):
                user.disable_account()
                user.is_registered = False
                # Drop any stale spam/ham tag rows before (re)tagging.
                if 'spam_flagged' in user.system_tags:
                    user.tags.through.objects.filter(tag__name='spam_flagged').delete()
                if 'ham_confirmed' in user.system_tags:
                    user.tags.through.objects.filter(tag__name='ham_confirmed').delete()
                if kwargs.get('is_spam') and 'spam_confirmed' not in user.system_tags:
                    user.add_system_tag('spam_confirmed')
                flag = USER_REMOVED
                message = 'User account {} disabled'.format(user.pk)
            else:
                # Restore branch: clear the disabled state, re-subscribe
                # mailing lists and mark the account as confirmed ham.
                user.requested_deactivation = False
                user.date_disabled = None
                subscribe_on_confirm(user)
                user.is_registered = True
                user.tags.through.objects.filter(tag__name__in=['spam_flagged', 'spam_confirmed'], tag__system=True).delete()
                if 'ham_confirmed' not in user.system_tags:
                    user.add_system_tag('ham_confirmed')
                flag = USER_RESTORED
                message = 'User account {} reenabled'.format(user.pk)
            user.save()
        except AttributeError:
            # get_object returned None for an unknown guid.
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message=message,
            action_flag=flag
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
    def get_context_data(self, **kwargs):
        """Expose the target user's guid to the confirmation template."""
        context = {}
        context.setdefault('guid', kwargs.get('object')._id)
        return super(UserDeleteView, self).get_context_data(**context)
    def get_object(self, queryset=None):
        """Load the OSFUser for the guid captured from the URL (may be None)."""
        return OSFUser.load(self.kwargs.get('guid'))
class SpamUserDeleteView(UserDeleteView):
    """
    Allow authorized admin user to delete a spam user and mark all their nodes as private
    """
    template_name = 'users/remove_spam_user.html'

    def delete(self, request, *args, **kwargs):
        """Confirm the user's eligible nodes as spam, then disable the account."""
        try:
            target = self.get_object()
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        if target:
            # Registrations are immutable and already-spam nodes are skipped.
            for node in target.contributor_to:
                if node.is_registration or node.is_spam:
                    continue
                node.confirm_spam(save=True)
                update_admin_log(
                    user_id=request.user.id,
                    object_id=node._id,
                    object_repr='Node',
                    message='Confirmed SPAM: {} when user {} marked as spam'.format(node._id, target._id),
                    action_flag=CONFIRM_SPAM
                )
        # Force the parent view down the disable-as-spam branch.
        kwargs.update({'is_spam': True})
        return super(SpamUserDeleteView, self).delete(request, *args, **kwargs)
class HamUserRestoreView(UserDeleteView):
    """
    Allow authorized admin user to undelete a ham user
    """
    template_name = 'users/restore_ham_user.html'

    def delete(self, request, *args, **kwargs):
        """Confirm the user's spam nodes as ham, then restore the account."""
        try:
            target = self.get_object()
        except AttributeError:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        if target:
            for node in target.contributor_to:
                if not node.is_spam:
                    continue
                node.confirm_ham(save=True)
                # NOTE(review): logs ham confirmations under the
                # CONFIRM_SPAM action flag -- a CONFIRM_HAM flag is not
                # imported here; confirm whether one exists.
                update_admin_log(
                    user_id=request.user.id,
                    object_id=node._id,
                    object_repr='Node',
                    message='Confirmed HAM: {} when user {} marked as ham'.format(node._id, target._id),
                    action_flag=CONFIRM_SPAM
                )
        # Force the parent view down the restore branch.
        kwargs.update({'is_spam': False})
        return super(HamUserRestoreView, self).delete(request, *args, **kwargs)
class UserSpamList(PermissionRequiredMixin, ListView):
    """Base paginated listing of users carrying a spam-related system tag."""
    SPAM_TAG = 'spam_flagged'
    paginate_by = 25
    paginate_orphans = 1
    # BUG FIX: the '-' prefix had migrated from ``ordering`` onto
    # ``context_object_name`` ('-osfuser' is not a usable template variable
    # name).  Most recently disabled accounts are listed first.
    ordering = '-date_disabled'
    context_object_name = 'osfuser'
    permission_required = ('osf.view_spam', 'osf.view_osfuser')
    raise_exception = True

    def get_queryset(self):
        """Users tagged with ``SPAM_TAG``, ordered per ``ordering``."""
        return OSFUser.objects.filter(tags__name=self.SPAM_TAG).order_by(self.ordering)

    def get_context_data(self, **kwargs):
        """Serialize the current page of users for the template."""
        query_set = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(query_set)
        paginator, page, query_set, is_paginated = self.paginate_queryset(
            query_set, page_size)
        return {
            'users': list(map(serialize_user, query_set)),
            'page': page,
        }
class UserFlaggedSpamList(UserSpamList, DeleteView):
    """Listing of flagged users; POSTing their ids confirms them as spam."""
    SPAM_TAG = 'spam_flagged'
    template_name = 'users/flagged_spam_list.html'

    def delete(self, request, *args, **kwargs):
        """Mark every POSTed user id as confirmed spam."""
        # BUG FIX: Django users expose ``has_perm``; ``get_perms`` is not a
        # user method and would raise AttributeError before the check ran.
        if not request.user.has_perm('osf.mark_spam'):
            raise PermissionDenied("You don't have permission to update this user's spam status.")
        user_ids = [
            uid for uid in request.POST.keys()
            if uid != 'csrfmiddlewaretoken'
        ]
        for uid in user_ids:
            user = OSFUser.load(uid)
            # Promote the flag to a confirmation.
            # NOTE(review): assumes ``system_tags`` supports remove() -- confirm.
            if 'spam_flagged' in user.system_tags:
                user.system_tags.remove('spam_flagged')
            user.add_system_tag('spam_confirmed')
            user.save()
            update_admin_log(
                user_id=self.request.user.id,
                object_id=uid,
                object_repr='User',
                message='Confirmed SPAM: {}'.format(uid),
                action_flag=CONFIRM_SPAM
            )
        return redirect('users:flagged-spam')
class UserKnownSpamList(UserSpamList):
    """Listing of accounts already confirmed as spam."""
    template_name = 'users/known_spam_list.html'
    SPAM_TAG = 'spam_confirmed'
class UserKnownHamList(UserSpamList):
    """Listing of accounts already confirmed as ham (not spam)."""
    template_name = 'users/known_spam_list.html'
    SPAM_TAG = 'ham_confirmed'
class User2FactorDeleteView(UserDeleteView):
    """ Allow authorised admin user to remove 2 factor authentication.
    Interface with OSF database. No admin models.
    """
    template_name = 'users/remove_2_factor.html'

    def delete(self, request, *args, **kwargs):
        """Remove the twofactor addon for the user identified by the guid."""
        target = self.get_object()
        try:
            target.delete_addon('twofactor')
        except AttributeError:
            # get_object returned None for an unknown guid.
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        update_admin_log(
            user_id=self.request.user.id,
            object_id=target.pk,
            object_repr='User',
            message='Removed 2 factor auth for user {}'.format(target.pk),
            action_flag=USER_2_FACTOR
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
class UserFormView(PermissionRequiredMixin, FormView):
    """Admin search form: jump to a user by guid/email, or list by name."""
    template_name = 'users/search.html'
    object_type = 'osfuser'
    permission_required = 'osf.view_osfuser'
    raise_exception = True
    form_class = UserSearchForm

    def __init__(self, *args, **kwargs):
        # Computed by form_valid; served back through success_url.
        self.redirect_url = None
        super(UserFormView, self).__init__(*args, **kwargs)

    def form_valid(self, form):
        """Resolve the submitted search terms to a redirect target."""
        data = form.cleaned_data
        guid = data['guid']
        name = data['name']
        email = data['email']
        if guid or email:
            if email:
                # Match either the login username or any confirmed email.
                try:
                    user = OSFUser.objects.filter(
                        Q(username=email) | Q(emails__address=email)).get()
                    guid = user.guids.first()._id
                except OSFUser.DoesNotExist:
                    return page_not_found(
                        self.request,
                        AttributeError('User with email address {} not found.'.format(email)))
            self.redirect_url = reverse('users:user', kwargs={'guid': guid})
        elif name:
            self.redirect_url = reverse('users:search_list', kwargs={'name': name})
        return super(UserFormView, self).form_valid(form)

    @property
    def success_url(self):
        return self.redirect_url
class UserMergeAccounts(PermissionRequiredMixin, FormView):
    """Merge another account into the user identified by the URL guid."""
    template_name = 'users/merge_accounts_modal.html'
    permission_required = 'osf.view_osfuser'
    object_type = 'osfuser'
    raise_exception = True
    form_class = MergeUserForm

    def get_object(self, queryset=None):
        return OSFUser.load(self.kwargs.get('guid'))

    def get_context_data(self, **kwargs):
        return {'guid': self.get_object()._id}

    def form_valid(self, form):
        """Absorb the submitted guid's account into the target user."""
        target = self.get_object()
        merge_guid = form.cleaned_data['user_guid_to_be_merged']
        absorbed = OSFUser.objects.get(guids___id=merge_guid,
                                       guids___id__isnull=False)
        target.merge_user(absorbed)
        return redirect(reverse_user(target._id))

    def form_invalid(self, form):
        raise Http404(
            '{} not found.'.format(
                form.cleaned_data.get('user_guid_to_be_merged', 'guid')
            ))
class UserSearchList(PermissionRequiredMixin, ListView):
    """Paginated listing of users whose full name matches a substring."""
    template_name = 'users/list.html'
    permission_required = 'osf.view_osfuser'
    raise_exception = True
    form_class = UserSearchForm
    paginate_by = 25

    def get_queryset(self):
        # Restrict the columns fetched to what the template renders.
        return OSFUser.objects.filter(
            fullname__icontains=self.kwargs['name']
        ).only('guids', 'fullname', 'username', 'date_confirmed',
               'date_disabled')

    def get_context_data(self, **kwargs):
        matches = self.get_queryset()
        page_size = self.get_paginate_by(matches)
        paginator, page, page_rows, is_paginated = self.paginate_queryset(
            matches, page_size)
        kwargs['page'] = page
        kwargs['users'] = [
            {
                'name': user.fullname,
                'username': user.username,
                'id': user.guids.first()._id,
                'confirmed': user.date_confirmed,
                'disabled': user.date_disabled if user.is_disabled else None,
            }
            for user in page_rows
        ]
        return super(UserSearchList, self).get_context_data(**kwargs)
class UserView(PermissionRequiredMixin, GuidView):
    """Detail page for a single OSF user."""
    template_name = 'users/user.html'
    context_object_name = 'user'
    permission_required = 'osf.view_osfuser'
    raise_exception = True

    def get_object(self, queryset=None):
        return serialize_user(OSFUser.load(self.kwargs.get('guid')))

    def get_context_data(self, **kwargs):
        context = super(UserView, self).get_context_data(**kwargs)
        # Expose SpamStatus so templates can compare against its values.
        context.update({'SPAM_STATUS': SpamStatus})
        return context
class UserWorkshopFormView(PermissionRequiredMixin, FormView):
    """Annotate an uploaded workshop-attendee CSV with per-user OSF stats.

    The uploaded CSV must carry the workshop date in column 1, the
    attendee's full name in column 4 and their email in column 5.
    """
    form_class = WorkshopForm
    object_type = 'user'
    template_name = 'users/workshop.html'
    permission_required = 'osf.view_osfuser'
    raise_exception = True

    def form_valid(self, form):
        """Stream back the uploaded CSV with user-stat columns appended."""
        csv_file = form.cleaned_data['document']
        final = self.parse(csv_file)
        # BUG FIX: ``str.strip('.csv')`` removes any of the characters
        # '.', 'c', 's', 'v' from BOTH ends (e.g. 'workshops.csv' ->
        # 'workshop'); remove only the literal '.csv' suffix instead.
        base_name = csv_file.name.replace(' ', '_')
        if base_name.endswith('.csv'):
            base_name = base_name[:-len('.csv')]
        results_file_name = '{}_user_stats.csv'.format(base_name)
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="{}"'.format(results_file_name)
        writer = csv.writer(response)
        for row in final:
            writer.writerow(row)
        return response

    @staticmethod
    def find_user_by_email(email):
        """Return the first user with this email, or None."""
        user_list = OSFUser.objects.filter(emails__address=email)
        return user_list[0] if user_list.exists() else None

    @staticmethod
    def find_user_by_full_name(full_name):
        """Return the user with this full name only if it is unambiguous."""
        user_list = OSFUser.objects.filter(fullname=full_name)
        return user_list[0] if user_list.count() == 1 else None

    @staticmethod
    def find_user_by_family_name(family_name):
        """Return the user with this family name only if it is unambiguous."""
        user_list = OSFUser.objects.filter(family_name=family_name)
        return user_list[0] if user_list.count() == 1 else None

    @staticmethod
    def get_num_logs_since_workshop(user, workshop_date):
        """Count the user's node logs dated after the day of the workshop."""
        query_date = workshop_date + timedelta(days=1)
        return NodeLog.objects.filter(user=user, date__gt=query_date).count()

    @staticmethod
    def get_num_nodes_since_workshop(user, workshop_date):
        """Count nodes the user created after the day of the workshop."""
        query_date = workshop_date + timedelta(days=1)
        return Node.objects.filter(creator=user, created__gt=query_date).count()

    @staticmethod
    def get_user_latest_log(user, workshop_date):
        """Return the user's most recent node log after the workshop day."""
        query_date = workshop_date + timedelta(days=1)
        return NodeLog.objects.filter(user=user, date__gt=query_date).latest('date')

    def parse(self, csv_file):
        """ Parse and add to csv file.
        :param csv_file: Comma separated
        :return: A list
        """
        result = []
        csv_reader = csv.reader(csv_file)
        for index, row in enumerate(csv_reader):
            if index == 0:
                # Header row: append the names of the new columns.
                row.extend([
                    'OSF ID', 'Logs Since Workshop', 'Nodes Created Since Workshop', 'Last Log Date'
                ])
                result.append(row)
                continue
            # Resolution order: email, then full name, then family name.
            email = row[5]
            user_by_email = self.find_user_by_email(email)
            if not user_by_email:
                full_name = row[4]
                try:
                    family_name = impute_names(full_name)['family']
                except UnicodeDecodeError:
                    row.extend(['Unable to parse name'])
                    result.append(row)
                    continue
                user_by_name = self.find_user_by_full_name(full_name) or self.find_user_by_family_name(family_name)
                if not user_by_name:
                    # Unresolvable attendee: pad with empty stats.
                    row.extend(['', 0, 0, ''])
                    result.append(row)
                    continue
                else:
                    user = user_by_name
            else:
                user = user_by_email
            workshop_date = pytz.utc.localize(datetime.strptime(row[1], '%m/%d/%y'))
            nodes = self.get_num_nodes_since_workshop(user, workshop_date)
            user_logs = self.get_num_logs_since_workshop(user, workshop_date)
            last_log_date = self.get_user_latest_log(user, workshop_date).date.strftime('%m/%d/%y') if user_logs else ''
            row.extend([
                user._id, user_logs, nodes, last_log_date
            ])
            result.append(row)
        return result

    def form_invalid(self, form):
        # BUG FIX: the parent's response was computed but dropped, making
        # this view return None on an invalid form; propagate it.
        return super(UserWorkshopFormView, self).form_invalid(form)
class GetUserLink(PermissionRequiredMixin, TemplateView):
    """Base modal view rendering an admin-generated link for a user.

    Subclasses supply the link (``get_link``) and the modal title
    (``get_link_type``); ``get_claim_links`` is optional.
    """
    permission_required = 'osf.change_osfuser'
    template_name = 'users/get_link.html'
    raise_exception = True

    def get_link(self, user):
        raise NotImplementedError()

    def get_link_type(self):
        # Used in the title of the link modal
        raise NotImplementedError()

    def get_claim_links(self, user):
        return None

    def get_context_data(self, **kwargs):
        user = OSFUser.load(self.kwargs.get('guid'))
        kwargs.update(
            user_link=self.get_link(user),
            username=user.username,
            title=self.get_link_type(),
            node_claim_links=self.get_claim_links(user),
        )
        return super(GetUserLink, self).get_context_data(**kwargs)
class GetUserConfirmationLink(GetUserLink):
    """Modal exposing a forced email-confirmation URL for the user."""

    def get_link(self, user):
        return user.get_confirmation_url(user.username, force=True)

    def get_link_type(self):
        return 'User Confirmation'
class GetPasswordResetLink(GetUserLink):
    """Modal exposing a fresh 48-hour password-reset URL for the user."""

    def get_link(self, user):
        # Issue a new verification token (replacing any previous one) and
        # stamp its expiry before building the URL.
        key = generate_verification_key(verification_type='password')
        key['expires'] = (datetime.utcnow().replace(tzinfo=pytz.utc) +
                          timedelta(hours=48))
        user.verification_key_v2 = key
        user.save()
        reset_abs_url = furl(DOMAIN)
        reset_abs_url.path.add(
            'resetpassword/{}/{}'.format(user._id,
                                         user.verification_key_v2['token']))
        return reset_abs_url

    def get_link_type(self):
        return 'Password Reset'
class GetUserClaimLinks(GetUserLink):
    """List claim URLs for every node the user has an unclaimed record on."""

    def get_link(self, user):
        return None

    def get_link_type(self):
        return 'Claim User'

    def get_claim_links(self, user):
        claim_urls = []
        for node_guid, record in user.unclaimed_records.items():
            node = Node.load(node_guid)
            claim_url = '{base_url}user/{uid}/{project_id}/claim/?token={token}'.format(
                base_url=DOMAIN,
                uid=user._id,
                project_id=node_guid,
                token=record['token']
            )
            claim_urls.append('Claim URL for node {}: {}'.format(node._id, claim_url))
        return claim_urls or ['User currently has no active unclaimed records for any nodes.']
class ResetPasswordView(PermissionRequiredMixin, FormView):
    """Admin view that emails a password-reset link to one of a user's addresses."""
    form_class = EmailResetForm
    template_name = 'users/reset.html'
    context_object_name = 'user'
    permission_required = 'osf.change_osfuser'
    raise_exception = True

    def dispatch(self, request, *args, **kwargs):
        """Resolve the target user from the URL guid; 404 when none exists."""
        self.user = OSFUser.load(self.kwargs.get('guid'))
        if self.user is None:
            raise Http404(
                '{} with id "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid')
                ))
        return super(ResetPasswordView, self).dispatch(request, *args, **kwargs)

    def get_initial(self):
        """Seed the form with the user's guid and (value, label) email choices."""
        self.initial = {
            'guid': self.user._id,
            'emails': [(r, r) for r in self.user.emails.values_list('address', flat=True)],
        }
        return super(ResetPasswordView, self).get_initial()

    def get_context_data(self, **kwargs):
        kwargs.setdefault('guid', self.user._id)
        kwargs.setdefault('emails', self.user.emails)
        return super(ResetPasswordView, self).get_context_data(**kwargs)

    def form_valid(self, form):
        """Mint a reset token, email the link, and record an admin log entry."""
        email = form.cleaned_data.get('emails')
        user = get_user(email)

        # Guard against the submitted email resolving to a different account
        # than the one this view was opened for.
        if user is None or user._id != self.kwargs.get('guid'):
            return HttpResponse(
                '{} with id "{}" and email "{}" not found.'.format(
                    self.context_object_name.title(),
                    self.kwargs.get('guid'),
                    email
                ),
                status=409
            )
        reset_abs_url = furl(DOMAIN)

        # Persist the new token before building the URL that embeds it.
        # NOTE(review): unlike GetPasswordResetLink, no explicit 'expires'
        # is set here — presumably generate_verification_key supplies a
        # default expiry; confirm.
        user.verification_key_v2 = generate_verification_key(verification_type='password')
        user.save()

        reset_abs_url.path.add(('resetpassword/{}/{}'.format(user._id, user.verification_key_v2['token'])))

        send_mail(
            subject='Reset OSF Password',
            message='Follow this link to reset your password: {}'.format(
                reset_abs_url.url
            ),
            from_email=OSF_SUPPORT_EMAIL,
            recipient_list=[email]
        )
        update_admin_log(
            user_id=self.request.user.id,
            object_id=user.pk,
            object_repr='User',
            message='Emailed user {} a reset link.'.format(user.pk),
            action_flag=USER_EMAILED
        )
        return super(ResetPasswordView, self).form_valid(form)

    @property
    def success_url(self):
        return reverse_user(self.kwargs.get('guid'))
class UserReindexElastic(UserDeleteView):
    """Admin action that pushes a user's record back into Elasticsearch."""
    template_name = 'users/reindex_user_elastic.html'

    def delete(self, request, *args, **kwargs):
        target = self.get_object()
        # Synchronous update so the reindex is done before we redirect.
        search.search.update_user(target, async_update=False)
        update_admin_log(
            user_id=self.request.user.id,
            object_id=target._id,
            object_repr='User',
            message='User Reindexed (Elastic): {}'.format(target._id),
            action_flag=REINDEX_ELASTIC
        )
        return redirect(reverse_user(self.kwargs.get('guid')))
|
|
import unittest
import numpy as np
import unittest
# from mock_plpy import MockPlPy
# plpy = MockPlPy()
#
# import sys
# sys.modules['plpy'] = plpy
from helper import plpy, fixture_file
import crankshaft.space_time_dynamics as std
from crankshaft import random_seeds
import json
class SpaceTimeTests(unittest.TestCase):
    """Testing class for Markov Functions."""

    def setUp(self):
        plpy._reset()
        self.params = {"id_col": "cartodb_id",
                       "time_cols": ['dec_2013', 'jan_2014', 'feb_2014'],
                       "subquery": "SELECT * FROM a_list",
                       "geom_col": "the_geom",
                       "num_ngbrs": 321}
        self.neighbors_data = json.loads(open(fixture_file('neighbors_markov.json')).read())
        self.markov_data = json.loads(open(fixture_file('markov.json')).read())

        # 10 units observed over 10 time steps; column t holds the value t
        # for every unit.
        self.time_data = np.array([i * np.ones(10, dtype=float) for i in range(10)]).T

        # One 5x5 transition matrix per spatial-lag class (5 classes).
        self.transition_matrix = np.array([
            [[0.96341463, 0.0304878, 0.00609756, 0., 0.],
             [0.06040268, 0.83221477, 0.10738255, 0., 0.],
             [0., 0.14, 0.74, 0.12, 0.],
             [0., 0.03571429, 0.32142857, 0.57142857, 0.07142857],
             [0., 0., 0., 0.16666667, 0.83333333]],
            [[0.79831933, 0.16806723, 0.03361345, 0., 0.],
             [0.0754717, 0.88207547, 0.04245283, 0., 0.],
             [0.00537634, 0.06989247, 0.8655914, 0.05913978, 0.],
             [0., 0., 0.06372549, 0.90196078, 0.03431373],
             [0., 0., 0., 0.19444444, 0.80555556]],
            [[0.84693878, 0.15306122, 0., 0., 0.],
             [0.08133971, 0.78947368, 0.1291866, 0., 0.],
             [0.00518135, 0.0984456, 0.79274611, 0.0984456, 0.00518135],
             [0., 0., 0.09411765, 0.87058824, 0.03529412],
             [0., 0., 0., 0.10204082, 0.89795918]],
            [[0.8852459, 0.09836066, 0., 0.01639344, 0.],
             [0.03875969, 0.81395349, 0.13953488, 0., 0.00775194],
             [0.0049505, 0.09405941, 0.77722772, 0.11881188, 0.0049505],
             [0., 0.02339181, 0.12865497, 0.75438596, 0.09356725],
             [0., 0., 0., 0.09661836, 0.90338164]],
            [[0.33333333, 0.66666667, 0., 0., 0.],
             [0.0483871, 0.77419355, 0.16129032, 0.01612903, 0.],
             [0.01149425, 0.16091954, 0.74712644, 0.08045977, 0.],
             [0., 0.01036269, 0.06217617, 0.89637306, 0.03108808],
             [0., 0., 0., 0.02352941, 0.97647059]]]
        )

    def test_spatial_markov(self):
        """Test Spatial Markov."""
        data = [{'id': d['id'],
                 'attr1': d['y1995'],
                 'attr2': d['y1996'],
                 'attr3': d['y1997'],
                 'attr4': d['y1998'],
                 'attr5': d['y1999'],
                 'attr6': d['y2000'],
                 'attr7': d['y2001'],
                 'attr8': d['y2002'],
                 'attr9': d['y2003'],
                 'attr10': d['y2004'],
                 'attr11': d['y2005'],
                 'attr12': d['y2006'],
                 'attr13': d['y2007'],
                 'attr14': d['y2008'],
                 'attr15': d['y2009'],
                 'neighbors': d['neighbors']} for d in self.neighbors_data]
        print(str(data[0]))
        plpy._define_result('select', data)
        random_seeds.set_random_seeds(1234)
        result = std.spatial_markov_trend('subquery', ['y1995', 'y1996', 'y1997', 'y1998', 'y1999', 'y2000', 'y2001', 'y2002', 'y2003', 'y2004', 'y2005', 'y2006', 'y2007', 'y2008', 'y2009'], 5, 'knn', 5, 0, 'the_geom', 'cartodb_id')
        # Normalized: was `result != None`; assertIsNotNone is the idiom.
        self.assertIsNotNone(result)
        result = [(row[0], row[1], row[2], row[3], row[4]) for row in result]
        # Normalized: was a Python 2 print statement; the rest of the file
        # already uses the function form.
        print(result[0])
        expected = self.markov_data
        for ([res_trend, res_up, res_down, res_vol, res_id],
             [exp_trend, exp_up, exp_down, exp_vol, exp_id]
             ) in zip(result, expected):
            self.assertAlmostEqual(res_trend, exp_trend)

    def test_get_time_data(self):
        """Test get_time_data"""
        data = [{'attr1': d['y1995'],
                 'attr2': d['y1996'],
                 'attr3': d['y1997'],
                 'attr4': d['y1998'],
                 'attr5': d['y1999'],
                 'attr6': d['y2000'],
                 'attr7': d['y2001'],
                 'attr8': d['y2002'],
                 'attr9': d['y2003'],
                 'attr10': d['y2004'],
                 'attr11': d['y2005'],
                 'attr12': d['y2006'],
                 'attr13': d['y2007'],
                 'attr14': d['y2008'],
                 'attr15': d['y2009']} for d in self.neighbors_data]

        result = std.get_time_data(data, ['y1995', 'y1996', 'y1997', 'y1998', 'y1999', 'y2000', 'y2001', 'y2002', 'y2003', 'y2004', 'y2005', 'y2006', 'y2007', 'y2008', 'y2009'])

        ## expected was prepared from PySAL example:
        ### f = ps.open(ps.examples.get_path("usjoin.csv"))
        ### pci = np.array([f.by_col[str(y)] for y in range(1995, 2010)]).transpose()
        ### rpci = pci / (pci.mean(axis = 0))
        expected = np.array(
            [[0.87654416, 0.863147, 0.85637567, 0.84811668, 0.8446154, 0.83271652,
              0.83786314, 0.85012593, 0.85509656, 0.86416612, 0.87119375, 0.86302631,
              0.86148267, 0.86252252, 0.86746356],
             [0.9188951, 0.91757931, 0.92333258, 0.92517289, 0.92552388, 0.90746978,
              0.89830489, 0.89431991, 0.88924794, 0.89815176, 0.91832091, 0.91706054,
              0.90139505, 0.87897455, 0.86216858],
             [0.82591007, 0.82548596, 0.81989793, 0.81503235, 0.81731522, 0.78964559,
              0.80584442, 0.8084998, 0.82258551, 0.82668196, 0.82373724, 0.81814804,
              0.83675961, 0.83574199, 0.84647177],
             [1.09088176, 1.08537689, 1.08456418, 1.08415404, 1.09898841, 1.14506948,
              1.12151133, 1.11160697, 1.10888621, 1.11399806, 1.12168029, 1.13164797,
              1.12958508, 1.11371818, 1.09936775],
             [1.10731446, 1.11373944, 1.13283638, 1.14472559, 1.15910025, 1.16898201,
              1.17212488, 1.14752303, 1.11843284, 1.11024964, 1.11943471, 1.11736468,
              1.10863242, 1.09642516, 1.07762337],
             [1.42269757, 1.42118434, 1.44273502, 1.43577571, 1.44400684, 1.44184737,
              1.44782832, 1.41978227, 1.39092208, 1.4059372, 1.40788646, 1.44052766,
              1.45241216, 1.43306098, 1.4174431],
             [1.13073885, 1.13110513, 1.11074708, 1.13364636, 1.13088149, 1.10888138,
              1.11856629, 1.13062931, 1.11944984, 1.12446239, 1.11671008, 1.10880034,
              1.08401709, 1.06959206, 1.07875225],
             [1.04706124, 1.04516831, 1.04253372, 1.03239987, 1.02072545, 0.99854316,
              0.9880258, 0.99669587, 0.99327676, 1.01400905, 1.03176742, 1.040511,
              1.01749645, 0.9936394, 0.98279746],
             [0.98996986, 1.00143564, 0.99491, 1.00188408, 1.00455845, 0.99127006,
              0.97925917, 0.9683482, 0.95335147, 0.93694787, 0.94308213, 0.92232874,
              0.91284091, 0.89689833, 0.88928858],
             [0.87418391, 0.86416601, 0.84425695, 0.8404494, 0.83903044, 0.8578708,
              0.86036185, 0.86107306, 0.8500772, 0.86981998, 0.86837929, 0.87204141,
              0.86633032, 0.84946077, 0.83287146],
             [1.14196118, 1.14660262, 1.14892712, 1.14909594, 1.14436624, 1.14450183,
              1.12349752, 1.12596664, 1.12213996, 1.1119989, 1.10257792, 1.10491258,
              1.11059842, 1.10509795, 1.10020097],
             [0.97282463, 0.96700147, 0.96252588, 0.9653878, 0.96057687, 0.95831051,
              0.94480909, 0.94804195, 0.95430286, 0.94103989, 0.92122519, 0.91010201,
              0.89280392, 0.89298243, 0.89165385],
             [0.94325468, 0.96436902, 0.96455242, 0.95243009, 0.94117647, 0.9480927,
              0.93539182, 0.95388718, 0.94597005, 0.96918424, 0.94781281, 0.93466815,
              0.94281559, 0.96520315, 0.96715441],
             [0.97478408, 0.98169225, 0.98712809, 0.98474769, 0.98559897, 0.98687073,
              0.99237486, 0.98209969, 0.9877653, 0.97399471, 0.96910087, 0.98416665,
              0.98423613, 0.99823861, 0.99545704],
             [0.85570269, 0.85575915, 0.85986132, 0.85693406, 0.8538012, 0.86191535,
              0.84981451, 0.85472102, 0.84564835, 0.83998883, 0.83478547, 0.82803648,
              0.8198736, 0.82265395, 0.8399404],
             [0.87022047, 0.85996258, 0.85961813, 0.85689572, 0.83947136, 0.82785597,
              0.86008789, 0.86776298, 0.86720209, 0.8676334, 0.89179317, 0.94202108,
              0.9422231, 0.93902708, 0.94479184],
             [0.90134907, 0.90407738, 0.90403991, 0.90201769, 0.90399238, 0.90906632,
              0.92693339, 0.93695966, 0.94242697, 0.94338265, 0.91981796, 0.91108804,
              0.90543476, 0.91737138, 0.94793657],
             [1.1977611, 1.18222564, 1.18439158, 1.18267865, 1.19286723, 1.20172869,
              1.21328691, 1.22624778, 1.22397075, 1.23857042, 1.24419893, 1.23929384,
              1.23418676, 1.23626739, 1.26754398],
             [1.24919678, 1.25754773, 1.26991161, 1.28020651, 1.30625667, 1.34790023,
              1.34399863, 1.32575181, 1.30795492, 1.30544841, 1.30303302, 1.32107766,
              1.32936244, 1.33001241, 1.33288462],
             [1.06768004, 1.03799276, 1.03637303, 1.02768449, 1.03296093, 1.05059016,
              1.03405057, 1.02747623, 1.03162734, 0.9961416, 0.97356208, 0.94241549,
              0.92754547, 0.92549227, 0.92138102],
             [1.09475614, 1.11526796, 1.11654299, 1.13103948, 1.13143264, 1.13889622,
              1.12442212, 1.13367018, 1.13982256, 1.14029944, 1.11979401, 1.10905389,
              1.10577769, 1.11166825, 1.09985155],
             [0.76530058, 0.76612841, 0.76542451, 0.76722683, 0.76014284, 0.74480073,
              0.76098396, 0.76156903, 0.76651952, 0.76533288, 0.78205934, 0.76842416,
              0.77487118, 0.77768683, 0.78801192],
             [0.98391336, 0.98075816, 0.98295341, 0.97386015, 0.96913803, 0.97370819,
              0.96419154, 0.97209861, 0.97441313, 0.96356162, 0.94745352, 0.93965462,
              0.93069645, 0.94020973, 0.94358232],
             [0.83561828, 0.82298088, 0.81738502, 0.81748588, 0.80904801, 0.80071489,
              0.83358256, 0.83451613, 0.85175032, 0.85954307, 0.86790024, 0.87170334,
              0.87863799, 0.87497981, 0.87888675],
             [0.98845573, 1.02092428, 0.99665283, 0.99141823, 0.99386619, 0.98733195,
              0.99644997, 0.99669587, 1.02559097, 1.01116651, 0.99988024, 0.97906749,
              0.99323123, 1.00204939, 0.99602148],
             [1.14930913, 1.15241949, 1.14300962, 1.14265542, 1.13984683, 1.08312397,
              1.05192626, 1.04230892, 1.05577278, 1.08569751, 1.12443486, 1.08891079,
              1.08603695, 1.05997314, 1.02160943],
             [1.11368269, 1.1057147, 1.11893431, 1.13778669, 1.1432272, 1.18257029,
              1.16226243, 1.16009196, 1.14467789, 1.14820235, 1.12386598, 1.12680236,
              1.12357937, 1.1159258, 1.12570828],
             [1.30379431, 1.30752186, 1.31206366, 1.31532267, 1.30625667, 1.31210239,
              1.29989156, 1.29203193, 1.27183516, 1.26830786, 1.2617743, 1.28656675,
              1.29734097, 1.29390205, 1.29345446],
             [0.83953719, 0.82701448, 0.82006005, 0.81188876, 0.80294864, 0.78772975,
              0.82848011, 0.8259679, 0.82435705, 0.83108634, 0.84373784, 0.83891093,
              0.84349247, 0.85637272, 0.86539395],
             [1.23450087, 1.2426022, 1.23537935, 1.23581293, 1.24522626, 1.2256767,
              1.21126648, 1.19377804, 1.18355337, 1.19674434, 1.21536573, 1.23653297,
              1.27962009, 1.27968392, 1.25907738],
             [0.9769662, 0.97400719, 0.98035944, 0.97581531, 0.95543282, 0.96480308,
              0.94686376, 0.93679073, 0.92540049, 0.92988835, 0.93442917, 0.92100464,
              0.91475304, 0.90249622, 0.9021363],
             [0.84986886, 0.8986851, 0.84295997, 0.87280534, 0.85659368, 0.88937573,
              0.894401, 0.90448993, 0.95495898, 0.92698333, 0.94745352, 0.92562488,
              0.96635366, 1.02520312, 1.0394296],
             [1.01922808, 1.00258203, 1.00974428, 1.00303417, 0.99765073, 1.00759019,
              0.99192968, 0.99747298, 0.99550759, 0.97583768, 0.9610168, 0.94779638,
              0.93759089, 0.93353431, 0.94121705],
             [0.86367411, 0.85558932, 0.85544346, 0.85103025, 0.84336613, 0.83434854,
              0.85813595, 0.84667961, 0.84374558, 0.85951183, 0.87194227, 0.89455097,
              0.88283929, 0.90349491, 0.90600675],
             [1.00947534, 1.00411055, 1.00698819, 0.99513687, 0.99291086, 1.00581626,
              0.98850522, 0.99291168, 0.98983209, 0.97511924, 0.96134615, 0.96382634,
              0.95011401, 0.9434686, 0.94637765],
             [1.05712571, 1.05459419, 1.05753012, 1.04880786, 1.05103857, 1.04800023,
              1.03024941, 1.04200483, 1.0402554, 1.03296979, 1.02191682, 1.02476275,
              1.02347523, 1.02517684, 1.04359571],
             [1.07084189, 1.06669497, 1.07937623, 1.07387988, 1.0794043, 1.0531801,
              1.07452771, 1.09383478, 1.1052447, 1.10322136, 1.09167939, 1.08772756,
              1.08859544, 1.09177338, 1.1096083],
             [0.86719222, 0.86628896, 0.86675156, 0.86425632, 0.86511809, 0.86287327,
              0.85169796, 0.85411285, 0.84886336, 0.84517414, 0.84843858, 0.84488343,
              0.83374329, 0.82812044, 0.82878599],
             [0.88389211, 0.92288667, 0.90282398, 0.91229186, 0.92023286, 0.92652175,
              0.94278865, 0.93682452, 0.98655146, 0.992237, 0.9798497, 0.93869677,
              0.96947771, 1.00362626, 0.98102351],
             [0.97082064, 0.95320233, 0.94534081, 0.94215593, 0.93967, 0.93092109,
              0.92662519, 0.93412152, 0.93501274, 0.92879506, 0.92110542, 0.91035556,
              0.90430364, 0.89994694, 0.90073864],
             [0.95861858, 0.95774543, 0.98254811, 0.98919472, 0.98684824, 0.98882205,
              0.97662234, 0.95601578, 0.94905385, 0.94934888, 0.97152609, 0.97163004,
              0.9700702, 0.97158948, 0.95884908],
             [0.83980439, 0.84726737, 0.85747, 0.85467221, 0.8556751, 0.84818516,
              0.85265681, 0.84502402, 0.82645665, 0.81743586, 0.83550406, 0.83338919,
              0.83511679, 0.82136617, 0.80921874],
             [0.95118156, 0.9466212, 0.94688098, 0.9508583, 0.9512441, 0.95440787,
              0.96364363, 0.96804412, 0.97136214, 0.97583768, 0.95571724, 0.96895368,
              0.97001634, 0.97082733, 0.98782366],
             [1.08910044, 1.08248968, 1.08492895, 1.08656923, 1.09454249, 1.10558188,
              1.1214086, 1.12292577, 1.13021031, 1.13342735, 1.14686068, 1.14502975,
              1.14474747, 1.14084037, 1.16142926],
             [1.06336033, 1.07365823, 1.08691496, 1.09764846, 1.11669863, 1.11856702,
              1.09764283, 1.08815849, 1.08044313, 1.09278827, 1.07003204, 1.08398066,
              1.09831768, 1.09298232, 1.09176125],
             [0.79772065, 0.78829196, 0.78581151, 0.77615922, 0.77035744, 0.77751194,
              0.79902974, 0.81437881, 0.80788828, 0.79603865, 0.78966436, 0.79949807,
              0.80172182, 0.82168155, 0.85587911],
             [1.0052447, 1.00007696, 1.00475899, 1.00613942, 1.00639561, 1.00162979,
              0.99860739, 1.00814981, 1.00574316, 0.99030032, 0.97682565, 0.97292596,
              0.96519561, 0.96173403, 0.95890284],
             [0.95808419, 0.9382568, 0.9654441, 0.95561201, 0.96987289, 0.96608031,
              0.99727185, 1.00781194, 1.03484236, 1.05333619, 1.0983263, 1.1704974,
              1.17025154, 1.18730553, 1.14242645]])
        self.assertTrue(np.allclose(result, expected))
        self.assertTrue(type(result) == type(expected))
        self.assertTrue(result.shape == expected.shape)

    def test_rebin_data(self):
        """Test rebin_data"""
        ## sample in double the time (even case since 10 % 2 = 0):
        ## (0+1)/2, (2+3)/2, (4+5)/2, (6+7)/2, (8+9)/2
        ## = 0.5, 2.5, 4.5, 6.5, 8.5
        ans_even = np.array([(i + 0.5) * np.ones(10, dtype=float)
                             for i in range(0, 10, 2)]).T
        self.assertTrue(np.array_equal(std.rebin_data(self.time_data, 2), ans_even))

        ## sample in triple the time (uneven since 10 % 3 = 1):
        ## (0+1+2)/3, (3+4+5)/3, (6+7+8)/3, (9)/1
        ## = 1, 4, 7, 9
        ans_odd = np.array([i * np.ones(10, dtype=float)
                            for i in (1, 4, 7, 9)]).T
        self.assertTrue(np.array_equal(std.rebin_data(self.time_data, 3), ans_odd))

    def test_get_prob_dist(self):
        """Test get_prob_dist"""
        lag_indices = np.array([1, 2, 3, 4])
        unit_indices = np.array([1, 3, 2, 4])
        answer = np.array([
            [0.0754717, 0.88207547, 0.04245283, 0., 0.],
            [0., 0., 0.09411765, 0.87058824, 0.03529412],
            [0.0049505, 0.09405941, 0.77722772, 0.11881188, 0.0049505],
            [0., 0., 0., 0.02352941, 0.97647059]
        ])
        result = std.get_prob_dist(self.transition_matrix, lag_indices, unit_indices)

        self.assertTrue(np.array_equal(result, answer))

    def test_get_prob_stats(self):
        """Test get_prob_stats"""
        probs = np.array([
            [0.0754717, 0.88207547, 0.04245283, 0., 0.],
            [0., 0., 0.09411765, 0.87058824, 0.03529412],
            [0.0049505, 0.09405941, 0.77722772, 0.11881188, 0.0049505],
            [0., 0., 0., 0.02352941, 0.97647059]
        ])
        unit_indices = np.array([1, 3, 2, 4])
        answer_up = np.array([0.04245283, 0.03529412, 0.12376238, 0.])
        answer_down = np.array([0.0754717, 0.09411765, 0.0990099, 0.02352941])
        answer_trend = np.array([-0.03301887 / 0.88207547, -0.05882353 / 0.87058824, 0.02475248 / 0.77722772, -0.02352941 / 0.97647059])
        answer_volatility = np.array([0.34221495, 0.33705421, 0.29226542, 0.38834223])

        result = std.get_prob_stats(probs, unit_indices)
        result_up = result[0]
        result_down = result[1]
        result_trend = result[2]
        result_volatility = result[3]

        self.assertTrue(np.allclose(result_up, answer_up))
        self.assertTrue(np.allclose(result_down, answer_down))
        self.assertTrue(np.allclose(result_trend, answer_trend))
        self.assertTrue(np.allclose(result_volatility, answer_volatility))
|
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# NOTE: a missing comma after 'build_from_document' previously fused it with
# 'fix_method_name' into the single string 'build_from_documentfix_method_name',
# silently dropping both names from the public API list.
__all__ = [
    'build',
    'build_from_document',
    'fix_method_name',
    'key2param'
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import MediaModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
logger = logging.getLogger(__name__)

# Matches one URI Template expression, e.g. '{userId}' in a method path.
URITEMPLATE = re.compile('{[^}]*}')
# Matches a variable name inside a template expression.
VARNAME = re.compile('[a-zA-Z0-9_-]+')

# URI Template for a service's discovery document; expanded with
# {api}/{apiVersion} in build().
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
                 '{api}/{apiVersion}/rest')

# Fallback docstring for generated methods that have no description.
DEFAULT_METHOD_DOC = 'A description of how to use this function'

# Parameters accepted by the stack, but not visible via discovery.
STACK_QUERY_PARAMETERS = ['trace', 'pp', 'userip', 'strict']
# Python reserved words.
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
                  'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
                  'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
                  'pass', 'print', 'raise', 'return', 'try', 'while' ]


def fix_method_name(name):
  """Fix method names to avoid reserved word conflicts.

  Args:
    name: string, method name.

  Returns:
    The name with a '_' appended if the name is a reserved word.
    (The docstring previously said "prefixed", but the underscore has
    always been appended.)
  """
  if name in RESERVED_WORDS:
    return name + '_'
  else:
    return name
def _add_query_parameter(url, name, value):
  """Adds a query parameter to a url.

  Replaces the current value if it already exists in the URL.

  Args:
    url: string, url to add the query parameter to.
    name: string, query parameter name.
    value: string, query parameter value.

  Returns:
    Updated query parameter. Does not update the url if value is None.
  """
  if value is None:
    return url
  # Decompose the URL, rewrite just the query component, and reassemble.
  parts = list(urlparse.urlparse(url))
  query = dict(parse_qsl(parts[4]))
  query[name] = value
  parts[4] = urllib.urlencode(query)
  return urlparse.urlunparse(parts)
def key2param(key):
  """Converts key names into parameter names.

  For example, converting "max-results" -> "max_results"

  Args:
    key: string, the method key name.

  Returns:
    A safe method name based on the key name.
  """
  # Prefix with 'x' so the result always starts with a letter.
  chars = [] if key[0].isalpha() else ['x']
  for ch in key:
    chars.append(ch if ch.isalnum() else '_')
  return ''.join(chars)
def build(serviceName,
          version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with an API. The serviceName and
  version are the names from the Discovery service.

  Args:
    serviceName: string, name of the service.
    version: string, the version of the service.
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    discoveryServiceUrl: string, a URI Template that points to the location of
      the discovery service. It should have two parameters {api} and
      {apiVersion} that when filled in produce an absolute URI to the discovery
      document for that service.
    developerKey: string, key obtained from
      https://code.google.com/apis/console.
    model: apiclient.Model, converts to and from the wire format.
    requestBuilder: apiclient.http.HttpRequest, encapsulator for an HTTP
      request.

  Returns:
    A Resource object with methods for interacting with the service.

  Raises:
    UnknownApiNameOrVersion: if the discovery service returns a 404.
    HttpError: for any other >= 400 discovery response.
    InvalidJsonError: if the discovery document is not valid JSON.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }

  if http is None:
    http = httplib2.Http()

  requested_url = uritemplate.expand(discoveryServiceUrl, params)

  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  if 'REMOTE_ADDR' in os.environ:
    requested_url = _add_query_parameter(requested_url, 'userIp',
                                         os.environ['REMOTE_ADDR'])
  logger.info('URL being requested: %s' % requested_url)

  resp, content = http.request(requested_url)

  # 404 gets its own exception type so callers can distinguish a bad
  # name/version from other HTTP failures.
  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)

  try:
    # Parsed here only to validate the document; the raw text is what gets
    # passed on, and build_from_document parses it again.
    service = simplejson.loads(content)
  except ValueError, e:
    logger.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()

  return build_from_document(content, discoveryServiceUrl, http=http,
      developerKey=developerKey, model=model, requestBuilder=requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object from a discovery
  document that is it given, as opposed to retrieving one over HTTP.

  Args:
    service: string, discovery document.
    base: string, base URI for all HTTP requests, usually the discovery URI.
    future: string, discovery document with future capabilities (deprecated).
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and de-serializes requests and
      responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with the service.
  """
  # future is no longer used.
  future = {}

  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])
  schema = Schemas(service)

  if model is None:
    # 'dataWrapper' in the feature list selects the wrapped JSON wire format.
    model = JsonModel('dataWrapper' in service.get('features', []))

  # The discovery document doubles as both the resource description and the
  # root description for the top-level Resource.
  return _createResource(http, base, model, requestBuilder, developerKey,
                         service, service, schema)
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer.
Args:
maxSize: string, size as a string, such as 2MB or 7GB.
Returns:
The size as an integer value.
"""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
def _createResource(http, baseUrl, model, requestBuilder,
developerKey, resourceDesc, rootDesc, schema):
"""Build a Resource from the API description.
Args:
http: httplib2.Http, Object to make http requests with.
baseUrl: string, base URL for the API. All requests are relative to this
URI.
model: apiclient.Model, converts to and from the wire format.
requestBuilder: class or callable that instantiates an
apiclient.HttpRequest object.
developerKey: string, key obtained from
https://code.google.com/apis/console
resourceDesc: object, section of deserialized discovery document that
describes a resource. Note that the top level discovery document
is considered a resource.
rootDesc: object, the entire deserialized discovery document.
schema: object, mapping of schema names to schema descriptions.
Returns:
An instance of Resource with all the methods attached for interacting with
that resource.
"""
class Resource(object):
"""A class for interacting with a resource."""
def __init__(self):
self._http = http
self._baseUrl = baseUrl
self._model = model
self._developerKey = developerKey
self._requestBuilder = requestBuilder
def createMethod(theclass, methodName, methodDesc, rootDesc):
"""Creates a method for attaching to a Resource.
Args:
theclass: type, the class to attach methods to.
methodName: string, name of the method to use.
methodDesc: object, fragment of deserialized discovery document that
describes the method.
rootDesc: object, the entire deserialized discovery document.
"""
methodName = fix_method_name(methodName)
pathUrl = methodDesc['path']
httpMethod = methodDesc['httpMethod']
methodId = methodDesc['id']
mediaPathUrl = None
accept = []
maxSize = 0
if 'mediaUpload' in methodDesc:
mediaUpload = methodDesc['mediaUpload']
# TODO(jcgregorio) Use URLs from discovery once it is updated.
parsed = list(urlparse.urlparse(baseUrl))
basePath = parsed[2]
mediaPathUrl = '/upload' + basePath + pathUrl
accept = mediaUpload['accept']
maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))
if 'parameters' not in methodDesc:
methodDesc['parameters'] = {}
# Add in the parameters common to all methods.
for name, desc in rootDesc.get('parameters', {}).iteritems():
methodDesc['parameters'][name] = desc
# Add in undocumented query parameters.
for name in STACK_QUERY_PARAMETERS:
methodDesc['parameters'][name] = {
'type': 'string',
'location': 'query'
}
if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
methodDesc['parameters']['body'] = {
'description': 'The request body.',
'type': 'object',
'required': True,
}
if 'request' in methodDesc:
methodDesc['parameters']['body'].update(methodDesc['request'])
else:
methodDesc['parameters']['body']['type'] = 'object'
if 'mediaUpload' in methodDesc:
methodDesc['parameters']['media_body'] = {
'description': 'The filename of the media request body.',
'type': 'string',
'required': False,
}
if 'body' in methodDesc['parameters']:
methodDesc['parameters']['body']['required'] = False
argmap = {} # Map from method parameter name to query parameter name
required_params = [] # Required parameters
repeated_params = [] # Repeated parameters
pattern_params = {} # Parameters that must match a regex
query_params = [] # Parameters that will be used in the query string
path_params = {} # Parameters that will be used in the base URL
param_type = {} # The type of the parameter
enum_params = {} # Allowable enumeration values for each parameter
if 'parameters' in methodDesc:
for arg, desc in methodDesc['parameters'].iteritems():
param = key2param(arg)
argmap[param] = arg
if desc.get('pattern', ''):
pattern_params[param] = desc['pattern']
if desc.get('enum', ''):
enum_params[param] = desc['enum']
if desc.get('required', False):
required_params.append(param)
if desc.get('repeated', False):
repeated_params.append(param)
if desc.get('location') == 'query':
query_params.append(param)
if desc.get('location') == 'path':
path_params[param] = param
param_type[param] = desc.get('type', 'string')
for match in URITEMPLATE.finditer(pathUrl):
for namematch in VARNAME.finditer(match.group(0)):
name = key2param(namematch.group(0))
path_params[name] = name
if name in query_params:
query_params.remove(name)
def method(self, **kwargs):
  # Don't bother with doc string, it will be over-written by createMethod.
  # Closure over createMethod's state: argmap, required_params, repeated_params,
  # pattern_params, enum_params, query_params, path_params, param_type,
  # methodName, methodDesc, methodId, httpMethod, pathUrl, mediaPathUrl,
  # accept, maxSize. Validates kwargs, builds the request URL/body, and
  # returns an HttpRequest via self._requestBuilder.

  # Reject any keyword that is not a known parameter of this API method.
  for name in kwargs.iterkeys():
    if name not in argmap:
      raise TypeError('Got an unexpected keyword argument "%s"' % name)

  # Remove args that have a value of None.
  keys = kwargs.keys()
  for name in keys:
    if kwargs[name] is None:
      del kwargs[name]

  for name in required_params:
    if name not in kwargs:
      raise TypeError('Missing required parameter "%s"' % name)

  # Validate values against any declared regex patterns.
  for name, regex in pattern_params.iteritems():
    if name in kwargs:
      if isinstance(kwargs[name], basestring):
        pvalues = [kwargs[name]]
      else:
        pvalues = kwargs[name]
      for pvalue in pvalues:
        if re.match(regex, pvalue) is None:
          raise TypeError(
              'Parameter "%s" value "%s" does not match the pattern "%s"' %
              (name, pvalue, regex))

  for name, enums in enum_params.iteritems():
    if name in kwargs:
      # We need to handle the case of a repeated enum
      # name differently, since we want to handle both
      # arg='value' and arg=['value1', 'value2']
      if (name in repeated_params and
          not isinstance(kwargs[name], basestring)):
        values = kwargs[name]
      else:
        values = [kwargs[name]]
      for value in values:
        if value not in enums:
          raise TypeError(
              'Parameter "%s" value "%s" is not an allowed value in "%s"' %
              (name, value, str(enums)))

  # Split the validated kwargs into query-string vs URL-path parameters,
  # casting each to its declared wire type.
  actual_query_params = {}
  actual_path_params = {}
  for key, value in kwargs.iteritems():
    to_type = param_type.get(key, 'string')
    # For repeated parameters we cast each member of the list.
    if key in repeated_params and type(value) == type([]):
      cast_value = [_cast(x, to_type) for x in value]
    else:
      cast_value = _cast(value, to_type)
    if key in query_params:
      actual_query_params[argmap[key]] = cast_value
    if key in path_params:
      actual_path_params[argmap[key]] = cast_value
  body_value = kwargs.get('body', None)
  media_filename = kwargs.get('media_body', None)

  if self._developerKey:
    actual_query_params['key'] = self._developerKey

  model = self._model
  # If there is no schema for the response then presume a binary blob.
  if methodName.endswith('_media'):
    model = MediaModel()
  elif 'response' not in methodDesc:
    model = RawModel()

  headers = {}
  headers, params, query, body = model.request(headers,
      actual_path_params, actual_query_params, body_value)

  expanded_url = uritemplate.expand(pathUrl, params)
  url = urlparse.urljoin(self._baseUrl, expanded_url + query)

  resumable = None
  multipart_boundary = ''

  if media_filename:
    # Ensure we end up with a valid MediaUpload object.
    if isinstance(media_filename, basestring):
      (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
      if media_mime_type is None:
        raise UnknownFileType(media_filename)
      if not mimeparse.best_match([media_mime_type], ','.join(accept)):
        raise UnacceptableMimeTypeError(media_mime_type)
      media_upload = MediaFileUpload(media_filename, media_mime_type)
    elif isinstance(media_filename, MediaUpload):
      media_upload = media_filename
    else:
      raise TypeError('media_filename must be str or MediaUpload.')

    # Check the maxSize
    if maxSize > 0 and media_upload.size() > maxSize:
      raise MediaUploadSizeError("Media larger than: %s" % maxSize)

    # Use the media path uri for media uploads
    expanded_url = uritemplate.expand(mediaPathUrl, params)
    url = urlparse.urljoin(self._baseUrl, expanded_url + query)
    if media_upload.resumable():
      url = _add_query_parameter(url, 'uploadType', 'resumable')

    if media_upload.resumable():
      # This is all we need to do for resumable, if the body exists it gets
      # sent in the first request, otherwise an empty body is sent.
      resumable = media_upload
    else:
      # A non-resumable upload
      if body is None:
        # This is a simple media upload
        headers['content-type'] = media_upload.mimetype()
        body = media_upload.getbytes(0, media_upload.size())
        url = _add_query_parameter(url, 'uploadType', 'media')
      else:
        # This is a multipart/related upload.
        msgRoot = MIMEMultipart('related')
        # msgRoot should not write out it's own headers
        setattr(msgRoot, '_write_headers', lambda self: None)

        # attach the body as one part
        msg = MIMENonMultipart(*headers['content-type'].split('/'))
        msg.set_payload(body)
        msgRoot.attach(msg)

        # attach the media as the second part
        msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
        msg['Content-Transfer-Encoding'] = 'binary'
        payload = media_upload.getbytes(0, media_upload.size())
        msg.set_payload(payload)
        msgRoot.attach(msg)

        body = msgRoot.as_string()

        multipart_boundary = msgRoot.get_boundary()
        headers['content-type'] = ('multipart/related; '
                                   'boundary="%s"') % multipart_boundary
        url = _add_query_parameter(url, 'uploadType', 'multipart')

  logger.info('URL being requested: %s' % url)
  return self._requestBuilder(self._http,
                              model.response,
                              url,
                              method=httpMethod,
                              body=body,
                              headers=headers,
                              methodId=methodId,
                              resumable=resumable)
docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
if len(argmap) > 0:
docs.append('Args:\n')
# Skip undocumented params and params common to all methods.
skip_parameters = rootDesc.get('parameters', {}).keys()
skip_parameters.append(STACK_QUERY_PARAMETERS)
for arg in argmap.iterkeys():
if arg in skip_parameters:
continue
repeated = ''
if arg in repeated_params:
repeated = ' (repeated)'
required = ''
if arg in required_params:
required = ' (required)'
paramdesc = methodDesc['parameters'][argmap[arg]]
paramdoc = paramdesc.get('description', 'A parameter')
if '$ref' in paramdesc:
docs.append(
(' %s: object, %s%s%s\n The object takes the'
' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
schema.prettyPrintByName(paramdesc['$ref'])))
else:
paramtype = paramdesc.get('type', 'string')
docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
repeated))
enum = paramdesc.get('enum', [])
enumDesc = paramdesc.get('enumDescriptions', [])
if enum and enumDesc:
docs.append(' Allowed values\n')
for (name, desc) in zip(enum, enumDesc):
docs.append(' %s - %s\n' % (name, desc))
if 'response' in methodDesc:
if methodName.endswith('_media'):
docs.append('\nReturns:\n The media object as a string.\n\n ')
else:
docs.append('\nReturns:\n An object of the form:\n\n ')
docs.append(schema.prettyPrintSchema(methodDesc['response']))
setattr(method, '__doc__', ''.join(docs))
setattr(theclass, methodName, method)
def createNextMethod(theclass, methodName, methodDesc, rootDesc):
  """Creates any _next methods for attaching to a Resource.

  The _next methods allow for easy iteration through list() responses.

  Args:
    theclass: type, the class to attach methods to.
    methodName: string, name of the method to use.
    methodDesc: object, fragment of deserialized discovery document that
      describes the method.
    rootDesc: object, the entire deserialized discovery document.
  """
  methodName = fix_method_name(methodName)
  # NOTE(review): methodId is computed but never referenced by methodNext
  # below — presumably kept for parity with createMethod; confirm.
  methodId = methodDesc['id'] + '.next'

  def methodNext(self, previous_request, previous_response):
    """Retrieves the next page of results.

    Args:
      previous_request: The request for the previous page.
      previous_response: The response from the request for the previous page.

    Returns:
      A request object that you can call 'execute()' on to request the next
      page. Returns None if there are no more items in the collection.
    """
    # Retrieve nextPageToken from previous_response
    # Use as pageToken in previous_request to create new request.
    if 'nextPageToken' not in previous_response:
      return None

    # Shallow copy so the caller's request object is left untouched.
    request = copy.copy(previous_request)

    pageToken = previous_response['nextPageToken']
    parsed = list(urlparse.urlparse(request.uri))
    q = parse_qsl(parsed[4])

    # Find and remove old 'pageToken' value from URI
    newq = [(key, value) for (key, value) in q if key != 'pageToken']
    newq.append(('pageToken', pageToken))
    parsed[4] = urllib.urlencode(newq)
    uri = urlparse.urlunparse(parsed)

    request.uri = uri

    logger.info('URL being requested: %s' % uri)
    return request

  setattr(theclass, methodName, methodNext)
# Add basic methods to Resource
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
createMethod(Resource, methodName, methodDesc, rootDesc)
# Add in _media methods. The functionality of the attached method will
# change when it sees that the method name ends in _media.
if methodDesc.get('supportsMediaDownload', False):
createMethod(Resource, methodName + '_media', methodDesc, rootDesc)
# Add in nested resources
if 'resources' in resourceDesc:
def createResourceMethod(theclass, methodName, methodDesc, rootDesc):
  """Create a method on the Resource to access a nested Resource.

  Args:
    theclass: type, the class to attach methods to.
    methodName: string, name of the method to use.
    methodDesc: object, fragment of deserialized discovery document that
      describes the method.
    rootDesc: object, the entire deserialized discovery document.
  """
  methodName = fix_method_name(methodName)

  def methodResource(self):
    # Lazily build the nested Resource, reusing this resource's transport,
    # model, request builder and developer key (closure over _createResource
    # and schema from the enclosing scope).
    return _createResource(self._http, self._baseUrl, self._model,
                           self._requestBuilder, self._developerKey,
                           methodDesc, rootDesc, schema)

  setattr(methodResource, '__doc__', 'A collection resource.')
  # Marker used elsewhere to distinguish nested resources from API methods.
  setattr(methodResource, '__is_resource__', True)
  setattr(theclass, methodName, methodResource)
for methodName, methodDesc in resourceDesc['resources'].iteritems():
createResourceMethod(Resource, methodName, methodDesc, rootDesc)
# Add _next() methods
# Look for response bodies in schema that contain nextPageToken, and methods
# that take a pageToken parameter.
if 'methods' in resourceDesc:
for methodName, methodDesc in resourceDesc['methods'].iteritems():
if 'response' in methodDesc:
responseSchema = methodDesc['response']
if '$ref' in responseSchema:
responseSchema = schema.get(responseSchema['$ref'])
hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
{})
hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
if hasNextPageToken and hasPageToken:
createNextMethod(Resource, methodName + '_next',
resourceDesc['methods'][methodName],
methodName)
return Resource()
|
|
from __future__ import division
from cocos.actions import AccelDeccel
from cocos.actions import Delay
from cocos.actions import FadeTo
from cocos.actions import JumpBy
from cocos.actions import Move
from cocos.actions import MoveBy
from cocos.actions import Repeat
from cocos.actions import Reverse
from cocos.actions import RotateBy
from cocos.scenes.transitions import FlipAngular3DTransition
from pyglet.window import key
import cocos
import cocos.collision_model as cm
import pyglet
import levelFetcher
import Game_elements
class Game(cocos.layer.ColorLayer):
    """Main gameplay layer.

    Renders the level as a grid of 32px tiles and handles player movement.
    The level has three grids: ``persistent`` (always active) plus two
    mutually exclusive dimensions, ``overworld`` and ``upsideDown``, toggled
    by portals.
    """

    is_event_handler = True

    def __init__(self, levelNum):
        """Load level ``levelNum`` and schedule per-frame redraws."""
        super(Game, self).__init__(22, 102, 225, 255)  # background RGBA
        self.level = levelFetcher.getLevel(levelNum)
        self.rows = len(self.level.overworld)
        self.cols = len(self.level.overworld[0])
        self.isOverworld = True
        self.onSwitch = False
        # spritesList[row][col]: sprites currently drawn in that cell.
        self.spritesList = self.make3dList(self.rows, self.cols)
        # Dirty cells needing a redraw; start with the full grid.
        self.cellsToDraw = [(r, c) for r in range(self.rows)
                            for c in range(self.cols)]
        self.schedule(self.update)

    def make3dList(self, rows, cols):
        """Return a rows x cols grid whose cells are independent empty lists."""
        return [[[] for _ in range(cols)] for _ in range(rows)]

    def redrawAll(self):
        """Redraw dirty cells, then the player, then clear the dirty set."""
        # BUG FIX: previously this ran unconditionally every frame, so
        # drawPlayer() appended a duplicate player sprite per frame even when
        # nothing was dirty (sprites were never removed for clean cells).
        if not self.cellsToDraw:
            return
        for (row, col) in self.cellsToDraw:
            self.removeSprites(row, col)
            self.drawFloorCell(row, col)
            self.drawCell(row, col, self.level.persistent)
            if self.isOverworld:
                self.drawCell(row, col, self.level.overworld)
            else:
                self.drawCell(row, col, self.level.upsideDown)
        self.drawPlayer()
        self.cellsToDraw = []

    def removeSprites(self, row, col):
        """Detach and forget every sprite drawn in cell (row, col)."""
        for sprite in self.spritesList[row][col]:
            self.remove(sprite)
        self.spritesList[row][col] = []

    def addSprite(self, row, col, sprite):
        """Attach *sprite* to the layer and record it against cell (row, col)."""
        self.spritesList[row][col].append(sprite)
        self.add(sprite)

    def drawPlayer(self):
        """Draw the player sprite at its current grid cell."""
        (row, col) = self.level.player.location
        sprite = cocos.sprite.Sprite(self.level.player.overImg)
        # Grid rows count downward; convert to screen coordinates.
        sprite.position = 16 + 32 * col, -16 + 32 * (self.rows - row)
        self.addSprite(row, col, sprite)

    def drawFloorCell(self, row, col):
        """Draw the background floor tile for the active dimension."""
        if self.isOverworld:
            sprite = cocos.sprite.Sprite(Game_elements.Floor().overImg)
        else:
            sprite = cocos.sprite.Sprite(Game_elements.Floor().underImg)
        sprite.position = 16 + 32 * col, -16 + 32 * (self.rows - row)
        self.addSprite(row, col, sprite)

    def drawCell(self, row, col, dimension):
        """Draw the element of *dimension* at (row, col); floor is skipped."""
        if isinstance(dimension[row][col], Game_elements.Floor):
            return
        if self.isOverworld:
            sprite = cocos.sprite.Sprite(dimension[row][col].overImg)
        else:
            sprite = cocos.sprite.Sprite(dimension[row][col].underImg)
        sprite.position = 16 + 32 * col, -16 + 32 * (self.rows - row)
        self.addSprite(row, col, sprite)

    def on_enter(self):
        super(Game, self).on_enter()
        # Background music hooks are intentionally disabled for now.
        #game_music = pyglet.resource.media('tetris.mp3', streaming=False)
        # try:
        #     music_player.queue(game_music)
        # except:
        #     pass
        # music_player.play()
        # music_player.eos_action = 'loop'

    def on_exit(self):
        super(Game, self).on_exit()
        #music_player.seek(1)
        #music_player.pause()

    def isLegalMove(self, gameElement, drow, dcol, dimension):
        """Return True if *gameElement* may move by (drow, dcol) in *dimension*.

        A solid, movable blocker is pushed recursively via doMove; the move is
        legal iff the push succeeds.
        """
        (row, col) = gameElement.location
        testObject = dimension[row + drow][col + dcol]
        if testObject.isSolid:
            if testObject.isMovable:
                return self.doMove(testObject, drow, dcol)
            return False
        return True

    def updateLocation(self, gameElement, row, col, newRow, newCol):
        """Move *gameElement* within its own grid, leaving floor behind."""
        grids = {
            "persistent": self.level.persistent,
            "overworld": self.level.overworld,
            "upsideDown": self.level.upsideDown,
        }
        grid = grids.get(gameElement.dimension)
        if grid is not None:
            grid[row][col] = Game_elements.Floor((row, col), gameElement.dimension)
            grid[newRow][newCol] = gameElement
        # The player lives outside the grids; only its location is tracked.
        gameElement.location = (newRow, newCol)

    def updateStates(self):
        """Apply cell interactions (portal/key/keywall/switch/door/ladder)
        over the persistent layer plus the currently active dimension."""
        dimensionList = [(self.level.persistent, "persistent")]
        if self.isOverworld:
            dimensionList.append((self.level.overworld, "overworld"))
        else:
            dimensionList.append((self.level.upsideDown, "upsideDown"))
        for row in range(self.rows):
            for col in range(self.cols):
                for dimension, dimensionName in dimensionList:
                    if isinstance(dimension[row][col], Game_elements.Portal):
                        if self.level.player.location == (row, col):
                            self.isOverworld = not self.isOverworld
                            # BUG FIX: use fresh comprehension variables —
                            # reusing row/col here clobbered the enclosing
                            # loop indices under Python 2 scoping rules.
                            self.cellsToDraw = [(r, c)
                                                for r in range(self.rows)
                                                for c in range(self.cols)]
                    if isinstance(dimension[row][col], Game_elements.Key):
                        if self.level.player.location == (row, col):
                            dimension[row][col] = Game_elements.Floor((row, col), dimensionName)
                            self.level.player.addKey()
                    if isinstance(dimension[row][col], Game_elements.Keywall):
                        if self.level.player.keyCount > 0:
                            dimension[row][col].isSolid = False
                            if self.level.player.location == (row, col):
                                self.level.player.removeKey()
                                dimension[row][col] = Game_elements.Floor((row, col), dimensionName)
                    if isinstance(dimension[row][col], Game_elements.Switch):
                        # A switch is held down by a rock in the persistent layer.
                        if isinstance(self.level.persistent[row][col], Game_elements.Rock):
                            dimension[row][col].activate()
                        else:
                            dimension[row][col].deactivate()
                    if isinstance(dimension[row][col], Game_elements.Door):
                        # Unlock when any orthogonal neighbour is an active switch.
                        # NOTE(review): no bounds check — assumes doors never sit
                        # on the outer edge of the grid; confirm in level data.
                        for (drow, dcol) in ((1, 0), (-1, 0), (0, -1), (0, 1)):
                            if isinstance(dimension[row + drow][col + dcol], Game_elements.Switch):
                                if dimension[row + drow][col + dcol].isOn:
                                    dimension[row][col].unlock()
                                    self.cellsToDraw.append((row, col))
                    if isinstance(dimension[row][col], Game_elements.Ladder):
                        if self.level.player.location == (row, col):
                            print("YOU WON!")

    def doMove(self, gameElement, drow, dcol):
        """Attempt to move *gameElement* by (drow, dcol).

        Returns True and updates game state on success, False if the move is
        blocked in any grid the element must clear.
        """
        (row, col) = gameElement.location
        (newRow, newCol) = (row + drow, col + dcol)
        if gameElement.dimension in ("player", "persistent"):
            # Player/persistent objects must clear both the persistent layer
            # and whichever dimension is currently active.
            if not self.isLegalMove(gameElement, drow, dcol, self.level.persistent):
                return False
            active = self.level.overworld if self.isOverworld else self.level.upsideDown
            if not self.isLegalMove(gameElement, drow, dcol, active):
                return False
        if gameElement.dimension == "overworld":
            if not self.isLegalMove(gameElement, drow, dcol, self.level.overworld):
                return False
        # BUG FIX: this branch previously re-tested "overworld" a second time,
        # so upside-down objects were never validated against their own grid.
        if gameElement.dimension == "upsideDown":
            if not self.isLegalMove(gameElement, drow, dcol, self.level.upsideDown):
                return False
        self.updateLocation(gameElement, row, col, newRow, newCol)
        self.updateStates()
        self.cellsToDraw.append((row, col))
        self.cellsToDraw.append((newRow, newCol))
        return True

    def on_key_press(self, symbol, modifiers):
        """Translate arrow keys into one-cell player moves."""
        if symbol == key.LEFT:
            self.doMove(self.level.player, 0, -1)
        elif symbol == key.RIGHT:
            self.doMove(self.level.player, 0, +1)
        elif symbol == key.UP:
            self.doMove(self.level.player, -1, 0)
        elif symbol == key.DOWN:
            self.doMove(self.level.player, +1, 0)

    def update(self, dt):
        """Scheduled once per frame; flushes any pending redraws."""
        self.redrawAll()
class MainMenu(cocos.layer.ColorLayer if False else cocos.menu.Menu):
    """Title-screen menu: new game, options, quit."""

    def __init__(self):
        super(MainMenu, self).__init__('Upside-Down')
        # Retro look: pixel title font, grey items, white highlight.
        self.font_title['font_name'] = 'Edit Undo Line BRK'
        self.font_title['font_size'] = 43
        self.font_title['color'] = (255, 180, 0, 255)
        self.font_item['color'] = (55, 55, 55, 255)
        self.font_item_selected['color'] = (255, 255, 255, 255)
        entries = [
            cocos.menu.MenuItem('New game', self.on_new_game),
            cocos.menu.MenuItem('Options', self.on_options),
            cocos.menu.MenuItem('Quit', self.on_quit),
        ]
        self.create_menu(entries, cocos.menu.shake(), cocos.menu.shake_back())

    def on_new_game(self):
        """Push level 1 behind a 3D flip transition."""
        game_scene = cocos.scene.Scene(Game(1))
        cocos.director.director.push(
            FlipAngular3DTransition(game_scene, 1))

    def on_options(self):
        """Show the options menu (multiplex layer index 1)."""
        self.parent.switch_to(1)

    def on_quit(self):
        """Terminate the application."""
        pyglet.app.exit()
class OptionsMenu(cocos.menu.Menu):
    """Options submenu: fullscreen toggle and back navigation."""

    def __init__(self):
        super(OptionsMenu, self).__init__('Upside-Down')
        # Same retro styling as the main menu.
        self.font_title['font_name'] = 'Edit Undo Line BRK'
        self.font_title['font_size'] = 43
        self.font_title['color'] = (255, 180, 0, 255)
        self.font_item['color'] = (55, 55, 55, 255)
        self.font_item_selected['color'] = (255, 255, 255, 255)
        # FPS toggle kept disabled for now:
        #items.append(cocos.menu.ToggleMenuItem(
        #    'Show FPS:',
        #    self.on_show_fps,
        #    cocos.director.director.show_FPS)
        #)
        entries = [
            cocos.menu.MenuItem('Fullscreen', self.on_fullscreen),
            cocos.menu.MenuItem('Back', self.on_quit),
        ]
        self.create_menu(entries, cocos.menu.shake(), cocos.menu.shake_back())

    def on_fullscreen(self):
        """Flip the window's fullscreen state."""
        window = cocos.director.director.window
        window.set_fullscreen(not window.fullscreen)

    def on_quit(self):
        """Return to the main menu (multiplex layer index 0)."""
        self.parent.switch_to(0)

    def on_show_fps(self, value):
        """Callback for the (disabled) FPS toggle."""
        cocos.director.director.show_FPS = value
class BackgroundLayer(cocos.layer.Layer):
    """Decorative menu backdrop: several player sprites with idle animations."""

    def __init__(self):
        super(BackgroundLayer, self).__init__()
        sprite_img = Game_elements.Player().overImg

        self.image = cocos.sprite.Sprite(sprite_img)
        self.image.position = 400, 75
        self.add(self.image, z=0)

        self.player = cocos.sprite.Sprite(sprite_img)
        self.player.position = 0, 295
        self.add(self.player, z=1)

        self.enemy = cocos.sprite.Sprite(sprite_img)
        self.enemy.position = 385, 75
        self.add(self.enemy, z=1)

        self.boss = cocos.sprite.Sprite(sprite_img)
        self.boss.scale = 0.4
        rect = self.boss.get_rect()
        rect.midbottom = 600, 50
        self.boss.position = rect.center
        self.add(self.boss, z=1)

        def wobble():
            # Fresh action instance per sprite (actions are stateful).
            return Repeat(
                MoveBy((-25, 0), 0.25) +
                MoveBy((50, 0), 0.5) +
                MoveBy((-25, 0), 0.25))

        self.player.do(wobble())
        self.enemy.do(wobble())
        self.boss.do(Repeat(FadeTo(155, 0.5) + FadeTo(255, 0.5)))
if __name__ == '__main__':
    # Window sized to the 12x12 grid of 32-pixel tiles.
    cocos.director.director.init(
        width=12*32,
        height=12*32,
        caption="The Upside-Down"
    )
    # NOTE(review): music_player is created but never queued or started here;
    # the playback hooks in Game.on_enter are commented out — confirm intent.
    music_player = pyglet.media.Player()
    music_player.volume = 1
    # Layer 0: animated background; layer 1: main/options menu multiplex.
    scene = cocos.scene.Scene()
    scene.add(cocos.layer.MultiplexLayer(MainMenu(), OptionsMenu()), z=1)
    scene.add(BackgroundLayer(), z=0)
    cocos.director.director.run(scene)
|
|
from __future__ import absolute_import
from sentry.utils.rust import merge_rust_info_frames, starts_with, strip_symbol
STACKTRACE = """
stacktrace: stack backtrace:
0: 0x111e51cf4 - backtrace::backtrace::trace::h38e3b1de9f341e04
at /.cargo/registry/src/github.com-1ecc6299db9ec823/backtrace-0.3.9/src/backtrace/mod.rs:42
1: 0x111e4a3be - failure::backtrace::Backtrace::new::h2abf3908d09948f1
at /.cargo/registry/src/github.com-1ecc6299db9ec823/failure-0.1.3/src/backtrace/mod.rs:111
2: 0x11163e27c - <failure::error::Error as core::convert::From<F>>::from::h5ae4b38f39150cb2
at /.cargo/registry/src/github.com-1ecc6299db9ec823/failure-0.1.3/src/error/mod.rs:36
- <T as core::convert::Into<U>>::into::h58e05f056150874e
at libcore/convert.rs:456
3: 0x11163a9b7 - symbolic::debuginfo::symbolic_normalize_debug_id::{{closure}}::he767b4111eb41a33
at /symbolic/cabi/src/debuginfo.rs:160
4: 0x111e7f5de - ___rust_maybe_catch_panic
at /rustc/da5f414c2c0bfe5198934493f04c676e2b23ff2e/src/libpanic_unwind/lib.rs:103
5: 0x111618fcb - std::panic::catch_unwind::h66eea40447da0e66
at /symbolic/cabi/libstd/panic.rs:392
6: 0x11160b9c1 - symbolic::utils::landingpad::h3cd528225184a301
at /symbolic/cabi/src/utils.rs:55
7: 0x111632f43 - _symbolic_normalize_debug_id
at /symbolic/cabi/src/utils.rs:74
8: 0x7fff69609f6b - _ffi_call_unix64
9: 0x7fff6960a786 - _ffi_call
10: 0x10fab19d6 - _cdata_call
11: 0x10efc014f - _PyObject_Call
12: 0x10f069f43 - _Py_Main
"""
STACKTRACE_SEMAPHORE_LINUX = """
stacktrace: stack backtrace:
0: failure::backtrace::internal::InternalBacktrace::new::hc23de41c89e8c745 (0x7f2d0af481ba)
at /home/parallels/.cargo/registry/src/github.com-1ecc6299db9ec823/failure-0.1.5/src/backtrace/internal.rs:44
1: <T as core::convert::Into<U>>::into::hd4b72738b7e18e92 (0x7f2d0b070e3a)
at /home/parallels/.cargo/registry/src/github.com-1ecc6299db9ec823/failure-0.1.5/src/backtrace/mod.rs:111
semaphore::utils::set_panic_hook::{{closure}}::hacec55cb6b285e6a
at src/utils.rs:45
2: std::panicking::rust_panic_with_hook::h3c82d7c1012a629a (0x7f2d0b3d6fb6)
at src/libstd/panicking.rs:477
3: std::panicking::begin_panic::h3db9895361250d80 (0x7f2d0b06ba94)
at /rustc/224f0bc90c010b88ca6ec600c9b02f6e3638d78e/src/libstd/panicking.rs:407
4: semaphore::processing::semaphore_test_panic::{{closure}}::hb800a646d3f454a4 (0x7f2d0b06e999)
at src/processing.rs:119
5: std::panic::catch_unwind::hdc352a616e262d7e (0x7f2d0b03c59a)
at /rustc/224f0bc90c010b88ca6ec600c9b02f6e3638d78e/src/libstd/panicking.rs:292
semaphore_test_panic
at src/utils.rs:53
6: ffi_call_unix64 (0x7f2d0b839df0)
7: ffi_call (0x7f2d0b839858)
at ../src/x86/ffi64.c:525
8: cdata_call (0x7f2d0ba57d64)
at c/_cffi_backend.c:3025
9: PyObject_Call (0x459eee)
10: _PyEval_EvalFrameDefault (0x552c49)
11: <unknown> (0x54fbe1)
12: <unknown> (0x54fe6d)
13: _PyEval_EvalFrameDefault (0x5546cf)
14: <unknown> (0x54f0e8)
15: <unknown> (0x550116)
16: _PyEval_EvalFrameDefault (0x5546cf)
17: <unknown> (0x54fbe1)
18: PyEval_EvalCode (0x550b93)
19: <unknown> (0x42ca41)
20: PyRun_InteractiveLoopFlags (0x42ccb6)
21: PyRun_AnyFileExFlags (0x42ce5c)
22: Py_Main (0x442143)
23: main (0x421ff4)
24: __libc_start_main (0x7f2d0e8beb97)
25: _start (0x4220aa)
26: <unknown> (0x0)
"""
def get_event(stacktrace):
    """Build a minimal Sentry event payload embedding *stacktrace* in both the
    log entry and the single exception value, with one Python frame."""
    message = "invalid debug identifier\n\n%s" % stacktrace
    python_frame = {
        "abs_path": "/symbolic/py/symbolic/utils.py",
        "filename": "symbolic/utils.py",
        "function": "rustcall",
        "in_app": True,
        "lineno": 93,
        "module": "symbolic.utils",
    }
    exception_value = {
        "type": "ParseDebugIdError",
        "value": message,
        "stacktrace": {"frames": [python_frame]},
    }
    return {
        "event_id": "fe628bfa48064c9b97ce7e75a19e6197",
        "level": "error",
        "platform": "python",
        "logentry": {"formatted": message},
        "exception": {"values": [exception_value]},
    }
def get_exc_info(rust_info):
    """Return a fake ``sys.exc_info()`` triple whose exception optionally
    carries a ``rust_info`` attribute (omitted when *rust_info* is None)."""
    error = ValueError("hello world")
    if rust_info is not None:
        error.rust_info = rust_info
    return type(error), error, None
def test_merge_rust_info():
    """merge_rust_info_frames should fold the macOS-style Rust backtrace into
    the exception, strip it from the messages, and mark the event native."""
    event = get_event(STACKTRACE)
    exc_info = get_exc_info(STACKTRACE)
    merge_rust_info_frames(event, {"exc_info": exc_info})
    assert event["platform"] == "native"
    assert event["logentry"]["formatted"] == "invalid debug identifier"
    exception = event["exception"]["values"][0]
    assert exception["value"] == "invalid debug identifier"
    frames = exception["stacktrace"]["frames"]
    assert len(frames) == 8
    # The original Python frame is kept at the bottom of the stack.
    assert frames[0]["platform"] == "python"
    # Top frame
    assert frames[7]["instruction_addr"] == "0x11163e27c"
    assert frames[7]["function"] == "<failure::error::Error as core::convert::From<F>>::from"
    assert frames[7]["package"] == "failure"
    assert frames[7]["in_app"] is False
    assert frames[7]["filename"] == "mod.rs"
    assert frames[7]["lineno"] == 36
    # Inlined frame, same address
    # NOTE(review): this re-asserts frames[7]; the comment suggests it was
    # meant to check frames[6]["instruction_addr"] — confirm and fix.
    assert frames[7]["instruction_addr"] == "0x11163e27c"
    assert frames[6]["function"] == "<T as core::convert::Into<U>>::into"
    assert frames[6]["package"] == "core"
    assert frames[6]["in_app"] is False
    assert frames[6]["filename"] == "convert.rs"
    assert frames[6]["lineno"] == 456
def test_merge_rust_info_linux():
    """Same merge as above but for the Linux/semaphore backtrace format
    (``symbol (0xaddr)`` lines); only app frames are kept."""
    event = get_event(STACKTRACE_SEMAPHORE_LINUX)
    exc_info = get_exc_info(STACKTRACE_SEMAPHORE_LINUX)
    merge_rust_info_frames(event, {"exc_info": exc_info})
    assert event["platform"] == "native"
    assert event["logentry"]["formatted"] == "invalid debug identifier"
    exception = event["exception"]["values"][0]
    assert exception["value"] == "invalid debug identifier"
    frames = exception["stacktrace"]["frames"]
    assert len(frames) == 4
    # The original Python frame stays at the bottom.
    assert frames[0]["platform"] == "python"
    # Top frame
    assert frames[-1]["instruction_addr"] == "0x7f2d0b06e999"
    assert frames[-1]["function"] == "semaphore::processing::semaphore_test_panic::{{closure}}"
    # Inlined frame, same address
    assert frames[-2]["instruction_addr"] == "0x7f2d0b03c59a"
    assert frames[-2]["function"] == "std::panic::catch_unwind"
def test_without_exc_info():
    """Without an exc_info hint the event must pass through unchanged."""
    event = get_event(STACKTRACE)
    merge_rust_info_frames(event, {})
    assert event["platform"] == "python"
def test_without_rust_info():
    """An exception lacking the ``rust_info`` attribute leaves the event alone."""
    event = get_event(STACKTRACE)
    exc_info = get_exc_info(None)
    merge_rust_info_frames(event, {"exc_info": exc_info})
    assert event["platform"] == "python"
def test_without_stacktrace():
    """A rust_info header with no frames still strips the message but adds
    no native frames (only the original Python frame remains)."""
    stacktrace = "stacktrace: stack backtrace:\n\n"
    event = get_event(stacktrace)
    exc_info = get_exc_info(stacktrace)
    merge_rust_info_frames(event, {"exc_info": exc_info})
    assert event["platform"] == "native"
    assert event["logentry"]["formatted"] == "invalid debug identifier"
    exception = event["exception"]["values"][0]
    assert exception["value"] == "invalid debug identifier"
    frames = exception["stacktrace"]["frames"]
    assert len(frames) == 1
def test_without_exception():
    """If the event has no exception payload there is nothing to merge into."""
    event = get_event(STACKTRACE)
    exc_info = get_exc_info(STACKTRACE)
    del event["exception"]
    merge_rust_info_frames(event, {"exc_info": exc_info})
    assert event["platform"] == "python"
def test_starts_with():
    """starts_with() must handle plain names, generics (both mangled ``..``
    and plain ``::`` forms), trait impls, and blanket impls."""
    matching = [
        # Basic functions
        ("__rust_maybe_catch_panic", "__rust"),
        ("futures::task_impl::std::set", "futures::"),
        # Generics
        ("_<futures..task_impl..Spawn<T>>::enter::_{{closure}}", "futures::"),
        ("<futures::task_impl::Spawn<T>>::enter::{{closure}}", "futures::"),
        # Trait implementations
        ("<failure::error::Error as core::convert::From<F>>::from", "failure::"),
        ("_<failure::error::Error as core::convert::From<F>>::from", "failure::"),
        # Blanket implementations
        ("<T as core::convert::Into<U>>::into", "core::"),
    ]
    non_matching = [
        ("futures::task_impl::std::set", "tokio::"),
        ("_<futures..task_impl..Spawn<T>>::enter::_{{closure}}", "tokio::"),
        ("<futures::task_impl::Spawn<T>>::enter::{{closure}}", "tokio::"),
    ]
    for symbol, prefix in matching:
        assert starts_with(symbol, prefix)
    for symbol, prefix in non_matching:
        assert not starts_with(symbol, prefix)
def test_strip_symbol():
    """strip_symbol() drops a trailing ``::h<hash>`` suffix and leaves
    un-hashed symbols (and the empty string) untouched."""
    cases = [
        ("", ""),
        ("_ffi_call_unix64", "_ffi_call_unix64"),
        ("backtrace::backtrace::trace::h1c213d29ba950696",
         "backtrace::backtrace::trace"),
        ("<T as core::convert::Into<U>>::into::h58e05f056150874e",
         "<T as core::convert::Into<U>>::into"),
        ("symbolic_symcache_from_object", "symbolic_symcache_from_object"),
    ]
    for symbol, expected in cases:
        assert strip_symbol(symbol) == expected
|
|
import numpy as np
from utils import Utils, WordDict
import time
from kernel_density import NonParametricPdf
class TickerCore():
""" This is file contains the core algorithm of ticker.
Input:
* i_select_thresh: If p(word|clicks) > word_select_thresh, we'll ask the user if he/she wants to select this word.
* i_dict_name: The dictionary containing all the prior probabilities of words that can be selected."""
############################################# Initialisation functions
    def __init__(self, i_select_thresh=0.9,
                 i_dict_name="dictionaries/nomon_dict.txt" ):
        # Click model; remains None until setClickDistr() is called.
        self.click_distr = None
        # Index of the current letter position in the word being written.
        self.letter_idx = 0
        # Click timestamps collected for the current letter.
        self.click_times = []
        self.params = {}
        self.utils = Utils()
        self.setWordSelectThresh(i_select_thresh)
        self.setDict(i_dict_name)
        self.calibrate = False
        #Debug
        self.disp = False
        self.diagnostic = False
############################################# Main functions
    def undoLastLetter(self):
        # Discard the clicks collected for the current letter position.
        self.click_times = []
    def newClick(self, i_click_time):
        """Store the new click"""
        self.click_times.append( i_click_time )
        # Score the clicks received so far against the click model (log domain).
        click_scores = self.click_distr.logLikelihood( self.click_times, i_log=True )
        return click_scores
    def newWord(self):
        """Each time a new word has to be predicted this function has to be called."""
        # Reset the word posteriors back to the dictionary priors.
        self.dict.log_probs = np.array(self.prior_log_probs)
        if self.click_distr is not None:
            # Drop stored observations that were never matched to a letter.
            while len(self.click_distr.train_obs) > len(self.click_distr.obs_letters):
                self.click_distr.train_obs.pop()
        self.letter_idx = 0
        self.click_times = []
    def newLetter(self, i_process_word_selections=True, i_letter_scores=None):
        """Process all the received clicks after the letters associated with them have been received"""
        if not self.clicksReceived():
            return
        #Add the click times to the training data
        if self.is_train:
            self.click_distr.storeObservation(self.click_times)
        #Update the word posteriors: Priors for next iteration
        if i_letter_scores is not None:
            # Caller supplied precomputed letter scores.
            click_scores = np.array(i_letter_scores)
        else:
            click_scores = self.click_distr.logLikelihood( self.click_times, i_log=True )
        self.updateWordPosteriors( click_scores )
        #Find the word the maximum posterior probability, and extract the posterior prob of the desired word
        best_idx = np.argmax(self.dict.log_probs)
        best_score = np.exp(self.dict.log_probs[best_idx])
        if self.diagnostic:
            print "TICKER CORE NEW: Best score = ", best_score, " best word = ", self.dict.words[best_idx]
        selected_word = self.selectWord(best_score, best_idx, i_process_word_selections)
        # Advance to the next letter only if no word was accepted (selectWord
        # resets letter_idx via newWord() when a word is selected).
        if (selected_word is None) or (not i_process_word_selections):
            self.letter_idx += 1
        self.click_times = []
        return selected_word
    def selectWord(self, i_best_score, i_best_idx, i_process_word_selections):
        # Accept a word only once its posterior clears the configured threshold.
        if i_best_score < self.params['word_select_thresh']:
            return
        selected_word = self.dict.words[i_best_idx]
        if self.disp:
            print "In ticker_core, selected_word = ", selected_word, " prob = ", i_best_score
        if not i_process_word_selections:
            # Report the candidate only: strip the trailing word-end symbol,
            # except when the word is the lone '.' itself.
            if selected_word == '.':
                return '.'
            return selected_word[0:-1]
        # Full selection: adapt the click model and start a fresh word.
        self.train(selected_word)
        self.newWord()
        return selected_word
def clicksReceived(self):
return len(self.click_times) > 0
    def train(self, i_selected_word):
        # Adapt the click distribution from the word the user just selected;
        # no-op when training is disabled.
        if not self.is_train:
            print "NO TRAINING: return"
            return
        train_word = self.getTrainingWord(i_selected_word)
        self.click_distr.train(train_word)
    def trainClickDistrAndInitialise(self, i_selected_word):
        """* Even if the click distr is not trainable it will be trained
           * All samples will be used to initialise a histogram (no online adaptation)
           * This is typically used only if there is no uncertainty to which word the
             user was trying to write, i.e., d. """
        # Save current learning settings so they can be restored afterwards.
        (is_train, learn_rate) = (self.click_distr.is_train, self.click_distr.learning_rate)
        if not is_train:
            print "NO training"
            return
        (learn_delay, learn_std) = (self.click_distr.learn_delay, self.click_distr.learn_std)
        (learn_fp_rate, learn_fr) = (self.click_distr.learn_fp, self.click_distr.learn_fr)
        # Temporarily force a full-weight (batch) update.
        self.click_distr.learning_rate = 1.0
        print "LEARN DELAY = ", learn_delay, " std = ", learn_std, " fp = ", learn_fp_rate, " fr = ", learn_fr
        self.click_distr.histogram.learning_rate = self.click_distr.learning_rate
        #Correct the letter index (one more) because i_process_word_selections was False
        self.letter_idx -= 1
        self.train(i_selected_word)
        # Restore the saved settings.
        self.click_distr.is_train = is_train
        self.click_distr.learning_rate = learn_rate
        self.click_distr.histogram.learning_rate = learn_rate
        # NOTE(review): duplicate of the is_train restore three lines above —
        # harmless but probably unintended; confirm and remove.
        self.click_distr.is_train = is_train
        self.click_distr.learn_delay = learn_delay
        self.click_distr.learn_std = learn_std
        self.click_distr.learn_fp = learn_fp_rate
        self.click_distr.learn_fr = learn_fr
        self.newWord()
############################################# Get Functions
    def getTrainingWord(self, i_selected_word):
        """Build the letter sequence the user actually entered for training.

        self.letter_idx is the number of letter positions consumed; the
        selection may happen before the word is complete (truncate), or
        after wrapping past its end one or more times (repeat/extend).
        Relies on Python 2 integer division; warpIndices returns a
        non-positive wrapped offset.
        """
        train_word = list(str(i_selected_word))
        # Selected early: keep only the letters written so far.
        if self.letter_idx < (len(i_selected_word)-1):
            train_word = train_word[0:(self.letter_idx+1)]
            return "".join(train_word)
        # '.' is special-cased: one extra '.' per extra letter position.
        if i_selected_word == ".":
            if self.letter_idx == 0:
                return "".join(train_word)
            for n in range(0, self.letter_idx):
                train_word.append(".")
            return "".join(train_word)
        letter_idx = self.warpIndices(self.letter_idx+1, len(i_selected_word))
        if letter_idx == 0:
            # Exactly a whole number of passes over the word.
            word_multiple = (self.letter_idx+1) / len(i_selected_word)
            for n in range(1, word_multiple):
                train_word.extend( list(str(i_selected_word)) )
            return "".join(train_word)
        # Whole passes followed by a partial pass up to the wrapped offset
        # (letter_idx is non-positive, so end_idx < len(i_selected_word)).
        word_multiple = self.letter_idx / len(i_selected_word)
        for n in range(1, word_multiple):
            train_word.extend( list(str(i_selected_word)) )
        end_idx = len(i_selected_word) + letter_idx
        for n in range(0, end_idx):
            train_word.append(i_selected_word[n])
        return "".join(train_word)
def getBestWordProbs(self, i_n=-1):
"""Return the best i_n words, if i_n=-1 all will be returned"""
best_idx = np.argsort(-self.dict.log_probs).flatten()
if i_n > 0:
best_idx = best_idx[0:i_n]
return (self.dict.words[best_idx], np.exp(self.dict.log_probs[best_idx]))
    def getLetterIndex(self):
        # Accessor: 0-based index of the letter currently being entered.
        return self.letter_idx
def getNumberClicks(self):
if not self.clicksReceived():
return 0
return len(self.click_times)
############################################ Set Functions
def setClickDistr(self, i_click_distr):
self.is_train = i_click_distr.is_train
self.click_distr = i_click_distr
if i_click_distr is None:
return
self.letter_indices = self.letterIndices()
self.newWord()
    def setChannelConfig(self, i_channel_config):
        # Reset the click distribution for a new channel configuration.
        self.click_distr.reset(i_channel_config)
    def setWordSelectThresh(self, i_value):
        # Probability threshold a word must reach before it is selected.
        self.params['word_select_thresh'] = i_value
    def setDict(self, i_file_name):
        """Load a new word dictionary from i_file_name and reset word state."""
        self.params['dict_name'] = i_file_name
        self.dict = WordDict(self.params['dict_name'])
        # Keep a copy of the priors so posteriors can be reset per word.
        self.prior_log_probs = np.array(self.dict.log_probs)
        self.newWord()
        self.word_indices = self.wordIndices()
##################################### Word posteriors
def letterIndices(self):
"""Store the alphabet positions in dictionary - optimisation when extracting word scores"""
letter_indices = {}
for n in range(0, self.click_distr.loc.shape[0] ):
letter_indices[ self.click_distr.alphabet[n]] = n
return letter_indices
def wordIndices(self):
#Store all the word positions in dict: diagnostic purposes
word_indices = {}
for n in range(0, len(self.dict.words)):
word_indices[self.dict.words[n]] = n
return word_indices
def warpIndices(self, i_letter_idx, i_word_lengths):
"""Wrap the index around in cases where letter_idx > input word lengths."""
letter_idx = -i_letter_idx / i_word_lengths
letter_idx *= i_word_lengths
letter_idx += i_letter_idx
return letter_idx
    def curLetterList(self, i_letter_idx ):
        """For every dictionary word, the alphabet index of its current letter.

        warpIndices yields a non-positive offset per word which is used as a
        (negative) Python string index -- this is how indexing wraps around
        for words shorter than the current letter position.
        """
        letter_indices = self.warpIndices( i_letter_idx, self.dict.word_lengths)
        letter_list = np.array( [self.letter_indices[self.dict.words[n][idx]] for n, idx in enumerate(letter_indices) ] )
        return letter_list
    def updateWordPosteriors(self, i_log_letter_scores ):
        """Bayesian update: add each word's current-letter log score to its
        log probability and renormalise."""
        letters_idx = self.curLetterList(self.letter_idx)
        self.dict.log_probs = self.dict.normalise( i_log_letter_scores[letters_idx] + self.dict.log_probs )
#################################################### Display
def dispClickTimes(self, i_grnd_truth_word=None, i_selected_word=None):
if not self.disp:
return
letter_scores = self.click_distr.logLikelihood(self.click_times, i_log=True)
click_time_str = self.utils.stringVector(np.array(self.click_times))
print "click_times = ", click_time_str, " letter index = ", self.letter_idx,
if i_grnd_truth_word is not None:
warped_idx = self.wrapIndices(self.letter_idx, len(i_grnd_truth_word))
print "Selected word = ", i_selected_word, " grnd truth = ", i_grnd_truth_word,
print " ", i_grnd_truth_word[warped_idx]
else:
print " "
print "============================================================================="
#print " stored keys = ", cn.keys()
for (m, letter) in enumerate( self.click_distr.alphabet):
click_time_str = self.utils.stringVector(np.array(self.click_times)-self.click_distr.delay)
loc_str = self.utils.stringVector(self.click_distr.loc[m,:] , i_type="%.3f")
param_str = "delay=%.3f, std=%.3f, fr=%.3f, fp_rate=%.3f" % (self.click_distr.delay,
self.click_distr.std, self.click_distr.fr, self.click_distr.fp_rate)
print "letter=%s, loc=%s, click_times-delay=%s, score=%.3f, %s" % (letter, loc_str,
click_time_str, letter_scores[m], param_str )
    def dispBestWords(self):
        """Print the ten most probable words (debugging; gated on self.disp)."""
        if not self.disp:
            return
        print "Best words: "
        (best_words, best_word_scores) = self.getBestWordProbs(10)
        for n in range(0, len(best_words)):
            print best_words[n], " ", best_word_scores[n]
|
|
from kivy.uix.scrollview import ScrollView
from kivy.properties import ObjectProperty, NumericProperty, StringProperty,\
BoundedNumericProperty, BooleanProperty, OptionProperty
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.checkbox import CheckBox
from kivy.uix.spinner import Spinner
from kivy.app import App
from designer.undo_manager import PropOperation
class PropertyLabel(Label):
    '''Label used by :class:`~designer.propertyviewer.PropertyViewer` to
    display a property's name in the left-hand column.
    '''
    pass
class PropertyBase(object):
    '''Abstract base for the property-editing widgets
    (PropertyTextInput, PropertyBoolean, PropertyOptions).  Holds the
    target widget/property and the conversion + undo + KV-sync logic.
    '''

    propwidget = ObjectProperty()
    '''It is an instance to the Widget whose property value is displayed.
       :data:`propwidget` is a :class:`~kivy.properties.ObjectProperty`
    '''

    propname = StringProperty()
    '''It is the name of the property.
       :data:`propname` is a :class:`~kivy.properties.StringProperty`
    '''

    propvalue = ObjectProperty(allownone=True)
    '''It is the value of the property.
       :data:`propvalue` is a :class:`~kivy.properties.ObjectProperty`
    '''

    oldvalue = ObjectProperty(allownone=True)
    '''It is the old value of the property
       :data:`oldvalue` is a :class:`~kivy.properties.ObjectProperty`
    '''

    have_error = BooleanProperty(False)
    '''It specifies whether there have been an error in setting new value
       to property
       :data:`have_error` is a :class:`~kivy.properties.BooleanProperty`
    '''

    proptype = StringProperty()
    '''It is the type of property.
       :data:`proptype` is a :class:`~kivy.properties.StringProperty`
    '''

    record_to_undo = BooleanProperty(False)
    '''It specifies whether the property change has to be recorded to undo.
       It is used when :class:`~designer.undo_manager.UndoManager` undoes
       or redoes the property change.
       :data:`record_to_undo` is a :class:`~kivy.properties.BooleanProperty`
    '''

    kv_code_input = ObjectProperty()
    '''It is a reference to the
       :class:`~designer.uix.kv_code_input.KVLangArea`.
       :data:`kv_code_input` is a :class:`~kivy.properties.ObjectProperty`
    '''

    def set_value(self, value):
        '''Convert *value* for the target property, assign it, and mirror the
        change into the KV source.  On failure the property is rolled back
        to its old value and :data:`have_error` is set.
        '''
        self.have_error = False
        conversion_err = False
        oldvalue = getattr(self.propwidget, self.propname)
        try:
            if isinstance(self.propwidget.property(self.propname),
                          NumericProperty):
                # NumericProperty accepts None; empty input means "unset".
                if value == 'None' or value == '':
                    value = None
                else:
                    value = float(value)
        except Exception:
            conversion_err = True
        root = App.get_running_app().root
        if not conversion_err:
            try:
                setattr(self.propwidget, self.propname, value)
                # Keep the KV language source in sync with the new value.
                self.kv_code_input.set_property_value(self.propwidget,
                                                      self.propname, value,
                                                      self.proptype)
                if self.record_to_undo:
                    root.undo_manager.push_operation(
                        PropOperation(self, oldvalue, value))
                self.record_to_undo = True
            except Exception:
                # Assignment failed: flag the error and restore the old value.
                self.have_error = True
                setattr(self.propwidget, self.propname, oldvalue)
class PropertyOptions(PropertyBase, Spinner):
    '''Spinner-based editor that shows/sets the choices of an
    :class:`~kivy.properties.OptionProperty`.
    '''

    def __init__(self, prop, **kwargs):
        # NOTE(review): both bases are initialised explicitly with the same
        # **kwargs (no cooperative super()); the Spinner additionally
        # receives the property's option list as its values.
        PropertyBase.__init__(self, **kwargs)
        Spinner.__init__(self, values=prop.options, **kwargs)

    def on_propvalue(self, *args):
        '''Default handler for 'on_propvalue': mirror the new property value
        into the spinner's displayed text.
        '''
        self.text = self.propvalue
class PropertyTextInput(PropertyBase, TextInput):
    '''Single-line editor used to display and edit
    :class:`~kivy.properties.StringProperty` and
    :class:`~kivy.properties.NumericProperty` values.
    '''

    def insert_text(self, substring, from_undo=False):
        '''Override of :class:`~kivy.uix.textinput.TextInput`.insert_text.

        For a NumericProperty only digits, a single decimal point and
        characters of "None" are accepted; anything else is dropped.

        :param substring: the text being inserted.
        :param from_undo: True when the insertion originates from the undo
            manager.  Bug fix: this flag is now forwarded to the base class
            -- previously it was silently dropped, corrupting TextInput's
            undo bookkeeping.
        '''
        if self.proptype == 'NumericProperty' and \
                substring.isdigit() is False and\
                (substring != '.' or '.' in self.text)\
                and substring not in 'None':
            return
        super(PropertyTextInput, self).insert_text(substring, from_undo)
class PropertyBoolean(PropertyBase, CheckBox):
    '''Checkbox-based editor used to display and toggle a
    :class:`~kivy.properties.BooleanProperty`.
    '''
    pass
class PropertyViewer(ScrollView):
    '''Displays, for the selected widget, every property name alongside an
    editor widget for its value.
    '''

    widget = ObjectProperty(allownone=True)
    '''Widget for which properties are displayed.
       :data:`widget` is a :class:`~kivy.properties.ObjectProperty`
    '''

    prop_list = ObjectProperty()
    '''Widget in which all the properties and their value is added. It is a
       :class:`~kivy.gridlayout.GridLayout.
       :data:`prop_list` is a :class:`~kivy.properties.ObjectProperty`
    '''

    kv_code_input = ObjectProperty()
    '''It is a reference to the KVLangArea.
       :data:`kv_code_input` is a :class:`~kivy.properties.ObjectProperty`
    '''

    def __init__(self, **kwargs):
        super(PropertyViewer, self).__init__(**kwargs)
        # Cache of PropertyLabel instances keyed by property name, so
        # re-selecting widgets does not re-create labels.
        self._label_cache = {}

    def on_widget(self, instance, value):
        '''Default handler for 'on_widget': rebuild the property list for
        the newly selected widget.
        '''
        self.clear()
        if value is not None:
            self.discover(value)

    def clear(self):
        '''Remove all rows from :data:`prop_list`.
        '''
        self.prop_list.clear_widgets()

    def discover(self, value):
        '''Walk all properties of *value* and, for each supported type, add
        its label and editor widget to :data:`prop_list`.
        '''
        add = self.prop_list.add_widget
        get_label = self._get_label
        # Fix: sorted() works on both a Python 2 list and a Python 3 keys
        # view; the previous .keys() + list.sort() was Python-2-only.
        for prop in sorted(value.properties().keys()):
            ip = self.build_for(prop)
            if not ip:
                continue
            add(get_label(prop))
            add(ip)

    def _get_label(self, prop):
        # Return the cached label for *prop*, creating it on first use.
        try:
            return self._label_cache[prop]
        except KeyError:
            lbl = self._label_cache[prop] = PropertyLabel(text=prop)
            return lbl

    def build_for(self, name):
        '''Create and return the editor widget appropriate for property
        *name* of the current widget, or None when the property type is
        not supported.
        '''
        prop = self.widget.property(name)
        if isinstance(prop, NumericProperty):
            return PropertyTextInput(propwidget=self.widget, propname=name,
                                     proptype='NumericProperty',
                                     kv_code_input=self.kv_code_input)
        elif isinstance(prop, StringProperty):
            return PropertyTextInput(propwidget=self.widget, propname=name,
                                     proptype='StringProperty',
                                     kv_code_input=self.kv_code_input)
        elif isinstance(prop, BooleanProperty):
            ip = PropertyBoolean(propwidget=self.widget, propname=name,
                                 proptype='BooleanProperty',
                                 kv_code_input=self.kv_code_input)
            ip.record_to_undo = True
            return ip
        elif isinstance(prop, OptionProperty):
            ip = PropertyOptions(prop, propwidget=self.widget, propname=name,
                                 proptype='StringProperty',
                                 kv_code_input=self.kv_code_input)
            return ip
        return None
|
|
# -*- Mode: Python -*-
# attempting a version of core utils for random binaries (as opposed to python core dumps)
import os
import parse_elf
import struct
import sys
from pprint import pprint as pp
# shorthand for progress/diagnostic output
W = sys.stderr.write

# filename -> (load base address, parsed ELF info tuple)
elf_data = {}

# pointer size in bytes; set from the executable's ELF class by set_psize()
psize = None
def read_map (filename, base=0):
    """Parse <filename> as ELF, record its info in the global elf_data,
    and return the sorted list of loadable segments as
    (memsz, base+vaddr, file_offset, filesz) tuples."""
    global elf_data
    info = parse_elf.go (filename)
    elf_data[filename] = (base, info)
    ehdr, phdrs, shdrs, syms, core_info = info
    segments = [
        (p['memsz'], base + p['vaddr'], p['offset'], p['filesz'])
        for p in phdrs if p['type'] == 'load'
    ]
    segments.sort()
    return segments
class searchable_file:
    """Block-wise substring search over a file descriptor.

    Reads the file in block_size chunks, overlapping each step by
    len(needle)-1 bytes so a needle straddling a block boundary is
    still found.
    """

    block_size = 1<<16

    def __init__ (self, fd, size):
        self.fd = fd
        self.size = size

    def find (self, needle, position, size=None):
        """Return the file offset of the first occurrence of <needle> at or
        after <position>, or None.

        Bug fix: the <size> argument (maximum number of bytes to search,
        default: to end of file) was previously accepted but ignored --
        every search ran to EOF.  It now bounds the search window.
        """
        if size is None:
            size = self.size - position
        end = min (self.size, position + size)
        while position < end:
            os.lseek (self.fd, position, 0)
            block = os.read (self.fd, min (self.block_size, end - position))
            maybe = block.find (needle)
            if maybe != -1:
                return position + maybe
            # overlap the blocks in case needle straddles the boundary
            position += (self.block_size - (len(needle) - 1))
        return None # Not found

    def seek (self, position):
        os.lseek (self.fd, position, 0)

    def read (self, size):
        return os.read (self.fd, size)
def valid_address (addr):
    """Translate virtual address <addr> to (file_offset, searchable_file)
    using the global load maps, or return None when unmapped."""
    for segs, _fd, _size, segfile, _base in maps:
        for memsz, vaddr, offset, _filesz in segs:
            delta = addr - vaddr
            if 0 <= delta < memsz:
                return delta + offset, segfile
    return None
def to_disk (addr):
    """Like valid_address(), but raise ValueError instead of returning None
    for an unmapped address."""
    probe = valid_address (addr)
    if probe is None:
        raise ValueError ("address out of range")
    return probe
def from_disk (pos):
    """Translate file offset <pos> back to a virtual address via the global
    load maps; raises ValueError for an offset outside every segment.

    (The raise now uses the parenthesised form -- identical behaviour under
    Python 2, but the old 'raise ValueError, "..."' comma syntax is a
    Python-2-only construct.)
    """
    for mmap, mfd, msize, mfile, base in maps:
        for memsz, vaddr, offset, filesz in mmap:
            if offset <= pos < (offset + filesz):
                return (pos - offset) + vaddr
    raise ValueError ("address out of range")
def read (address, nbytes=4):
    """Read <nbytes> of target memory at virtual <address>.

    The address is validated against the load maps first; raises ValueError
    when it is not mapped.  (Parenthesised raise replaces the Python-2-only
    comma form; behaviour is unchanged.)
    """
    # verify all addresses before trying to read them.
    probe = valid_address (address)
    if probe is not None:
        pos, mm = probe
        mm.seek (pos)
        return mm.read (nbytes)
    else:
        raise ValueError ("address out of range")
def read_long (address):
    # Read one pointer-sized integer (long_struct/psize set by set_psize()).
    return struct.unpack (long_struct, read (address, psize))[0]
def read_struct (address, format):
    # Read and unpack a struct.calcsize(format)-byte record at <address>.
    return struct.unpack (format, read (address, struct.calcsize (format)))
def read_string (address):
    """Read a NUL-terminated string from target memory at <address>;
    a null/zero address yields the placeholder '<null>'."""
    if not address:
        return '<null>'
    chars = []
    while 1:
        ch = read (address, 1)
        if ch == '\000':
            break
        chars.append (ch)
        address += 1
    return ''.join (chars)
class finder:
    """Iterates over successive occurrences of a byte string in the core
    file (maps[0]); Python-2-style iterator with an explicit next() method
    that returns in-memory addresses."""
    def __init__ (self, s):
        self.last = 0
        self.s = s
    def next (self):
        # the core file is always the first entry in the global <maps>
        mmap, mfd, msize, mfile, base = maps[0]
        addr = mfile.find (self.s, self.last)
        if addr is None:
            return None
        else:
            # resume just past this hit on the next call
            self.last = addr + len (self.s)
            return from_disk (addr)
    def all (self):
        # Collect every match; '+' is written to stderr per hit as a
        # progress indicator, and a KeyboardInterrupt stops the scan early,
        # returning what was found so far.
        result = []
        while 1:
            try:
                n = self.next()
            except KeyboardInterrupt:
                sys.stderr.write ('\n')
                return result
            else:
                sys.stderr.write ('+')
                if n is None:
                    break
                else:
                    result.append (n)
        sys.stderr.write ('\n')
        return result
def find_all (s):
    "Return the in-memory addresses of every occurrence of <s> in the core file."
    return finder(s).all()
def find (s):
    """Find the first occurrence of <s> in the core file.

    Saves the finder in global _f and rebinds the module-level name 'next'
    to its next() method so the search can be resumed interactively.
    NOTE(review): this shadows the builtin next().
    """
    global _f, next
    # save this away so we can continue the search
    _f = finder (s)
    next = _f.next
    return next()
def who_points_to (addr, max_items=30, aligned=True):
    """Scan the core file for pointer-sized little-endian values equal to
    <addr> and return their in-memory addresses (at most max_items hits
    examined; progress dots go to stderr).

    When <aligned> is True, hits whose in-memory address is not pointer
    aligned are discarded (they still consume an iteration).
    """
    # assumes maps[0] is the core file
    mmap, mfd, msize, mfile, base = maps[0]
    results = []
    # address in string form
    if psize == 4:
        s = struct.pack ('<l', addr)
    else:
        s = struct.pack ('<q', addr)
    found = base
    for i in range (max_items):
        # resume one pointer past the previous hit
        found = mfile.find (s, found + psize)
        sys.stderr.write ('.')
        if found is None:
            break
        else:
            in_mem = from_disk (found)
            if aligned and (in_mem % psize) != 0:
                pass
            else:
                results.append (in_mem)
    sys.stderr.write ('\n')
    return results
def WP (addr=None):
    # Interactive shorthand for who_points_to(); with no argument it uses
    # '_', the interpreter's last-result variable (interactive use only --
    # NameError outside the REPL).
    if addr is None:
        addr = _
    return who_points_to (addr)
# name -> [(kind, absolute address, defining file), ...]; built lazily
symbols = None

def read_symbols():
    """Build the global symbol table from every parsed ELF file,
    keeping only 'func' and 'object' symbols (addresses are rebased)."""
    global symbols
    table = {}
    for path, (base, (ehdr, phdrs, shdrs, syms, core_info)) in elf_data.items():
        for sym in syms:
            if sym['type'] not in ('func', 'object'):
                continue
            entries = table.setdefault (sym['name'], [])
            entries.append ((sym['type'], base + sym['value'], path))
    symbols = table
def get_sym (name, address_of=0, which=None):
    """Look up <name> in the symbol table.

    Functions (or address_of=1) yield the symbol's address; data objects
    yield the pointer-sized value stored there.  Returns None for unknown
    names; selecting among multiple definitions (<which>) is unimplemented.
    """
    probe = symbols.get (name)
    if probe is None:
        return None
    if which is not None:
        raise NotImplementedError
    kind, val, path = probe[0]
    if kind == 'func' or address_of:
        return val
    return read_long (val)
# elf_common.h
DT_DEBUG = 21

def find_solibs():
    """Locate the dynamic linker's link map via DT_DEBUG and return
    [(load_addr, path)] for every mapped object (the executable itself is
    the first entry).
    """
    debug_base = None
    for filename, (base, info) in elf_data.items():
        if filename == exe_path:
            ehdr, phdrs, shdrs, syms, core_info = info
            for d in shdrs:
                if d['type'] == 'dynamic':
                    # scan the .dynamic section in place in target memory
                    offset = d['addr']
                    # Note: 'P' won't work because we may
                    # not be running on the same machine!
                    if d['entsize'] == 8:
                        spec = '<LL'
                    elif d['entsize'] == 16:
                        spec = '<QQ'
                    # NOTE(review): 'spec' is unbound for any other entsize,
                    # and debug_base stays None when DT_DEBUG is absent --
                    # both would blow up below; confirm inputs.
                    for i in range (0, d['size'], d['entsize']):
                        tag, val = read_struct (offset + i, spec)
                        if tag == DT_DEBUG:
                            debug_base = val
    # the link_map pointer lives one word past the r_debug base
    link_map = read_long (debug_base + psize)
    # ok, now we have the link map, we can walk it and find all so's.
    result = []
    # Note: 'P' won't work because we may
    # not be running on the same machine!
    if psize == 4:
        spec = '<LLLLL'
    elif psize == 8:
        spec = '<QQQQQ'
    while 1:
        # struct link_map: l_addr, l_name, l_ld, l_next, l_prev
        addr, name, ld, next, prev = read_struct (link_map, spec)
        result.append ((addr, read_string (name)))
        if not next:
            break
        else:
            link_map = next
    return result
def map_file (path, base=0):
    """Open <path>, parse its ELF load map and append
    (segments, fd, size, searchable_file, base) to the global <maps>,
    printing the base address as a progress line."""
    global maps
    print '%16x %s' % (base, path)
    mmap = read_map (path, base)
    mfd = os.open (path, os.O_RDONLY)
    # lseek to the end gives the file size
    msize = os.lseek (mfd, 0, 2)
    mfile = searchable_file (mfd, msize)
    maps.append ((mmap, mfd, msize, mfile, base))
def set_psize():
    """Set the global pointer size and unpack format from the executable's
    ELF identification class.

    Raises ValueError for an unrecognised ELF class.  (Parenthesised raise
    replaces the Python-2-only 'raise E, msg' comma form; behaviour is
    unchanged under Python 2.)
    """
    global psize, long_struct
    # the executable's header is authoritative for the pointer width
    base, info = elf_data[exe_path]
    ehdr, phdrs, shdrs, syms, core_info = info
    ident_class = ehdr['ident']['class']
    if ident_class == '32-bit':
        psize = 4
        long_struct = '<L'
    elif ident_class == '64-bit':
        psize = 8
        long_struct = '<Q'
    else:
        raise ValueError ("I'm confused")
if __name__=='__main__':
    usage = """\
usage: python %s <exe-file> <core-file>
       python %s -h|--help""" %(sys.argv[0], sys.argv[0])
    if '-h' in sys.argv or '--help' in sys.argv:
        print """\
To use this, do:
%s
If there were any shared libraries, either make sure they are in the same
location from where the binary imported them, or stick them all into the
current directory.
""" %(usage,)
        sys.exit()
    if len(sys.argv) < 3:
        print usage
        sys.exit()
    exe_path = sys.argv[1]
    core_path = sys.argv[2]
    maps = []
    # core file must be first... (finder/who_points_to assume maps[0])
    map_file (core_path)
    map_file (exe_path)
    # set the size of a pointer
    set_psize()
    # skip first one, it's the exe, which is already mapped
    solibs = find_solibs()[1:]
    transplant = 0
    for addr, path in solibs:
        if not os.path.isfile (path):
            # try the current directory
            probe = os.path.split(path)[-1]
            if os.path.isfile (probe):
                transplant = 1
                path = probe
            else:
                print 'unable to find %s' % (path,)
                path = None
        if path:
            map_file (path, addr)
    if transplant:
        print '[transplant]'
    # index func/object symbols from everything we mapped
    read_symbols()
    base, info = elf_data[core_path]
    ehdr, phdrs, shdrs, syms, core_info = info
    command = core_info.get('command')
    death_signal = core_info.get('signal')
    if command:
        print 'Core was generated by "%s"' %(command)
    if death_signal:
        print 'Program terminated with signal %d' %(death_signal)
    # -------------------------------------------------------------------------
    # this eats up about 10MB of memory, which we probably don't need any more.
    # -------------------------------------------------------------------------
    elf_data.clear()
    import code
    banner = """\
Welcome to browse_core.
"""
    code.interact(banner=banner, local=locals())
|
|
# Created by: Jack Button, Aditya Dua
# 10 June, 2017
import unittest
import numpy as np
from math import pi
from .test_common import matrices_equal, matrix_mismatch_string_builder
from ..base import transforms
# ---------------------------------------------------------------------------------------#
# 3D Transforms
# ---------------------------------------------------------------------------------------#
# angvec2r | ready
# angvec2tr | ready
# rotx | complete
class TestRotx(unittest.TestCase):
    """Tests for transforms.rotx: return type and shape, rotation matrices
    at the boundary angles (radians and degrees), and argument validation.
    The repeated compare-and-fail boilerplate is factored into _check."""

    def _check(self, received_mat, expected_mat):
        # Shared assertion: fail with a readable matrix diff on mismatch.
        if not matrices_equal(received_mat, expected_mat):
            self.fail(matrix_mismatch_string_builder(expected_mat, received_mat))

    def test_transforms_3d_rotx_validData_returnDatatype(self):
        self.assertIsInstance(transforms.rotx(0), np.matrix)

    def test_transforms_3d_rotx_validData_returnData_dimension(self):
        self.assertEqual(transforms.rotx(0).shape, (3, 3))

    def test_transforms_3d_rotx_validData_boundaryCondition_0_rad(self):
        self._check(transforms.rotx(0),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_pi_by2_rad(self):
        self._check(transforms.rotx(pi / 2),
                    np.matrix([[1, 0, 0], [0, 0, -1], [0, 1, 0]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_pi_rad(self):
        self._check(transforms.rotx(pi),
                    np.matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(transforms.rotx(3 * pi / 2),
                    np.matrix([[1, 0, 0], [0, 0, 1], [0, -1, 0]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_2pi_rad(self):
        self._check(transforms.rotx(2 * pi),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_0_deg(self):
        self._check(transforms.rotx(0, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_360_deg(self):
        self._check(transforms.rotx(360, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_90_deg(self):
        self._check(transforms.rotx(90, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 0, -1], [0, 1, 0]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_180_deg(self):
        self._check(transforms.rotx(180, unit='deg'),
                    np.matrix([[1, 0, 0], [0, -1, 0], [0, 0, -1]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_270_deg(self):
        self._check(transforms.rotx(270, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 0, 1], [0, -1, 0]]))

    def test_transforms_3d_rotx_validData_boundaryCondition_450_deg(self):
        # 450 degrees wraps to 90
        self._check(transforms.rotx(450, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 0, -1], [0, 1, 0]]))

    def test_transforms_3d_rotx_invalidData_arg1_string(self):
        self.assertRaises(TypeError, transforms.rotx, 'invalid', unit='deg')

    def test_transforms_3d_rotx_invalidData_arg2_string_mismatch(self):
        self.assertRaises(AssertionError, transforms.rotx,
                          180, unit='invalid unit')

    def test_transforms_3d_rotx_invalidData_arg2_bool(self):
        self.assertRaises(AssertionError, transforms.rotx, 180, unit=True)

    def test_transforms_3d_rotx_invalidData_arg2_int(self):
        self.assertRaises(AssertionError, transforms.rotx, 180, unit=5)
# roty | complete
class Testroty(unittest.TestCase):
    """Tests for transforms.roty: return type and shape, rotation matrices
    at the boundary angles (radians and degrees), and argument validation.
    The repeated compare-and-fail boilerplate is factored into _check."""

    def _check(self, received_mat, expected_mat):
        # Shared assertion: fail with a readable matrix diff on mismatch.
        if not matrices_equal(received_mat, expected_mat):
            self.fail(matrix_mismatch_string_builder(expected_mat, received_mat))

    def test_transforms_3d_roty_validData_returnDatatype(self):
        self.assertIsInstance(transforms.roty(0), np.matrix)

    def test_transforms_3d_roty_validData_returnData_dimension(self):
        self.assertEqual(transforms.roty(0).shape, (3, 3))

    def test_transforms_3d_roty_validData_boundaryCondition_0_rad(self):
        self._check(transforms.roty(0),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_roty_validData_boundaryCondition_pi_by2_rad(self):
        self._check(transforms.roty(pi / 2),
                    np.matrix([[0., 0., 1.], [0, 1, 0.], [-1, 0., 0.]]))

    def test_transforms_3d_roty_validData_boundaryCondition_pi_rad(self):
        self._check(transforms.roty(pi),
                    np.matrix([[-1., 0., 0.], [0, 1, 0.], [-0, 0., -1.]]))

    def test_transforms_3d_roty_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(transforms.roty(3 * pi / 2),
                    np.matrix([[-0., 0., -1.], [0, 1, 0.], [1, 0., -0.]]))

    def test_transforms_3d_roty_validData_boundaryCondition_2pi_rad(self):
        self._check(transforms.roty(2 * pi),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_roty_validData_boundaryCondition_0_deg(self):
        self._check(transforms.roty(0, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_roty_validData_boundaryCondition_360_deg(self):
        self._check(transforms.roty(360, unit='deg'),
                    np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))

    def test_transforms_3d_roty_validData_boundaryCondition_90_deg(self):
        self._check(transforms.roty(90, unit='deg'),
                    np.matrix([[0., 0., 1.], [0, 1, 0.], [-1, 0., 0.]]))

    def test_transforms_3d_roty_validData_boundaryCondition_180_deg(self):
        self._check(transforms.roty(180, unit='deg'),
                    np.matrix([[-1., 0., 0.], [0., 1., 0.], [-0., 0., -1.]]))

    def test_transforms_3d_roty_validData_boundaryCondition_270_deg(self):
        self._check(transforms.roty(270, unit='deg'),
                    np.matrix([[-0., 0., -1.], [0, 1, 0.], [1, 0., -0.]]))

    def test_transforms_3d_roty_validData_boundaryCondition_450_deg(self):
        # 450 degrees wraps to 90
        self._check(transforms.roty(450, unit='deg'),
                    np.matrix([[0., 0., 1.], [0, 1, 0.], [-1, 0., 0.]]))

    def test_transforms_3d_roty_invalidData_arg1_string(self):
        self.assertRaises(TypeError, transforms.roty, 'invalid', unit='deg')

    def test_transforms_3d_roty_invalidData_arg2_string_mismatch(self):
        self.assertRaises(AssertionError, transforms.roty,
                          180, unit='invalid unit')

    def test_transforms_3d_roty_invalidData_arg2_bool(self):
        self.assertRaises(AssertionError, transforms.roty, 180, unit=True)

    def test_transforms_3d_roty_invalidData_arg2_int(self):
        self.assertRaises(AssertionError, transforms.roty, 180, unit=5)
# rotz | complete
class Testrotz(unittest.TestCase):
def test_transforms_3d_rotz_validData_returnDatatype(self):
self.assertIsInstance(transforms.rotz(0), np.matrix)
def test_transforms_3d_rotz_validData_returnData_dimension(self):
dimensions = transforms.rotz(0).shape
self.assertEqual(dimensions, (3, 3))
def test_transforms_3d_rotz_validData_boundaryCondition_0_rad(self):
expected_mat = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
received_mat = transforms.rotz(0)
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_pi_by2_rad(self):
expected_mat = np.matrix([[0., -1., 0.], [1, 0, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(pi / 2)
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_pi_rad(self):
expected_mat = np.matrix([[-1., -0., 0.], [0, -1, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(pi)
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_three_pi_by2_rad(self):
expected_mat = np.matrix([[-0., 1., 0.], [-1, -0, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(3 * pi / 2)
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_2pi_rad(self):
expected_mat = np.matrix([[1., 0., 0.], [-0, 1, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(2 * pi)
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_0_deg(self):
expected_mat = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
received_mat = transforms.rotz(0, unit='deg')
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_360_deg(self):
expected_mat = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
received_mat = transforms.rotz(360, unit='deg')
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_90_deg(self):
expected_mat = np.matrix([[0., -1., 0.], [1, 0, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(90, unit='deg')
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_180_deg(self):
expected_mat = np.matrix([[-1., -0., 0.], [0, -1, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(180, unit='deg')
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_270_deg(self):
expected_mat = np.matrix([[-0., 1., 0.], [-1, -0, 0.], [0, 0., 1.]])
received_mat = transforms.rotz(270, unit='deg')
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_3d_rotz_validData_boundaryCondition_450_deg(self):
    """rotz(450 deg) wraps around to a 90 degree rotation."""
    want = np.matrix([[0., -1., 0.], [1, 0, 0.], [0, 0., 1.]])
    got = transforms.rotz(450, unit='deg')
    if not matrices_equal(got, want):
        self.fail(matrix_mismatch_string_builder(want, got))
def test_transforms_3d_rotz_invalidData_arg1_string(self):
    """A non-numeric angle must raise TypeError."""
    with self.assertRaises(TypeError):
        transforms.rotz('invalid', unit='deg')
def test_transforms_3d_rotz_invalidData_arg2_string_mismatch(self):
    """An unrecognized unit string must raise AssertionError."""
    with self.assertRaises(AssertionError):
        transforms.rotz(180, unit='invalid unit')
def test_transforms_3d_rotz_invalidData_arg2_bool(self):
    """A boolean unit argument must raise AssertionError."""
    with self.assertRaises(AssertionError):
        transforms.rotz(180, unit=True)
def test_transforms_3d_rotz_invalidData_arg2_int(self):
    """An integer unit argument must raise AssertionError."""
    with self.assertRaises(AssertionError):
        transforms.rotz(180, unit=5)
# trotx | complete
class Testtrotx(unittest.TestCase):
    """Unit tests for transforms.trotx: 4x4 homogeneous rotation about the x-axis."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_3d_trotx_validData_returnDatatype(self):
        self.assertIsInstance(transforms.trotx(0), np.matrix)

    def test_transforms_3d_trotx_validData_returnData_dimension(self):
        self.assertEqual(transforms.trotx(0).shape, (4, 4))

    def test_transforms_3d_trotx_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., -0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(0))

    def test_transforms_3d_trotx_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 0., -1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(pi / 2))

    def test_transforms_3d_trotx_validData_boundaryCondition_pi_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., -1., -0., 0.], [0., 0., -1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(pi))

    def test_transforms_3d_trotx_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., -0., 1., 0.], [0., -1., -0., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(3 * pi / 2))

    def test_transforms_3d_trotx_validData_boundaryCondition_2pi_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., -0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(2 * pi))

    def test_transforms_3d_trotx_validData_boundaryCondition_0_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., -0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(0, unit='deg'))

    def test_transforms_3d_trotx_validData_boundaryCondition_360_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., -0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(360, unit='deg'))

    def test_transforms_3d_trotx_validData_boundaryCondition_90_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 0., -1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(90, unit='deg'))

    def test_transforms_3d_trotx_validData_boundaryCondition_180_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., -1., -0., 0.], [0., 0., -1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(180, unit='deg'))

    def test_transforms_3d_trotx_validData_boundaryCondition_270_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., -0., 1., 0.], [0., -1., -0., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(270, unit='deg'))

    def test_transforms_3d_trotx_validData_boundaryCondition_450_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 0., -1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.trotx(450, unit='deg'))

    def test_transforms_3d_trotx_invalidData_arg1_string(self):
        with self.assertRaises(TypeError):
            transforms.trotx('invalid', unit='deg')

    def test_transforms_3d_trotx_invalidData_arg2_string_mismatch(self):
        with self.assertRaises(AssertionError):
            transforms.trotx(180, unit='invalid unit')

    def test_transforms_3d_trotx_invalidData_arg2_bool(self):
        with self.assertRaises(AssertionError):
            transforms.trotx(180, unit=True)

    def test_transforms_3d_trotx_invalidData_arg2_int(self):
        with self.assertRaises(AssertionError):
            transforms.trotx(180, unit=5)
# troty | complete
class Testtroty(unittest.TestCase):
    """Unit tests for transforms.troty: 4x4 homogeneous rotation about the y-axis."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_3d_troty_validData_returnDatatype(self):
        self.assertIsInstance(transforms.troty(0), np.matrix)

    def test_transforms_3d_troty_validData_returnData_dimension(self):
        self.assertEqual(transforms.troty(0).shape, (4, 4))

    def test_transforms_3d_troty_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., 0., 0.], [-0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(0))

    def test_transforms_3d_troty_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[0., 0., 1., 0.], [0., 1., 0., 0.], [-1., 0., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(pi / 2))

    def test_transforms_3d_troty_validData_boundaryCondition_pi_rad(self):
        self._check(np.matrix([[-1., 0., 0., 0.], [0., 1., 0., 0.], [-0., 0., -1., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(pi))

    def test_transforms_3d_troty_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(np.matrix([[-0., 0., -1., 0.], [0., 1., 0., 0.], [1., 0., -0., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(3 * pi / 2))

    def test_transforms_3d_troty_validData_boundaryCondition_2pi_rad(self):
        self._check(np.matrix([[1., 0., -0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(2 * pi))

    def test_transforms_3d_troty_validData_boundaryCondition_0_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., 0., 0.], [-0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(0, unit='deg'))

    def test_transforms_3d_troty_validData_boundaryCondition_360_deg(self):
        self._check(np.matrix([[1., 0., -0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(360, unit='deg'))

    def test_transforms_3d_troty_validData_boundaryCondition_90_deg(self):
        self._check(np.matrix([[0., 0., 1., 0.], [0., 1., 0., 0.], [-1., 0., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(90, unit='deg'))

    def test_transforms_3d_troty_validData_boundaryCondition_180_deg(self):
        self._check(np.matrix([[-1., 0., 0., 0.], [0., 1., 0., 0.], [-0., 0., -1., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(180, unit='deg'))

    def test_transforms_3d_troty_validData_boundaryCondition_270_deg(self):
        self._check(np.matrix([[-0., 0., -1., 0.], [0., 1., 0., 0.], [1., 0., -0., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(270, unit='deg'))

    def test_transforms_3d_troty_validData_boundaryCondition_450_deg(self):
        self._check(np.matrix([[0., 0., 1., 0.], [0., 1., 0., 0.], [-1., 0., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.troty(450, unit='deg'))

    def test_transforms_3d_troty_invalidData_arg1_string(self):
        with self.assertRaises(TypeError):
            transforms.troty('invalid', unit='deg')

    def test_transforms_3d_troty_invalidData_arg2_string_mismatch(self):
        with self.assertRaises(AssertionError):
            transforms.troty(180, unit='invalid unit')

    def test_transforms_3d_troty_invalidData_arg2_bool(self):
        with self.assertRaises(AssertionError):
            transforms.troty(180, unit=True)

    def test_transforms_3d_troty_invalidData_arg2_int(self):
        with self.assertRaises(AssertionError):
            transforms.troty(180, unit=5)
# trotz | complete
class Testtrotz(unittest.TestCase):
    """Unit tests for transforms.trotz: 4x4 homogeneous rotation about the z-axis."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_3d_trotz_validData_returnDatatype(self):
        self.assertIsInstance(transforms.trotz(0), np.matrix)

    def test_transforms_3d_trotz_validData_returnData_dimension(self):
        self.assertEqual(transforms.trotz(0).shape, (4, 4))

    def test_transforms_3d_trotz_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1., -0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(0))

    def test_transforms_3d_trotz_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[0., -1., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(pi / 2))

    def test_transforms_3d_trotz_validData_boundaryCondition_pi_rad(self):
        self._check(np.matrix([[-1., -0., 0., 0.], [0., -1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(pi))

    def test_transforms_3d_trotz_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(np.matrix([[-0., 1., 0., 0.], [-1., -0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(3 * pi / 2))

    def test_transforms_3d_trotz_validData_boundaryCondition_2pi_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [-0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(2 * pi))

    def test_transforms_3d_trotz_validData_boundaryCondition_0_deg(self):
        self._check(np.matrix([[1., -0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(0, unit='deg'))

    def test_transforms_3d_trotz_validData_boundaryCondition_360_deg(self):
        self._check(np.matrix([[1., 0., 0., 0.], [-0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(360, unit='deg'))

    def test_transforms_3d_trotz_validData_boundaryCondition_90_deg(self):
        self._check(np.matrix([[0., -1., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(90, unit='deg'))

    def test_transforms_3d_trotz_validData_boundaryCondition_180_deg(self):
        self._check(np.matrix([[-1., -0., 0., 0.], [0., -1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(180, unit='deg'))

    def test_transforms_3d_trotz_validData_boundaryCondition_270_deg(self):
        self._check(np.matrix([[-0., 1., 0., 0.], [-1., -0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(270, unit='deg'))

    def test_transforms_3d_trotz_validData_boundaryCondition_450_deg(self):
        self._check(np.matrix([[0., -1., 0., 0.], [1., 0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.trotz(450, unit='deg'))

    def test_transforms_3d_trotz_invalidData_arg1_string(self):
        with self.assertRaises(TypeError):
            transforms.trotz('invalid', unit='deg')

    def test_transforms_3d_trotz_invalidData_arg2_string_mismatch(self):
        with self.assertRaises(AssertionError):
            transforms.trotz(180, unit='invalid unit')

    def test_transforms_3d_trotz_invalidData_arg2_bool(self):
        with self.assertRaises(AssertionError):
            transforms.trotz(180, unit=True)

    def test_transforms_3d_trotz_invalidData_arg2_int(self):
        with self.assertRaises(AssertionError):
            transforms.trotz(180, unit=5)
# r2t
class TestR2t(unittest.TestCase):
    """Unit tests for transforms.r2t: promote a 3x3 rotation to a 4x4 homogeneous transform."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_r2t_validData_returnDatatype(self):  # pass
        self.assertIsInstance(transforms.r2t(transforms.rotx(0)), np.matrix)

    def test_transforms_r2t_validData_returnData_dimension(self):  # pass
        self.assertEqual(transforms.r2t(transforms.rotx(0)).shape, (4, 4))

    def test_transforms_r2t_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 1., -0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]),
                    transforms.r2t(transforms.rotx(0)))

    def test_transforms_r2t_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[1., 0., 0., 0.], [0., 0., -1., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.]]),
                    transforms.r2t(transforms.rotx(pi / 2)))
# t2r
class TestT2r(unittest.TestCase):
    """Unit tests for transforms.t2r: extract the 3x3 rotation from a 4x4 homogeneous transform."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_t2r_validData_returnDatatype(self):  # pass
        self.assertIsInstance(transforms.t2r(transforms.trotx(0)), np.matrix)

    def test_transforms_t2r_validData_returnData_dimension(self):  # pass
        self.assertEqual(transforms.t2r(transforms.trotx(0)).shape, (3, 3))

    def test_transforms_t2r_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1, 0, 0], [0, 1, -0], [0, 0, 1]]),
                    transforms.t2r(transforms.trotx(0)))

    def test_transforms_t2r_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[1, 0, 0], [0, 0, -1], [0, 1, 0.]]),
                    transforms.t2r(transforms.trotx(pi / 2)))
# # rpy2r | ready
# class TestRpy2r(unittest.TestCase):
# def test_transforms_rpy2r_validData_returnDatatype(self): # pass
# self.assertIsInstance(transforms.rpy2r([[11, 1, 1]]), np.matrix)
# oa2tr
class TestOa2tr(unittest.TestCase):
    """Unit tests for transforms.oa2tr (orientation/approach vectors to a homogeneous transform)."""

    def test_transforms_oa2tr_validData_returnDatatype(self):  # pass
        # Only the return type is pinned here; element values are untested.
        self.assertIsInstance(transforms.oa2tr([[1, 0, 1]], [[1, 1, 1]]), np.matrix)
# to test:
# tr2rt
# rt2tr
# trlog
# trexp
# ---------------------------------------------------------------------------------------#
# 2D Transforms
# ---------------------------------------------------------------------------------------#
# rot2
class Testrot2(unittest.TestCase):
    """Unit tests for transforms.rot2: 2x2 planar rotation matrix."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_2d_rot2_validData_returnDatatype(self):
        self.assertIsInstance(transforms.rot2(0), np.matrix)

    def test_transforms_2d_rot2_validData_returnData_dimension(self):
        self.assertEqual(transforms.rot2(0).shape, (2, 2))

    def test_transforms_2d_rot2_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1, 0], [0, 1]]), transforms.rot2(0))

    def test_transforms_2d_rot2_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[0, -1], [1, 0]]), transforms.rot2(pi / 2))

    def test_transforms_2d_rot2_validData_boundaryCondition_pi_rad(self):
        self._check(np.matrix([[-1, -0], [0, -1]]), transforms.rot2(pi))

    def test_transforms_2d_rot2_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(np.matrix([[-0, 1], [-1, -0]]), transforms.rot2(3 * pi / 2))

    def test_transforms_2d_rot2_validData_boundaryCondition_2pi_rad(self):
        self._check(np.matrix([[1, 0], [-0, 1]]), transforms.rot2(2 * pi))

    def test_transforms_2d_rot2_validData_boundaryCondition_0_deg(self):
        self._check(np.matrix([[1, -0], [0, 1]]), transforms.rot2(0, unit='deg'))

    def test_transforms_2d_rot2_validData_boundaryCondition_360_deg(self):
        self._check(np.matrix([[1, 0], [-0, 1]]), transforms.rot2(360, unit='deg'))

    def test_transforms_2d_rot2_validData_boundaryCondition_90_deg(self):
        self._check(np.matrix([[0, -1], [1, 0]]), transforms.rot2(90, unit='deg'))

    def test_transforms_2d_rot2_validData_boundaryCondition_180_deg(self):
        self._check(np.matrix([[-1, -0], [0, -1]]), transforms.rot2(180, unit='deg'))

    def test_transforms_2d_rot2_validData_boundaryCondition_270_deg(self):
        self._check(np.matrix([[-0, 1], [-1, -0]]), transforms.rot2(270, unit='deg'))

    def test_transforms_2d_rot2_validData_boundaryCondition_450_deg(self):
        self._check(np.matrix([[0, -1], [1, 0]]), transforms.rot2(450, unit='deg'))

    def test_transforms_2d_rot2_invalidData_arg1_string(self):
        with self.assertRaises(TypeError):
            transforms.rot2('invalid', unit='deg')

    def test_transforms_2d_rot2_invalidData_arg2_string_mismatch(self):
        with self.assertRaises(AssertionError):
            transforms.rot2(180, unit='invalid unit')

    def test_transforms_2d_rot2_invalidData_arg2_bool(self):
        with self.assertRaises(AssertionError):
            transforms.rot2(180, unit=True)

    def test_transforms_2d_rot2_invalidData_arg2_int(self):
        with self.assertRaises(AssertionError):
            transforms.rot2(180, unit=5)
# trot2
class Testtrot2(unittest.TestCase):
    """Unit tests for transforms.trot2: 3x3 planar homogeneous rotation matrix."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    def test_transforms_2d_trot2_validData_returnDatatype(self):
        self.assertIsInstance(transforms.trot2(0), np.matrix)

    def test_transforms_2d_trot2_validData_returnData_dimension(self):
        self.assertEqual(transforms.trot2(0).shape, (3, 3))

    def test_transforms_2d_trot2_validData_boundaryCondition_0_rad(self):
        self._check(np.matrix([[1., -0., 0.], [0., 1., 0.], [0., 0., 1.]]),
                    transforms.trot2(0))

    def test_transforms_2d_trot2_validData_boundaryCondition_pi_by2_rad(self):
        self._check(np.matrix([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]),
                    transforms.trot2(pi / 2))

    def test_transforms_2d_trot2_validData_boundaryCondition_pi_rad(self):
        self._check(np.matrix([[-1., -0., 0.], [0., -1., 0.], [0., 0., 1.]]),
                    transforms.trot2(pi))

    def test_transforms_2d_trot2_validData_boundaryCondition_three_pi_by2_rad(self):
        self._check(np.matrix([[-0., 1., 0.], [-1., -0., 0.], [0., 0., 1.]]),
                    transforms.trot2(3 * pi / 2))

    def test_transforms_2d_trot2_validData_boundaryCondition_2pi_rad(self):
        self._check(np.matrix([[1., 0., 0.], [-0., 1., 0.], [0, 0, 1]]),
                    transforms.trot2(2 * pi))

    def test_transforms_2d_trot2_validData_boundaryCondition_0_deg(self):
        self._check(np.matrix([[1., -0., 0.], [0., 1., 0.], [0., 0., 1.]]),
                    transforms.trot2(0, unit='deg'))

    def test_transforms_2d_trot2_validData_boundaryCondition_360_deg(self):
        self._check(np.matrix([[1., 0., 0.], [-0., 1., 0.], [0., 0., 1.]]),
                    transforms.trot2(360, unit='deg'))

    def test_transforms_2d_trot2_validData_boundaryCondition_90_deg(self):
        self._check(np.matrix([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]),
                    transforms.trot2(90, unit='deg'))

    def test_transforms_2d_trot2_validData_boundaryCondition_180_deg(self):
        self._check(np.matrix([[-1., -0., 0.], [0., -1., 0.], [0., 0., 1.]]),
                    transforms.trot2(180, unit='deg'))

    def test_transforms_2d_trot2_validData_boundaryCondition_270_deg(self):
        self._check(np.matrix([[-0., 1., 0.], [-1., -0., 0.], [0., 0., 1.]]),
                    transforms.trot2(270, unit='deg'))

    def test_transforms_2d_trot2_validData_boundaryCondition_450_deg(self):
        self._check(np.matrix([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]]),
                    transforms.trot2(450, unit='deg'))

    def test_transforms_2d_trot2_invalidData_arg1_string(self):
        with self.assertRaises(TypeError):
            transforms.trot2('invalid', unit='deg')

    def test_transforms_2d_trot2_invalidData_arg2_string_mismatch(self):
        with self.assertRaises(AssertionError):
            transforms.trot2(180, unit='invalid unit')

    def test_transforms_2d_trot2_invalidData_arg2_bool(self):
        with self.assertRaises(AssertionError):
            transforms.trot2(180, unit=True)

    def test_transforms_2d_trot2_invalidData_arg2_int(self):
        with self.assertRaises(AssertionError):
            transforms.trot2(180, unit=5)
# trexp2
class Testtrexp2(unittest.TestCase):
    """Unit tests for transforms.trexp2 (planar matrix exponential)."""

    def test_transforms_2d_trexp2_validData_returnDatatype(self):
        # Only the return type is pinned here; element values are untested.
        self.assertIsInstance(transforms.trexp2(transforms.rot2(10)), np.matrix)
# ---------------------------------------------------------------------------------------#
# Differential Motion
# ---------------------------------------------------------------------------------------#
# skew
class TestSkew(unittest.TestCase):
    """Unit tests for transforms.skew: skew-symmetric matrix from a 1- or 3-element vector."""

    def _check(self, want, got):
        # Fail with a detailed element-wise diff only when the matrices differ.
        if not matrices_equal(got, want):
            self.fail(matrix_mismatch_string_builder(want, got))

    # --- 1-element (scalar) input: expect a 2x2 result ---

    def test_transforms_dif_skew_validData_returnDatatype(self):
        self.assertIsInstance(transforms.skew(np.matrix([1])), np.matrix)

    def test_transforms_dif_skew_validData_returnData_dimension(self):
        self.assertEqual(transforms.skew(np.matrix([1])).shape, (2, 2))

    # --- 3-element input: expect a 3x3 result ---

    def test_transforms_dif_skew_validData_returnDatatype_v3(self):
        self.assertIsInstance(transforms.skew(np.matrix([1, 1, 1])), np.matrix)

    def test_transforms_dif_skew_validData_returnData_dimension_v3(self):
        self.assertEqual(transforms.skew(np.matrix([1, 1, 1])).shape, (3, 3))

    # boundary values for the 1-element (scalar) case
    def test_transforms_dif_skew_validData_boundaryCondition_1(self):
        self._check(np.matrix([[0, -1], [1, 0]]), transforms.skew(np.matrix([1])))

    def test_transforms_dif_skew_validData_boundaryCondition_2(self):
        self._check(np.matrix([[0, -2], [2, 0]]), transforms.skew(np.matrix([2])))

    def test_transforms_dif_skew_validData_boundaryCondition_3(self):
        self._check(np.matrix([[0, -3], [3, 0]]), transforms.skew(np.matrix([3])))

    def test_transforms_dif_skew_validData_boundaryCondition_4(self):
        self._check(np.matrix([[0, -4], [4, 0]]), transforms.skew(np.matrix([4])))

    def test_transforms_dif_skew_validData_boundaryCondition_5(self):
        self._check(np.matrix([[0, -5], [5, 0]]), transforms.skew(np.matrix([5])))

    # boundary values for the 3-element case
    def test_transforms_dif_skew_validData_boundaryCondition_111(self):
        self._check(np.matrix([[0, -1, 1], [1, 0, -1], [-1, 1, 0]]),
                    transforms.skew(np.matrix([1, 1, 1])))

    def test_transforms_dif_skew_validData_boundaryCondition_101(self):
        self._check(np.matrix([[0, -1, 0], [1, 0, -1], [0, 1, 0]]),
                    transforms.skew(np.matrix([1, 0, 1])))

    def test_transforms_dif_skew_validData_boundaryCondition_100(self):
        self._check(np.matrix([[0, 0, 0], [0, 0, -1], [0, 1, 0]]),
                    transforms.skew(np.matrix([1, 0, 0])))

    def test_transforms_dif_skew_validData_boundaryCondition_321(self):
        self._check(np.matrix([[0, -1, 2], [1, 0, -3], [-2, 3, 0]]),
                    transforms.skew(np.matrix([3, 2, 1])))
# skewa
class TestSkewa(unittest.TestCase):
# Tests for a 3x1 vector input
# Ensure matrix is returned
def test_transforms_dif_skewa_validData_returnDatatype(self):
    """A 3-element input yields an np.matrix result."""
    self.assertIsInstance(transforms.skewa(np.matrix([1, 1, 1])), np.matrix)
# Check Matrix Dimensions vectorsize=3x1
def test_transforms_dif_skewa_validData_returnData_dimension(self):
    """A 3-element input yields a 3x3 augmented skew matrix."""
    self.assertEqual(transforms.skewa(np.matrix([1, 1, 1])).shape, (3, 3))
# Tests for a 6x1 vector input
# Ensure matrix is returned
def test_transforms_dif_skewa_validData_returnDatatype_v3(self):
    """A 6-element input also yields an np.matrix result."""
    self.assertIsInstance(transforms.skewa(np.matrix([1, 1, 1, 1, 1, 1])), np.matrix)
# Check Matrix Dimensions of 4x4 if v = 6x1
def test_transforms_dif_skew_validData_returnData_dimension_v3(self):
    """A 6-element input yields a 4x4 augmented skew matrix."""
    # NOTE(review): this method exercises skewa despite being named "skew"
    # (apparent copy-paste from TestSkew); the name is kept so the reported
    # test id does not change — confirm before renaming.
    self.assertEqual(transforms.skewa(np.matrix([1, 1, 1, 1, 1, 1])).shape, (4, 4))
# boundary-value tests for a 3-element vector input
def test_transforms_dif_skewa_validData_boundaryCondition_1(self):
expected_mat = np.matrix([[0, -1, 1], [1, 0, 1], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 1, 1]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_dif_skewa_validData_boundaryCondition_2(self):
expected_mat = np.matrix([[0, -3, 1], [3, 0, 2], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 2, 3]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
# def test_transforms_dif_skewa_validData_boundaryCondition_2_6x1(self):
# expected_mat = np.matrix([[1, 0], [0, 1]])
# received_mat = transforms.skewa(np.matrix([]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_dif_skewa_validData_boundaryCondition_2_4x4(self):
expected_mat = np.matrix([[0, -1, 1, 1], [1, 0, -1, 0], [-1, 1, 0, 1], [0, 0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 0, 1, 1, 1, 1]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
# boundary tests if 3 vector
def test_transforms_dif_skewa_validData_boundaryCondition_111(self):
expected_mat = np.matrix([[0, -1, 1], [1, 0, 1], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 1, 1]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_dif_skewa_validData_boundaryCondition_101(self):
expected_mat = np.matrix([[0, -1, 1], [1, 0, 0], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 0, 1]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_dif_skewa_validData_boundaryCondition_100(self):
expected_mat = np.matrix([[0, 0, 1], [0, 0, 0], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 0, 0]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_dif_skewa_validData_boundaryCondition_123(self):
expected_mat = np.matrix([[0, -3, 1], [3, 0, 2], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([1, 2, 3]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
def test_transforms_dif_skewa_validData_boundaryCondition_321(self):
expected_mat = np.matrix([[0, -1, 3], [1, 0, 2], [0, 0, 0]])
received_mat = transforms.skewa(np.matrix([3, 2, 1]))
if not matrices_equal(received_mat, expected_mat, ):
output_str = matrix_mismatch_string_builder(
expected_mat, received_mat)
self.fail(output_str)
# vex
class TestVex(unittest.TestCase):
    """Tests for transforms.vex (inverse of the skew operator)."""

    def test_transforms_dif_vex_validData_returnDatatype1(self):
        # A 3x3 rotation input must come back as an np.matrix.
        self.assertIsInstance(transforms.vex(transforms.rotx(30)), np.matrix)

    def test_transforms_dif_vex_validData_returnData_dimension1(self):
        # A 3x3 input must produce a 3x1 column vector.
        shape = transforms.vex(transforms.rotx(30)).shape
        self.assertEqual(shape, (3, 1))

    def test_transforms_dif_vex_validData_returnDatatype2(self):
        # A 2x2 rotation input must also come back as an np.matrix.
        self.assertIsInstance(transforms.vex(transforms.rot2(0)), np.matrix)

    def test_transforms_dif_vex_validData_returnData_dimension2(self):
        # A 2x2 input must produce a single-element (1x1) result.
        shape = transforms.vex(transforms.rot2(30)).shape
        self.assertEqual(shape, (1, 1))

    def test_transforms_dif_vex_validData_boundaryCondition_rot_0(self):
        # vex of a zero-angle rotation is the zero vector.
        expected = np.matrix([[0.], [0.], [0.]])
        actual = transforms.vex(transforms.roty(0))
        if matrices_equal(actual, expected):
            return
        self.fail(matrix_mismatch_string_builder(expected, actual))

    # NOTE(review): disabled in the original with "check whats going on here";
    # presumably the expected value for roty(30) needs re-derivation — confirm.
    # def test_transforms_dif_vex_validData_boundaryCondition_roty_30(self):
    #     expected_mat = np.matrix([[0.], [-0.98803162], [0.]])
    #     received_mat = transforms.vex(transforms.roty(30))
    #     if not matrices_equal(received_mat, expected_mat):
    #         output_str = matrix_mismatch_string_builder(
    #             expected_mat, received_mat)
    #         self.fail(output_str)
# ---------------------------------------------------------------------------------------#
# Utility
# ---------------------------------------------------------------------------------------#
# unit
# Entry point: run every test case defined in this module.
if __name__ == "__main__":
    unittest.main()
|
|
import numpy as np
from scipy.stats import skew, kurtosis, shapiro, pearsonr, ansari, mood, levene, fligner, bartlett, mannwhitneyu
from scipy.spatial.distance import braycurtis, canberra, chebyshev, cityblock, correlation, cosine, euclidean, hamming, jaccard, kulsinski, matching, russellrao, sqeuclidean
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model import Ridge, LinearRegression, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score, accuracy_score, roc_auc_score, average_precision_score, f1_score, hinge_loss, matthews_corrcoef, precision_score, recall_score, zero_one_loss
from sklearn.metrics.cluster import adjusted_mutual_info_score, adjusted_rand_score, completeness_score, homogeneity_completeness_v_measure, homogeneity_score, mutual_info_score, normalized_mutual_info_score, v_measure_score
from boomlet.utils.aggregators import to_aggregator
from boomlet.metrics import max_error, error_variance, relative_error_variance, gini_loss, categorical_gini_loss
from boomlet.transform.type_conversion import Discretizer
from autocause.feature_functions import *
from autocause.converters import NUMERICAL_TO_NUMERICAL, NUMERICAL_TO_CATEGORICAL, BINARY_TO_NUMERICAL, BINARY_TO_CATEGORICAL, CATEGORICAL_TO_NUMERICAL, CATEGORICAL_TO_CATEGORICAL
"""
Functions used to combine a list of features into one coherent one.
Sample use:
1. to convert categorical to numerical, we perform a one hot encoding
2. treat each binary column as a separate numerical feature
3. compute numerical features as usual
4. use each of the following functions to create a new feature
(with the input as the nth feature for each of the columns)
WARNING: these will be used in various locations throughout the code base
and will result in feature size growing at faster than a linear rate
"""
AGGREGATORS = [
to_aggregator("max"),
to_aggregator("min"),
to_aggregator("median"),
to_aggregator("mode"),
to_aggregator("mean"),
# to_aggregator("sum"),
]
"""
Boolean flags specifying whether or not to perform conversions
"""
CONVERT_TO_NUMERICAL = True
CONVERT_TO_CATEGORICAL = False
"""
Functions that compute a metric on a single 1-D array
"""
UNARY_NUMERICAL_FEATURES = [
normalized_entropy,
skew,
kurtosis,
np.std,
shapiro,
]
UNARY_CATEGORICAL_FEATURES = [
lambda x: len(set(x)), # number of unique
]
"""
Functions that compute a metric on two 1-D arrays
"""
BINARY_NN_FEATURES = [
independent_component,
chi_square,
pearsonr,
correlation_magnitude,
braycurtis,
canberra,
chebyshev,
cityblock,
correlation,
cosine,
euclidean,
hamming,
sqeuclidean,
ansari,
mood,
levene,
fligner,
bartlett,
mannwhitneyu,
]
BINARY_NC_FEATURES = [
]
BINARY_CN_FEATURES = [
categorical_numerical_homogeneity,
bucket_variance,
anova,
]
BINARY_CC_FEATURES = [
categorical_categorical_homogeneity,
anova,
dice_,
jaccard,
kulsinski,
matching,
rogerstanimoto_,
russellrao,
sokalmichener_,
sokalsneath_,
yule_,
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
v_measure_score,
]
"""
Dictionaries of input type (e.g. B corresponds to pairs where binary
data is the input) to pairs of converter functions and a boolean flag
of whether or not to aggregate over the output of the converter function
converter functions should have the type signature:
converter(X_raw, X_current_type, Y_raw, Y_type)
where X_raw is the data to convert
"""
NUMERICAL_CONVERTERS = dict(
N=NUMERICAL_TO_NUMERICAL["identity"],
B=BINARY_TO_NUMERICAL["identity"],
C=CATEGORICAL_TO_NUMERICAL["pca1"],
)
CATEGORICAL_CONVERTERS = dict(
N=NUMERICAL_TO_CATEGORICAL["discretizer10"],
B=BINARY_TO_CATEGORICAL["identity"],
C=CATEGORICAL_TO_CATEGORICAL["identity"],
)
"""
Whether or not the converters can result in a 2D output. This must be set to True
if any of the respective converts can return a 2D output.
"""
NUMERICAL_CAN_BE_2D = False
CATEGORICAL_CAN_BE_2D = False
"""
Estimators used to provide a fit for a variable
"""
REGRESSION_ESTIMATORS = [
# Ridge(),
# LinearRegression(),
DecisionTreeRegressor(random_state=0),
# RandomForestRegressor(random_state=0),
# GradientBoostingRegressor(subsample=0.5, n_estimators=10, random_state=0),
# KNeighborsRegressor(),
]
CLASSIFICATION_ESTIMATORS = [
LogisticRegression(random_state=0),
DecisionTreeClassifier(random_state=0),
RandomForestClassifier(random_state=0),
GradientBoostingClassifier(subsample=0.5, n_estimators=10, random_state=0),
KNeighborsClassifier(),
GaussianNB(),
]
"""
Functions to provide a value of how good a fit on a variable is
"""
REGRESSION_METRICS = [
explained_variance_score,
mean_absolute_error,
mean_squared_error,
r2_score,
max_error,
error_variance,
relative_error_variance,
gini_loss,
] + BINARY_NN_FEATURES
REGRESSION_RESIDUAL_METRICS = [
] + UNARY_NUMERICAL_FEATURES
BINARY_PROBABILITY_CLASSIFICATION_METRICS = [
roc_auc_score,
hinge_loss,
] + REGRESSION_METRICS
RESIDUAL_PROBABILITY_CLASSIFICATION_METRICS = [
] + REGRESSION_RESIDUAL_METRICS
BINARY_CLASSIFICATION_METRICS = [
accuracy_score,
average_precision_score,
f1_score,
matthews_corrcoef,
precision_score,
recall_score,
zero_one_loss,
categorical_gini_loss,
]
ND_CLASSIFICATION_METRICS = [ # metrics for N-dimensional classification
] + BINARY_CC_FEATURES
"""
Functions to assess the model (e.g. complexity) of the fit on a numerical variable
of type signature:
metric(clf, X, y)
"""
REGRESSION_MODEL_METRICS = [
# TODO model complexity metrics
]
CLASSIFICATION_MODEL_METRICS = [
# TODO use regression model metrics on predict_proba
]
"""
The operations to perform on the A->B features and B->A features.
"""
RELATIVE_FEATURES = [
# Identity functions, comment out the next 2 lines for only relative features
lambda x, y: x,
lambda x, y: y,
lambda x, y: x - y,
]
"""
Whether or not to treat each observation (A,B) as two observations: (A,B) and (B,A)
If this is done and training labels are given, those labels will have to be
reflected as well. The reflection is performed through appending at the end.
(e.g. if we have N training examples, observation N+1 in the output will be
the first example reflected)
"""
REFLECT_DATA = False
"""
Whether or not metafeatures based on the types of A and B are generated.
e.g. 1/0 feature on whether or not A is Numerical, etc.
"""
ADD_METAFEATURES = True
"""
Whether or not to generate combination features between the computed
features and metafeatures.
e.g. for each feature and metafeature, generate a new feature which is the
product of the two
WARNING: will generate a LOT of features (approximately 21 times as many)
"""
COMPUTE_METAFEATURE_COMBINATIONS = False
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert checkpoints using RNNCells to new name convention.
Usage:
python checkpoint_convert.py [--write_v1_checkpoint] \
'/path/to/checkpoint' '/path/to/new_checkpoint'
For example, if there is a V2 checkpoint to be converted and the files include:
/tmp/my_checkpoint/model.ckpt.data-00000-of-00001
/tmp/my_checkpoint/model.ckpt.index
/tmp/my_checkpoint/model.ckpt.meta
use the following command:
mkdir /tmp/my_converted_checkpoint &&
python checkpoint_convert.py \
/tmp/my_checkpoint/model.ckpt /tmp/my_converted_checkpoint/model.ckpt
This will generate three converted checkpoint files corresponding to the three
old ones in the new directory:
/tmp/my_converted_checkpoint/model.ckpt.data-00000-of-00001
/tmp/my_converted_checkpoint/model.ckpt.index
/tmp/my_converted_checkpoint/model.ckpt.meta
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import re
import sys
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as saver_lib
# Map of old (pre-rename) RNNCell variable-name fragments to the new
# kernel/bias naming convention.  An OrderedDict preserves the lookup order
# relied on by _rnn_name_replacement, which applies only the FIRST fragment
# found in a variable name.
_RNN_NAME_REPLACEMENTS = collections.OrderedDict([
    ############################################################################
    # contrib/rnn/python/ops/core_rnn_cell_impl.py
    # BasicRNNCell
    ('basic_rnn_cell/weights', 'basic_rnn_cell/kernel'),
    ('basic_rnn_cell/biases', 'basic_rnn_cell/bias'),
    # GRUCell
    ('gru_cell/weights', 'gru_cell/kernel'),
    ('gru_cell/biases', 'gru_cell/bias'),
    ('gru_cell/gates/weights', 'gru_cell/gates/kernel'),
    ('gru_cell/gates/biases', 'gru_cell/gates/bias'),
    ('gru_cell/candidate/weights', 'gru_cell/candidate/kernel'),
    ('gru_cell/candidate/biases', 'gru_cell/candidate/bias'),
    # BasicLSTMCell
    ('basic_lstm_cell/weights', 'basic_lstm_cell/kernel'),
    ('basic_lstm_cell/biases', 'basic_lstm_cell/bias'),
    # LSTMCell
    ('lstm_cell/weights', 'lstm_cell/kernel'),
    ('lstm_cell/biases', 'lstm_cell/bias'),
    ('lstm_cell/projection/weights', 'lstm_cell/projection/kernel'),
    ('lstm_cell/projection/biases', 'lstm_cell/projection/bias'),
    # OutputProjectionWrapper
    ('output_projection_wrapper/weights', 'output_projection_wrapper/kernel'),
    ('output_projection_wrapper/biases', 'output_projection_wrapper/bias'),
    # InputProjectionWrapper
    ('input_projection_wrapper/weights', 'input_projection_wrapper/kernel'),
    ('input_projection_wrapper/biases', 'input_projection_wrapper/bias'),
    ############################################################################
    # contrib/rnn/python/ops/lstm_ops.py
    # LSTMBlockFusedCell ??
    ('lstm_block_wrapper/weights', 'lstm_block_wrapper/kernel'),
    ('lstm_block_wrapper/biases', 'lstm_block_wrapper/bias'),
    ############################################################################
    # contrib/rnn/python/ops/rnn_cell.py
    # LayerNormBasicLSTMCell
    ('layer_norm_basic_lstm_cell/weights', 'layer_norm_basic_lstm_cell/kernel'),
    ('layer_norm_basic_lstm_cell/biases', 'layer_norm_basic_lstm_cell/bias'),
    # UGRNNCell, not found in g3, but still need it?
    ('ugrnn_cell/weights', 'ugrnn_cell/kernel'),
    ('ugrnn_cell/biases', 'ugrnn_cell/bias'),
    # NASCell
    ('nas_rnn/weights', 'nas_rnn/kernel'),
    ('nas_rnn/recurrent_weights', 'nas_rnn/recurrent_kernel'),
    # IntersectionRNNCell
    ('intersection_rnn_cell/weights', 'intersection_rnn_cell/kernel'),
    ('intersection_rnn_cell/biases', 'intersection_rnn_cell/bias'),
    ('intersection_rnn_cell/in_projection/weights',
     'intersection_rnn_cell/in_projection/kernel'),
    ('intersection_rnn_cell/in_projection/biases',
     'intersection_rnn_cell/in_projection/bias'),
    # PhasedLSTMCell
    ('phased_lstm_cell/mask_gates/weights',
     'phased_lstm_cell/mask_gates/kernel'),
    ('phased_lstm_cell/mask_gates/biases', 'phased_lstm_cell/mask_gates/bias'),
    ('phased_lstm_cell/new_input/weights', 'phased_lstm_cell/new_input/kernel'),
    ('phased_lstm_cell/new_input/biases', 'phased_lstm_cell/new_input/bias'),
    ('phased_lstm_cell/output_gate/weights',
     'phased_lstm_cell/output_gate/kernel'),
    ('phased_lstm_cell/output_gate/biases',
     'phased_lstm_cell/output_gate/bias'),
    # AttentionCellWrapper
    ('attention_cell_wrapper/weights', 'attention_cell_wrapper/kernel'),
    ('attention_cell_wrapper/biases', 'attention_cell_wrapper/bias'),
    ('attention_cell_wrapper/attn_output_projection/weights',
     'attention_cell_wrapper/attn_output_projection/kernel'),
    ('attention_cell_wrapper/attn_output_projection/biases',
     'attention_cell_wrapper/attn_output_projection/bias'),
    ('attention_cell_wrapper/attention/weights',
     'attention_cell_wrapper/attention/kernel'),
    ('attention_cell_wrapper/attention/biases',
     'attention_cell_wrapper/attention/bias'),
])
# Rewrites for sharded variable names (old '<Cell>/W_<n>' style).  Unlike the
# map above, every matching pattern is applied (see
# _rnn_name_replacement_sharded).
_RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([
    ('LSTMCell/W_', 'lstm_cell/weights/part_'),
    ('BasicLSTMCell/Linear/Matrix_', 'basic_lstm_cell/weights/part_'),
    ('GRUCell/W_', 'gru_cell/weights/part_'),
    ('MultiRNNCell/Cell', 'multi_rnn_cell/cell_'),
])
def _rnn_name_replacement(var_name):
    """Rewrite a non-sharded variable name via _RNN_NAME_REPLACEMENTS.

    Only the first matching fragment is applied; names with no matching
    fragment are returned unchanged.
    """
    for old_fragment, new_fragment in _RNN_NAME_REPLACEMENTS.items():
        if old_fragment not in var_name:
            continue
        renamed = var_name.replace(old_fragment, new_fragment)
        logging.info('Converted: %s --> %s' % (var_name, renamed))
        return renamed
    return var_name
def _rnn_name_replacement_sharded(var_name):
    """Rewrite a sharded variable name via _RNN_SHARDED_NAME_REPLACEMENTS.

    Every matching pattern is applied (no early exit, unlike the
    non-sharded variant).
    """
    for old_fragment, new_fragment in _RNN_SHARDED_NAME_REPLACEMENTS.items():
        if old_fragment in var_name:
            previous_name = var_name
            var_name = var_name.replace(old_fragment, new_fragment)
            logging.info('Converted: %s --> %s' % (previous_name, var_name))
    return var_name
def _split_sharded_vars(name_shape_map):
"""Split shareded variables.
Args:
name_shape_map: A dict from variable name to variable shape.
Returns:
not_sharded: Names of the non-sharded variables.
sharded: Names of the sharded varibales.
"""
sharded = []
not_sharded = []
for name in name_shape_map:
if re.match(name, '_[0-9]+$'):
if re.sub('_[0-9]+$', '_1', name) in name_shape_map:
sharded.append(name)
else:
not_sharded.append(name)
else:
not_sharded.append(name)
return not_sharded, sharded
def convert_names(checkpoint_from_path,
                  checkpoint_to_path,
                  write_v1_checkpoint=False):
    """Migrates the names of variables within a checkpoint.

    Args:
        checkpoint_from_path: Path to source checkpoint to be read in.
        checkpoint_to_path: Path to checkpoint to be written out.
        write_v1_checkpoint: Whether the output checkpoint will be in V1 format.

    Returns:
        A dictionary that maps the new variable names to the Variable objects.
        A dictionary that maps the old variable names to the new variable names.
    """
    with ops.Graph().as_default():
        logging.info('Reading checkpoint_from_path %s' % checkpoint_from_path)
        reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_from_path)
        name_shape_map = reader.get_variable_to_shape_map()
        not_sharded, sharded = _split_sharded_vars(name_shape_map)
        new_variable_map = {}
        conversion_map = {}
        for var_name in not_sharded:
            new_var_name = _rnn_name_replacement(var_name)
            tensor = reader.get_tensor(var_name)
            var = variables.Variable(tensor, name=var_name)
            new_variable_map[new_var_name] = var
            if new_var_name != var_name:
                conversion_map[var_name] = new_var_name
        for var_name in sharded:
            new_var_name = _rnn_name_replacement_sharded(var_name)
            # BUG FIX: the original reused the stale `tensor` bound in the
            # last iteration of the previous loop (and crashed with a
            # NameError when not_sharded was empty); each sharded variable
            # must load its own tensor from the checkpoint.
            tensor = reader.get_tensor(var_name)
            var = variables.Variable(tensor, name=var_name)
            new_variable_map[new_var_name] = var
            if new_var_name != var_name:
                conversion_map[var_name] = new_var_name

        write_version = (saver_pb2.SaverDef.V1
                         if write_v1_checkpoint else saver_pb2.SaverDef.V2)
        saver = saver_lib.Saver(new_variable_map, write_version=write_version)
        with session.Session() as sess:
            sess.run(variables.global_variables_initializer())
            logging.info('Writing checkpoint_to_path %s' % checkpoint_to_path)
            saver.save(sess, checkpoint_to_path)

    logging.info('Summary:')
    logging.info('  Converted %d variable name(s).' % len(new_variable_map))
    return new_variable_map, conversion_map
def main(_):
    """Convert the checkpoint named by the parsed command-line flags."""
    src_path = FLAGS.checkpoint_from_path
    dst_path = FLAGS.checkpoint_to_path
    convert_names(src_path, dst_path,
                  write_v1_checkpoint=FLAGS.write_v1_checkpoint)
# Command-line entry point: parse the two positional checkpoint paths plus
# the optional --write_v1_checkpoint flag, then hand control to main() via
# tf app.run; unrecognized args are forwarded untouched in argv.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Registering a 'bool' type lets string flag values 'true'/'false' parse.
    parser.register('type', 'bool', lambda v: v.lower() == 'true')
    parser.add_argument('checkpoint_from_path', type=str,
                        help='Path to source checkpoint to be read in.')
    parser.add_argument('checkpoint_to_path', type=str,
                        help='Path to checkpoint to be written out.')
    parser.add_argument('--write_v1_checkpoint', action='store_true',
                        help='Write v1 checkpoint')
    FLAGS, unparsed = parser.parse_known_args()
    app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LoadBalancersOperations(object):
"""LoadBalancersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-06-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-06-01"
self.config = config
    def delete(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified load balancer.

        Long-running operation: by default returns a poller; with raw=True
        the initial response is returned without polling.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id, echoed back by the service.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # Sends the initial DELETE request of the long-running operation.
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the operation-status URL supplied by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200/202/204 are all acceptable terminal codes for delete.
            if response.status_code not in [204, 202, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response
            # Implicitly returns None on success when raw is False.

        if raw:
            # raw=True bypasses polling: send once, wrap the direct response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def get(
            self, resource_group_name, load_balancer_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LoadBalancer
         <azure.mgmt.network.v2017_06_01.models.LoadBalancer>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            # $expand is only sent when the caller asked for referenced
            # resources to be inlined.
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        # Any status other than 200 is surfaced as a CloudError.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LoadBalancer', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def create_or_update(
            self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a load balancer.

        Long-running operation: by default returns a poller; with raw=True
        the initial response is returned without polling.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param parameters: Parameters supplied to the create or update load
         balancer operation.
        :type parameters: :class:`LoadBalancer
         <azure.mgmt.network.v2017_06_01.models.LoadBalancer>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`LoadBalancer
         <azure.mgmt.network.v2017_06_01.models.LoadBalancer>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'LoadBalancer')

        # Construct and send request
        def long_running_send():
            # Sends the initial PUT request with the serialized payload.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Polls the operation-status URL supplied by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 (updated) and 201 (created) both carry a LoadBalancer body.
            if response.status_code not in [201, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            deserialized = None
            if response.status_code == 201:
                deserialized = self._deserialize('LoadBalancer', response)
            if response.status_code == 200:
                deserialized = self._deserialize('LoadBalancer', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized

        if raw:
            # raw=True bypasses polling: send once, wrap the direct response.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancers in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`LoadBalancerPaged
         <azure.mgmt.network.v2017_06_01.models.LoadBalancerPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # Fetches one page: the subscription-level URL for the first
            # page, then the service-provided nextLink for subsequent pages.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                # nextLink already embeds every query parameter.
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: the paged collection pulls pages lazily
        # through internal_paging as the caller iterates.
        deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
def list(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets all the load balancers in a resource group.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`LoadBalancerPaged
     <azure.mgmt.network.v2017_06_01.models.LoadBalancerPaged>`
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Page-fetching callback used by the LoadBalancerPaged iterator below.
    def internal_paging(next_link=None, raw=False):
        if not next_link:
            # Construct URL (first page only; later pages use next_link as-is)
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)

        # Any non-200 status is surfaced as a CloudError carrying the request id
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        # Raw mode returns the paged collection with a (currently empty) header dict
        header_dict = {}
        client_raw_response = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
|
|
# -*- coding: utf-8 -*-
"""
Volunteer Management
"""
# Controller/function names for this web2py request; reused by the
# REST controllers below (e.g. s3_rest_controller(..., resourcename)).
module = request.controller
resourcename = request.function

# Refuse to serve the controller if the module is disabled in deployment settings
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)

# Populate session variables (mode, org, ...) used across the HRM controllers
s3db.hrm_vars()
# =============================================================================
def index():
    """ Dashboard: route the user straight to the relevant start page. """

    if session.s3.hrm.mode is None:
        # HR-manager mode: bypass home page & go direct to the
        # searchable list of Volunteers
        redirect(URL(f="volunteer", args="search"))

    # Personal-profile mode: go to the user's own Personal Profile
    redirect(URL(f="person"))
# =============================================================================
# People
# =============================================================================
def human_resource():
    """
        HR Controller
        - combined (unused, except for Imports)
    """

    table = s3db.hrm_human_resource
    _type = table.type
    # Restrict this controller to Volunteers (type == 2)
    s3.filter = (_type == 2)

    # Custom method for Service Record
    s3db.set_method("hrm", "human_resource",
                    method="form",
                    action=s3db.hrm_service_record
                    )

    def prep(r):
        """ CRUD pre-process: route interactive requests to the right controller. """
        if r.method == "form":
            return True
        if r.interactive:
            if r.method == "create" and not r.component:
                # BUGFIX: the original referenced undefined names 'args'
                # (NameError at runtime) and 'vars' (the Python builtin
                # function, not a mapping).  Carry the current request's
                # args/vars through to the volunteer controller instead.
                redirect(URL(f="volunteer",
                             args=request.args,
                             vars=request.get_vars))
            elif r.method == "delete":
                # Don't redirect
                pass
            elif r.id:
                # Redirect to person controller
                vars = {
                    "human_resource.id": r.id,
                    "group": "volunteer"
                }
                redirect(URL(f="person",
                             vars=vars))
        return True
    s3.prep = prep

    def postp(r, output):
        """ CRUD post-process: action buttons for lists, map popups for 'plain'. """
        if r.interactive:
            if not r.component:
                s3_action_buttons(r, deletable=settings.get_hrm_deletable())
                if "msg" in settings.modules and \
                   auth.permission.has_permission("update", c="hrm", f="compose"):
                    # @ToDo: Remove this now that we have it in Events?
                    s3.actions.append({
                        "url": URL(f="compose",
                                   vars = {"hrm_id": "[id]"}),
                        "_class": "action-btn",
                        "label": str(T("Send Message"))})
        elif r.representation == "plain" and \
             r.method != "search":
            # Map Popups
            output = s3db.hrm_map_popup(r)
        return output
    s3.postp = postp

    output = s3_rest_controller("hrm", "human_resource")
    return output
# -----------------------------------------------------------------------------
def volunteer():
    """
        Volunteer Controller

        Configures the hrm_human_resource table/search/report for
        volunteer-specific use, then hands off to the shared REST controller.
    """

    tablename = "hrm_human_resource"
    table = s3db[tablename]

    # Volunteers only (type == 2)
    _type = table.type
    s3.filter = (_type == 2)
    _location = table.location_id
    _location.label = T("Home Address")

    list_fields = ["id",
                   "person_id",
                   "job_title_id",
                   "organisation_id",
                   (settings.get_ui_label_mobile_phone(), "phone"),
                   "location_id",
                   (T("Trainings"), "course"),
                   (T("Certificates"), "certificate"),
                   (T("Email"), "email"),
                   ]

    get_config = s3db.get_config
    report_options = get_config(tablename,
                                "report_options")
    # Remove inappropriate filters from the Search widget
    human_resource_search = get_config(tablename,
                                       "search_method")
    # Remove Facility
    # NOTE(review): positional pop(5) is fragile — depends on the widget
    # order defined in the model; verify if the search config changes.
    human_resource_search.advanced.pop(5)

    if settings.get_hrm_vol_experience() == "programme":
        enable_active_field = settings.set_org_dependent_field(None,
                                                               tablename = "vol_volunteer",
                                                               fieldname = "active")
        # Add Programme Virtual Fields
        table.virtualfields.append(s3db.hrm_programme_virtual_fields())
        # Add VF to List Fields
        if enable_active_field:
            list_fields.insert(4, (T("Active?"), "active"))
        list_fields.insert(6, (T("Programme"), "programme"))
        # Add VF to Report Options
        report_fields = report_options.rows
        report_fields.append((T("Programme"), "programme"))
        if enable_active_field:
            report_fields.append((T("Active?"), "active"))
        report_options.rows = report_fields
        report_options.cols = report_fields
        report_options.facts = report_fields
        # Add VF to the Search Filters
        # Remove deprecated Active/Obsolete
        human_resource_search.advanced.pop(1)
        table.status.readable = False
        table.status.writable = False
        if enable_active_field:
            # NOTE: the trailing comma makes 'widget' a 1-tuple; widget[0]
            # below extracts the actual S3SearchOptionsWidget instance.
            widget = s3base.S3SearchOptionsWidget(
                        name="human_resource_search_active",
                        label=T("Active?"),
                        field="active",
                        cols = 2,
                        options = {
                                T("Yes"): T("Yes"),
                                T("No"): T("No")
                            }
                      ),
            search_widget = ("human_resource_search_active", widget[0])
            human_resource_search.advanced.insert(1, search_widget)

        def hrm_programme_opts():
            """
                Provide the options for the HRM programme search filter
            """
            ptable = s3db.hrm_programme
            root_org = auth.root_org()
            if root_org:
                query = (ptable.deleted == False) & \
                        ((ptable.organisation_id == root_org) | \
                         (ptable.organisation_id == None))
            else:
                query = (ptable.deleted == False) & \
                        (ptable.organisation_id == None)
            opts = db(query).select(ptable.id,
                                    ptable.name)
            _dict = {}
            for opt in opts:
                _dict[opt.id] = opt.name
            return _dict

        # Same 1-tuple idiom as above
        widget = s3base.S3SearchOptionsWidget(
                    name="human_resource_search_programme",
                    label=T("Programme"),
                    field="programme",
                    cols = 2,
                    options = hrm_programme_opts
                  ),
        search_widget = ("human_resource_search_programme", widget[0])
        human_resource_search.advanced.insert(3, search_widget)
    else:
        list_fields.append("status")

    s3.crud_strings[tablename] = s3.crud_strings["hrm_volunteer"]

    s3db.configure(tablename,
                   list_fields = list_fields,
                   report_options = report_options,
                   search_method = human_resource_search)

    def prep(r):
        """ CRUD pre-process: create-form tweaks, or redirect to person. """
        if r.interactive:
            table = r.table
            table.person_id.widget = S3AddPersonWidget(controller="vol")
            if not r.component and \
               not r.id and \
               r.method in [None, "create"]:
                # Don't redirect
                # Assume staff only between 12-81
                s3db.pr_person.date_of_birth.widget = S3DateWidget(past=972, future=-144)
                _type.default = 2
                _location.writable = True
                _location.readable = True
                table.code.writable = False
                table.code.readable = False
                table.department_id.writable = False
                table.department_id.readable = False
                table.essential.writable = False
                table.essential.readable = False
                table.site_contact.writable = False
                table.site_contact.readable = False
                table.status.writable = False
                table.status.readable = False
            elif r.method == "delete":
                # Don't redirect
                pass
            elif r.id:
                # Redirect to person controller
                vars = {
                    "human_resource.id": r.id,
                    "group": "volunteer"
                }
                redirect(URL(f="person",
                             vars=vars))
        return True
    s3.prep = prep

    def postp(r, output):
        """ CRUD post-process: action buttons, programme field, map popups. """
        if r.interactive:
            if not r.component:
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
                    '''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
                s3_action_buttons(r, deletable=settings.get_hrm_deletable())
                if "msg" in settings.modules:
                    # @ToDo: Remove this now that we have it in Events?
                    s3.actions.append({
                        "url": URL(f="compose",
                                   vars = {"hrm_id": "[id]"}),
                        "_class": "action-btn",
                        "label": str(T("Send Message"))
                        })
                if settings.get_hrm_vol_experience() == "programme" and \
                   r.method not in ["search", "report", "import"] and \
                   "form" in output:
                    # Insert field to set the Programme
                    sep = ": "
                    table = s3db.hrm_programme_hours
                    field = table.programme_id
                    default = field.default
                    widget = field.widget or SQLFORM.widgets.options.widget(field, default)
                    field_id = "%s_%s" % (table._tablename, field.name)
                    label = field.label
                    label = LABEL(label, label and sep, _for=field_id,
                                  _id=field_id + SQLFORM.ID_LABEL_SUFFIX)
                    row_id = field_id + SQLFORM.ID_ROW_SUFFIX
                    programme = s3.crud.formstyle(row_id, label, widget, field.comment)
                    try:
                        output["form"][0].insert(4, programme[1])
                    except:
                        # A non-standard formstyle with just a single row
                        pass
                    try:
                        output["form"][0].insert(4, programme[0])
                    except:
                        pass
        elif r.representation == "plain" and \
             r.method != "search":
            # Map Popups
            output = s3db.hrm_map_popup(r)
        return output
    s3.postp = postp

    output = s3_rest_controller("hrm", "human_resource")
    return output
# -----------------------------------------------------------------------------
def person():
    """
        Person Controller
        - used for Personal Profile & Imports
        - includes components relevant to HRM
    """

    configure = s3db.configure
    set_method = s3db.set_method

    # Custom Method for Contacts
    set_method("pr", resourcename,
               method="contacts",
               action=s3db.pr_contacts)

    # Plug-in role matrix for Admins/OrgAdmins
    realms = auth.user is not None and auth.user.realms or []
    if ADMIN in realms or ORG_ADMIN in realms:
        set_method("pr", resourcename, method="roles",
                   action=s3base.S3PersonRoleManager())

    if settings.has_module("asset"):
        # Assets as component of people
        s3db.add_component("asset_asset",
                           pr_person="assigned_to_id")
        # Edits should always happen via the Asset Log
        # @ToDo: Allow this method too, if we can do so safely
        configure("asset_asset",
                  insertable = False,
                  editable = False,
                  deletable = False)

    group = request.get_vars.get("group", "volunteer")
    hr_id = request.get_vars.get("human_resource.id", None)
    # Sanitize: only a purely numeric id is accepted from the URL
    if not str(hr_id).isdigit():
        hr_id = None

    # Configure human resource table
    table = s3db.hrm_human_resource
    table.type.default = 2
    request.get_vars.update(xsltmode="volunteer")
    if hr_id:
        hr = table[hr_id]
        if hr:
            group = hr.type == 2 and "volunteer" or "staff"
            # Also inform the back-end of this finding
            request.get_vars["group"] = group

    # Configure person table
    tablename = "pr_person"
    table = s3db[tablename]
    if settings.get_hrm_vol_experience() == "programme":
        table.virtualfields.append(s3db.hrm_programme_person_virtual_fields())
    configure(tablename,
              deletable=False)

    mode = session.s3.hrm.mode
    if mode is not None:
        # Configure for personal mode
        s3db.hrm_human_resource.organisation_id.readable = True
        s3.crud_strings[tablename].update(
            title_display = T("Personal Profile"),
            title_update = T("Personal Profile"))
        # People can view their own HR data, but not edit it
        configure("hrm_human_resource",
                  insertable = False,
                  editable = False,
                  deletable = False)
        configure("hrm_certification",
                  insertable = True,
                  editable = True,
                  deletable = True)
        configure("hrm_credential",
                  insertable = False,
                  editable = False,
                  deletable = False)
        configure("hrm_competency",
                  insertable = True,  # Can add unconfirmed
                  editable = False,
                  deletable = False)
        configure("hrm_training",    # Can add but not provide grade
                  insertable = True,
                  editable = False,
                  deletable = False)
        configure("hrm_experience",
                  insertable = False,
                  editable = False,
                  deletable = False)
        configure("pr_group_membership",
                  insertable = False,
                  editable = False,
                  deletable = False)
    else:
        # Configure for HR manager mode
        s3.crud_strings[tablename].update(
            title_display = T("Volunteer Details"),
            title_update = T("Volunteer Details"),
            title_upload = T("Import Volunteers"),
            )

    # Upload for configuration (add replace option)
    s3.importerPrep = lambda: dict(ReplaceOption=T("Remove existing data before import"))

    # Import pre-process
    def import_prep(data, group=group):
        """
            Deletes all HR records (of the given group) of the organisation
            before processing a new data import, used for the import_prep
            hook in s3mgr
        """
        resource, tree = data
        xml = current.xml
        tag = xml.TAG
        att = xml.ATTRIBUTE
        if s3.import_replace:
            if tree is not None:
                if group == "staff":
                    group = 1
                elif group == "volunteer":
                    group = 2
                else:
                    return # don't delete if no group specified

                root = tree.getroot()
                expr = "/%s/%s[@%s='org_organisation']/%s[@%s='name']" % \
                       (tag.root, tag.resource, att.name, tag.data, att.field)
                orgs = root.xpath(expr)
                for org in orgs:
                    org_name = org.get("value", None) or org.text
                    if org_name:
                        try:
                            # Names may arrive JSON-encoded from the importer
                            org_name = json.loads(xml.xml_decode(org_name))
                        except:
                            pass
                    if org_name:
                        htable = s3db.hrm_human_resource
                        otable = s3db.org_organisation
                        query = (otable.name == org_name) & \
                                (htable.organisation_id == otable.id) & \
                                (htable.type == group)
                        resource = s3base.S3Resource("hrm_human_resource", filter=query)
                        ondelete = s3db.get_config("hrm_human_resource", "ondelete")
                        resource.delete(ondelete=ondelete, format="xml", cascade=True)
    s3mgr.import_prep = import_prep

    # CRUD pre-process
    def prep(r):
        if r.representation == "s3json":
            s3mgr.show_ids = True
        elif r.interactive and r.method != "import":
            if not r.component:
                table = r.table
                # Assume volunteers only between 12-81
                table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
                table.occupation.label = T("Normal Job")
                table.pe_label.readable = False
                table.pe_label.writable = False
                table.missing.readable = False
                table.missing.writable = False
                table.age_group.readable = False
                table.age_group.writable = False
                # Organisation Dependent Fields
                set_org_dependent_field = deployment_settings.set_org_dependent_field
                person_details_table = s3db.pr_person_details
                set_org_dependent_field(person_details_table.father_name)
                set_org_dependent_field(person_details_table.mother_name)
                set_org_dependent_field(person_details_table.affiliations)
                set_org_dependent_field(person_details_table.company)
            else:
                if r.component_name == "human_resource":
                    table = r.component.table
                    table.code.writable = False
                    table.code.readable = False
                    table.department_id.writable = False
                    table.department_id.readable = False
                    table.essential.writable = False
                    table.essential.readable = False
                    #table.location_id.readable = True
                    #table.location_id.writable = True
                    table.site_id.writable = False
                    table.site_id.readable = False
                    table.site_contact.writable = False
                    table.site_contact.readable = False
                    org = session.s3.hrm.org
                    if org is not None:
                        # Lock the org to the session's organisation
                        field = table.organisation_id
                        field.default = org
                        field.readable = False
                        field.writable = False
                elif r.component_name == "hours":
                    # Hide records with no hours logged
                    filter = (r.component.table.hours != None)
                    r.resource.add_component_filter("hours", filter)
                elif r.component_name == "physical_description":
                    # Hide all but those details that we want
                    # Lock all the fields
                    table = r.component.table
                    for field in table.fields:
                        table[field].writable = False
                        table[field].readable = False
                    # Now enable those that we want
                    table.ethnicity.writable = True
                    table.ethnicity.readable = True
                    table.blood_type.writable = True
                    table.blood_type.readable = True
                    table.medical_conditions.writable = True
                    table.medical_conditions.readable = True
                    table.other_details.writable = True
                    table.other_details.readable = True
                elif r.component_name == "asset":
                    # Edits should always happen via the Asset Log
                    # @ToDo: Allow this method too, if we can do so safely
                    configure("asset_asset",
                              insertable = False,
                              editable = False,
                              deletable = False)

            resource = r.resource
            if mode is not None:
                # Personal mode: restrict to the logged-in person
                r.resource.build_query(id=s3_logged_in_person())
            else:
                if not r.id and not hr_id:
                    # pre-action redirect => must retain prior errors
                    if response.error:
                        session.error = response.error
                    redirect(URL(r=r, f="volunteer"))
            if resource.count() == 1:
                resource.load()
                r.record = resource.records().first()
                if r.record:
                    r.id = r.record.id
            if not r.record:
                session.error = T("Record not found")
                redirect(URL(f="volunteer",
                             args=["search"]))
            if hr_id and r.component_name == "human_resource":
                r.component_id = hr_id
            configure("hrm_human_resource",
                      insertable = False)
        return True
    s3.prep = prep

    # CRUD post-process
    def postp(r, output):
        if r.interactive and r.component:
            if r.component_name == "human_resource":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
                    '''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
                if settings.get_hrm_vol_experience() == "programme" and \
                   r.method not in ["search", "report", "import"] and \
                   "form" in output:
                    # Insert field to set the Programme
                    sep = ": "
                    table = s3db.hrm_programme_hours
                    field = table.programme_id
                    if r.id:
                        # Default to the most recently-logged programme
                        query = (table.person_id == r.id)
                        default = db(query).select(table.programme_id,
                                                   orderby=table.date).last()
                        if default:
                            default = default.programme_id
                    else:
                        default = field.default
                    widget = field.widget or SQLFORM.widgets.options.widget(field, default)
                    field_id = "%s_%s" % (table._tablename, field.name)
                    label = field.label
                    label = LABEL(label, label and sep, _for=field_id,
                                  _id=field_id + SQLFORM.ID_LABEL_SUFFIX)
                    row_id = field_id + SQLFORM.ID_ROW_SUFFIX
                    programme = s3.crud.formstyle(row_id, label, widget, field.comment)
                    try:
                        output["form"][0].insert(2, programme[1])
                    except:
                        # A non-standard formstyle with just a single row
                        pass
                    try:
                        output["form"][0].insert(2, programme[0])
                    except:
                        pass
            elif r.component_name == "experience":
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
                    '''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
            elif r.component_name == "asset":
                # Provide a link to assign a new Asset
                # @ToDo: Proper Widget to do this inline
                output["add_btn"] = A(T("Assign Asset"),
                                      _href=URL(c="asset", f="asset"),
                                      _id="add-btn",
                                      _class="action-btn")
        return output
    s3.postp = postp

    # REST Interface
    if session.s3.hrm.orgname and mode is None:
        orgname = session.s3.hrm.orgname
    else:
        orgname = None

    output = s3_rest_controller("pr", resourcename,
                                native=False,
                                rheader=s3db.hrm_rheader,
                                orgname=orgname,
                                replace_option=T("Remove existing data before import"),
                                csv_stylesheet=("hrm", "person.xsl"),
                                csv_extra_fields=[
                                    dict(label="Type",
                                         field=s3db.hrm_human_resource.type)
                                ]
                                )
    return output
# -----------------------------------------------------------------------------
def person_search():
    """
        Person REST controller
        - limited to just search.json for use in Autocompletes
        - allows differential access permissions
    """

    # Volunteers only
    s3.filter = (s3db.hrm_human_resource.type == 2)

    s3db.configure("hrm_human_resource",
                   # S3HRSearch
                   search_method = s3db.hrm_autocomplete_search,
                   )

    def prep(r):
        # Reject anything that is not a JSON search request
        return r.representation == "json" and r.method == "search"
    s3.prep = prep

    return s3_rest_controller("hrm", "human_resource")
# =============================================================================
# Teams
# =============================================================================
def group():
    """
        Team controller
        - uses the group table from PR
    """

    tablename = "pr_group"
    table = s3db[tablename]

    # Relabel PR group fields for the Team use-case
    _group_type = table.group_type
    _group_type.label = T("Team Type")
    table.description.label = T("Team Description")
    table.name.label = T("Team Name")

    mtable = s3db.pr_group_membership
    mtable.group_id.label = T("Team ID")
    mtable.group_head.label = T("Team Leader")

    # Set Defaults
    _group_type.default = 3  # 'Relief Team'
    _group_type.readable = _group_type.writable = False

    # Only show Relief Teams
    # Do not show system groups
    s3.filter = (table.system == False) & \
                (_group_type == 3)

    # CRUD Strings
    ADD_TEAM = T("Add Team")
    s3.crud_strings[tablename] = Storage(
        title_create = ADD_TEAM,
        title_display = T("Team Details"),
        title_list = T("Teams"),
        title_update = T("Edit Team"),
        title_search = T("Search Teams"),
        subtitle_create = T("Add New Team"),
        label_list_button = T("List Teams"),
        label_create_button = T("Add New Team"),
        label_search_button = T("Search Teams"),
        msg_record_created = T("Team added"),
        msg_record_modified = T("Team updated"),
        msg_record_deleted = T("Team deleted"),
        msg_list_empty = T("No Teams currently registered"))

    s3.crud_strings["pr_group_membership"] = Storage(
        title_create = T("Add Member"),
        title_display = T("Membership Details"),
        title_list = T("Team Members"),
        title_update = T("Edit Membership"),
        title_search = T("Search Member"),
        subtitle_create = T("Add New Member"),
        label_list_button = T("List Members"),
        label_create_button = T("Add Team Member"),
        label_delete_button = T("Delete Membership"),
        msg_record_created = T("Team Member added"),
        msg_record_modified = T("Membership updated"),
        msg_record_deleted = T("Membership deleted"),
        msg_list_empty = T("No Members currently registered"))

    configure = s3db.configure
    configure(tablename, main="name", extra="description",
              # Redirect to member list when a new group has been created
              create_next = URL(f="group",
                                args=["[id]", "group_membership"]))
    configure("pr_group_membership",
              list_fields=["id",
                           "person_id",
                           "group_head",
                           "description"])

    # Post-process
    def postp(r, output):
        """ Add update/notify action buttons to the list view. """
        if r.interactive:
            if not r.component:
                update_url = URL(args=["[id]", "group_membership"])
                s3_action_buttons(r, deletable=False, update_url=update_url)
                if "msg" in settings.modules:
                    s3.actions.append({
                        "url": URL(f = "compose",
                                   vars = {"group_id": "[id]"}),
                        "_class": "action-btn",
                        "label": str(T("Send Notification"))})
        return output
    s3.postp = postp

    tabs = [
            (T("Team Details"), None),
            # Team should be contacted either via the Leader or
            # simply by sending a message to the group as a whole.
            #(T("Contact Data"), "contact"),
            (T("Members"), "group_membership")
            ]

    output = s3_rest_controller("pr", resourcename,
                                rheader=lambda r: s3db.pr_rheader(r, tabs=tabs))
    return output
# =============================================================================
# Jobs
# =============================================================================
def department():
    """ Departments Controller """

    mode = session.s3.hrm.mode

    def prep(r):
        # Personal-profile mode may not manage departments
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep

    return s3_rest_controller("hrm", resourcename)

def job_role():
    """ Job Roles Controller """

    mode = session.s3.hrm.mode

    def prep(r):
        # Personal-profile mode may not manage job roles
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep

    return s3_rest_controller("hrm", resourcename)

def job_title():
    """ Job Titles Controller """

    mode = session.s3.hrm.mode

    def prep(r):
        # Personal-profile mode may not manage job titles
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep

    return s3_rest_controller("hrm", resourcename)
# =============================================================================
# Skills
# =============================================================================
def skill():
    """ Skills Controller """

    # Personal-profile mode may not access skill administration
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)

# -----------------------------------------------------------------------------
def skill_type():
    """ Skill Types Controller """

    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)

# -----------------------------------------------------------------------------
def competency_rating():
    """ Competency Rating for Skill Types Controller """

    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)

# -----------------------------------------------------------------------------
def skill_provision():
    """ Skill Provisions Controller """

    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def course():
    """ Courses Controller """

    # Personal-profile mode may not access course administration
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename,
                              rheader=s3db.hrm_rheader)

# -----------------------------------------------------------------------------
def course_certificate():
    """ Courses to Certificates Controller """

    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def certificate():
    """ Certificates Controller """

    mode = session.s3.hrm.mode

    def prep(r):
        # Personal-profile mode may not manage certificates
        if mode is not None:
            r.error(403, message=auth.permission.INSUFFICIENT_PRIVILEGES)
        return True
    s3.prep = prep

    return s3_rest_controller("hrm", resourcename,
                              rheader=s3db.hrm_rheader)

# -----------------------------------------------------------------------------
def certificate_skill():
    """ Certificates to Skills Controller """

    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)
# -----------------------------------------------------------------------------
def training():
    """ Training Controller - used for Searching for Participants """

    # Delegate entirely to the shared HRM training controller
    return s3db.hrm_training_controller()

# -----------------------------------------------------------------------------
def training_event():
    """ Training Events Controller """

    # Delegate entirely to the shared HRM training-event controller
    return s3db.hrm_training_event_controller()
# =============================================================================
def skill_competencies():
    """
        Called by S3FilterFieldChange to provide the competency options for a
        particular Skill Type
    """

    stable = s3db.hrm_skill
    ttable = s3db.hrm_skill_type
    rtable = s3db.hrm_competency_rating

    # Ratings belonging to the skill type of the requested skill id
    query = ((stable.id == request.args[0]) &
             (stable.skill_type_id == ttable.id) &
             (rtable.skill_type_id == stable.skill_type_id))
    rows = db(query).select(rtable.id,
                            rtable.name,
                            orderby=~rtable.priority)

    response.headers["Content-Type"] = "application/json"
    return rows.json()
# =============================================================================
def staff_org_site_json():
    """
        Used by the Asset - Assign to Person page
    """

    htable = s3db.hrm_human_resource
    otable = s3db.org_organisation
    #db.req_commit.date.represent = lambda dt: dt[:10]

    # HR records of the requested person, joined to their organisation
    query = ((htable.person_id == request.args[0]) &
             (htable.organisation_id == otable.id))
    rows = db(query).select(htable.site_id,
                            otable.id,
                            otable.name)

    response.headers["Content-Type"] = "application/json"
    return rows.json()
# =============================================================================
def programme():
    """ Volunteer Programmes Controller """

    # Personal-profile mode may not access programme administration
    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename,
                              rheader=s3db.hrm_rheader)

# -----------------------------------------------------------------------------
def programme_hours():
    """
        Volunteer Programme Hours Controller
        - just meant for Imports
    """

    if session.s3.hrm.mode is not None:
        session.error = T("Access denied")
        redirect(URL(f="index"))
    return s3_rest_controller("hrm", resourcename)
# =============================================================================
def task():
    """
        Tasks controller
    """

    # Delegate entirely to the shared project task controller
    return s3db.project_task_controller()

# =============================================================================
# Messaging
# =============================================================================
def compose():
    """ Send message to people/teams """

    # Delegate entirely to the shared HRM compose controller
    return s3db.hrm_compose()
# END =========================================================================
|
|
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
The various forms of classdef (inherits nothing, inherits once, inherits
many) don't parse the same in the CST so we look at ALL classes for
a __metaclass__ and if we find one normalize the inherits to all be
an arglist.
For one-liner classes ('class X: pass') there is no indent/dedent so
we normalize those into having a suite.
Moving the __metaclass__ into the classdef can also cause the class
body to be empty so there is some special casing for that as well.
This fixer also tries very hard to keep original indenting and spacing
in all those corner cases.
"""
# Author: Jack Diederich
# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import syms, Node, Leaf
def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possibilities:
           1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
           2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for child in parent.children:
        if child.type == syms.suite:
            # Multi-line class: recurse into the body suite
            return has_metaclass(child)
        if child.type == syms.simple_stmt and child.children:
            expr = child.children[0]
            if expr.type == syms.expr_stmt and expr.children:
                target = expr.children[0]
                if isinstance(target, Leaf) and target.value == '__metaclass__':
                    return True
    return False
def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # !%@#! oneliners have no suite node, we have to fake one up
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # move everything into a suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        # clone-then-remove keeps parent links consistent while re-homing
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    # NOTE(review): dead rebind — 'node' is never read again; kept as-is.
    node = suite
def fixup_simple_stmt(parent, i, stmt_node):
    """Split a ';'-joined statement so *stmt_node* holds only its first part.

    If there is a semicolon, all the parts count as one simple_stmt.  We
    only want the ``__metaclass__`` part, so everything after the semicolon
    is moved into its own new simple_stmt node inserted at *parent*[*i*].
    """
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        return  # no semicolon: nothing to split off
    node.remove() # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    # move everything after the (removed) semicolon into the new statement
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    # the new statement inherits the old leading whitespace/comments
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix
def remove_trailing_newline(node):
    """Drop a trailing NEWLINE leaf from *node*, if one is present."""
    children = node.children
    if children:
        last = children[-1]
        if last.type == token.NEWLINE:
            last.remove()
def find_metas(cls_node):
    """Yield ``(suite, index, stmt)`` for each ``__metaclass__ = X``
    assignment in the class body, first splitting off any ';'-joined
    trailing code and the trailing NEWLINE from the matched statement.
    """
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")
    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    # list() snapshots the enumeration: fixup_simple_stmt mutates
    # node.children while we scan
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == '__metaclass__':
                    # We found an assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)
def fixup_indent(suite):
    """If an INDENT is followed by a thing with a prefix, nuke the prefix.

    Otherwise we get in trouble when removing __metaclass__ at suite start:
    the next statement would keep __metaclass__'s leading whitespace.
    """
    # reversed copy so pop() walks the children left-to-right
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break
    # find the first Leaf after the INDENT and clear its prefix
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.prefix = ''
            return
        else:
            # interior Node: descend into its children, leftmost first
            kids.extend(node.children[::-1])
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite ``__metaclass__ = X`` class attributes into the Python 3
    ``class Name(..., metaclass=X):`` keyword-argument form.
    """
    BM_compatible = True
    # Match every classdef; the metaclass check happens in transform()
    # because the different inheritance forms parse differently.
    PATTERN = """
    classdef<any*>
    """
    def transform(self, node, results):
        if not has_metaclass(node):
            return
        fixup_parse_tree(node)
        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()
        text_type = node.children[0].type # always Leaf(nnn, 'class')
        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                 0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                # single parent: wrap it in an arglist so we can append
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(', ')', ':', suite])
            #                 0        1       2    3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                 0        1       2    3
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, ')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, '('))
        else:
            raise ValueError("Unexpected class definition")
        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix
        if arglist.children:
            # joining an existing parent list: need ', ' before metaclass
            arglist.append_child(Leaf(token.COMMA, ','))
            meta_txt.prefix = ' '
        else:
            meta_txt.prefix = ''
        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = ''
        expr_stmt.children[2].prefix = ''
        arglist.append_child(last_metaclass)
        fixup_indent(suite)
        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass__
            suite.remove()
            pass_leaf = Leaf(text_type, 'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, '\n'))
        elif len(suite.children) > 1 and \
                (suite.children[-2].type == token.INDENT and
                 suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, 'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
|
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012-2014 Tycho Andersen
# Copyright (c) 2013 xarvh
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Sebastien Blot
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import pytest
import subprocess
import time
import libqtile
import libqtile.layout
import libqtile.bar
import libqtile.command
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.hook
import libqtile.confreader
from .conftest import whereis, BareConfig, no_xinerama, retry
class ManagerConfig(object):
    """Qtile config used by most tests here: four groups, four layouts,
    an xclock float rule and a single bar with a GroupBox widget."""
    auto_fullscreen = True
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2),
        libqtile.layout.tile.Tile(ratio=0.5),
        libqtile.layout.max.Max()
    ]
    # xclock windows float automatically (exercised by the float tests)
    floating_layout = libqtile.layout.floating.Floating(
        float_rules=[dict(wmclass="xclock")])
    keys = [
        libqtile.config.Key(
            ["control"],
            "k",
            libqtile.command._Call([("layout", None)], "up")
        ),
        libqtile.config.Key(
            ["control"],
            "j",
            libqtile.command._Call([("layout", None)], "down")
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen(
        bottom=libqtile.bar.Bar(
            [
                libqtile.widget.GroupBox(),
            ],
            20
        ),
    )]
    main = None
    follow_mouse_focus = True
# Decorator that parametrizes the ``qtile`` fixture with the config above.
manager_config = pytest.mark.parametrize("qtile", [ManagerConfig], indirect=True)
@manager_config
def test_screen_dim(qtile):
    """Focus, group and geometry follow to_screen() across two screens."""
    # self.c.restart()
    qtile.testXclock()
    assert qtile.c.screen.info()["index"] == 0
    assert qtile.c.screen.info()["x"] == 0
    assert qtile.c.screen.info()["width"] == 800
    assert qtile.c.group.info()["name"] == 'a'
    assert qtile.c.group.info()["focus"] == 'xclock'
    qtile.c.to_screen(1)
    qtile.testXeyes()
    assert qtile.c.screen.info()["index"] == 1
    assert qtile.c.screen.info()["x"] == 800
    assert qtile.c.screen.info()["width"] == 640
    assert qtile.c.group.info()["name"] == 'b'
    assert qtile.c.group.info()["focus"] == 'xeyes'
    qtile.c.to_screen(0)
    assert qtile.c.screen.info()["index"] == 0
    assert qtile.c.screen.info()["x"] == 0
    assert qtile.c.screen.info()["width"] == 800
    assert qtile.c.group.info()["name"] == 'a'
    assert qtile.c.group.info()["focus"] == 'xclock'
@pytest.mark.parametrize("xephyr", [{"xoffset": 0}], indirect=True)
@manager_config
def test_clone_dim(qtile):
    """With cloned outputs (xoffset=0) only a single screen is reported."""
    self = qtile
    self.testXclock()
    assert self.c.screen.info()["index"] == 0
    assert self.c.screen.info()["x"] == 0
    assert self.c.screen.info()["width"] == 800
    assert self.c.group.info()["name"] == 'a'
    assert self.c.group.info()["focus"] == 'xclock'
    assert len(self.c.screens()) == 1
@manager_config
def test_to_screen(qtile):
    """Windows stay in their groups while next/prev_screen cycles focus."""
    self = qtile
    assert self.c.screen.info()["index"] == 0
    self.c.to_screen(1)
    assert self.c.screen.info()["index"] == 1
    self.testWindow("one")
    self.c.to_screen(0)
    self.testWindow("two")
    ga = self.c.groups()["a"]
    assert ga["windows"] == ["two"]
    gb = self.c.groups()["b"]
    assert gb["windows"] == ["one"]
    assert self.c.window.info()["name"] == "two"
    self.c.next_screen()
    assert self.c.window.info()["name"] == "one"
    self.c.next_screen()
    assert self.c.window.info()["name"] == "two"
    self.c.prev_screen()
    assert self.c.window.info()["name"] == "one"
@manager_config
def test_togroup(qtile):
    """window.togroup() moves focus between groups; bad names raise."""
    self = qtile
    self.testWindow("one")
    with pytest.raises(libqtile.command.CommandError):
        self.c.window.togroup("nonexistent")
    assert self.c.groups()["a"]["focus"] == "one"
    # moving to the current group is a no-op
    self.c.window.togroup("a")
    assert self.c.groups()["a"]["focus"] == "one"
    self.c.window.togroup("b")
    assert self.c.groups()["b"]["focus"] == "one"
    assert self.c.groups()["a"]["focus"] is None
    self.c.to_screen(1)
    self.c.window.togroup("c")
    assert self.c.groups()["c"]["focus"] == "one"
@manager_config
def test_resize(qtile):
    """screen[0].resize() eventually updates the reported geometry."""
    self = qtile
    self.c.screen[0].resize(x=10, y=10, w=100, h=100)

    # BUG FIX: ignore_exceptions must be a tuple; the bare parenthesized
    # ``(AssertionError)`` is just the class itself.  A trailing comma makes
    # it a 1-tuple, consistent with every other @retry call site in this file.
    @retry(ignore_exceptions=(AssertionError,), fail_msg="Screen didn't resize")
    def run():
        d = self.c.screen[0].info()
        assert d['width'] == 100
        assert d['height'] == 100
        return d
    d = run()
    assert d['x'] == d['y'] == 10
@no_xinerama
def test_minimal(qtile):
    """The bare default config boots and answers status()."""
    assert qtile.c.status() == "OK"
@manager_config
@no_xinerama
def test_events(qtile):
    """The manager config boots and answers status()."""
    assert qtile.c.status() == "OK"
# FIXME: failing test disabled. For some reason we don't seem
# to have a keymap in Xnest or Xephyr 99% of the time.
@manager_config
@no_xinerama
def test_keypress(qtile):
    """simulate_keypress() rejects unknown modifiers and moves focus."""
    self = qtile
    self.testWindow("one")
    self.testWindow("two")
    v = self.c.simulate_keypress(["unknown"], "j")
    assert v.startswith("Unknown modifier")
    assert self.c.groups()["a"]["focus"] == "two"
    self.c.simulate_keypress(["control"], "j")
    assert self.c.groups()["a"]["focus"] == "one"
@manager_config
@no_xinerama
def test_spawn(qtile):
    """spawn() with a command string returns the child's pid."""
    # Spawn something with a pid greater than init's
    assert int(qtile.c.spawn("true")) > 1
@manager_config
@no_xinerama
def test_spawn_list(qtile):
    """spawn() with an argv list returns the child's pid."""
    # Spawn something with a pid greater than init's
    assert int(qtile.c.spawn(["echo", "true"])) > 1
@retry(ignore_exceptions=(AssertionError,), fail_msg='Window did not die!')
def assert_window_died(client, window_info):
    """Poll (via @retry) until the window in *window_info* has vanished."""
    client.sync()
    dead_id = window_info['id']
    live_ids = {win['id'] for win in client.windows()}
    assert dead_id not in live_ids
@manager_config
@no_xinerama
def test_kill_window(qtile):
    """Killing through the window command object removes the window."""
    qtile.testWindow("one")
    qtile.testwindows = []
    window_info = qtile.c.window.info()
    qtile.c.window[window_info["id"]].kill()
    assert_window_died(qtile.c, window_info)
@manager_config
@no_xinerama
def test_kill_other(qtile):
    """Tile layout reflows the surviving window after its neighbour dies."""
    self = qtile
    self.c.group.setlayout("tile")
    one = self.testWindow("one")
    assert self.c.window.info()["width"] == 798
    window_one_info = self.c.window.info()
    assert self.c.window.info()["height"] == 578
    two = self.testWindow("two")
    assert self.c.window.info()["name"] == "two"
    assert self.c.window.info()["width"] == 398
    assert self.c.window.info()["height"] == 578
    assert len(self.c.windows()) == 2
    self.kill_window(one)
    assert_window_died(self.c, window_one_info)
    # survivor reclaims the full tile width
    assert self.c.window.info()["name"] == "two"
    assert self.c.window.info()["width"] == 798
    assert self.c.window.info()["height"] == 578
    self.kill_window(two)
@manager_config
@no_xinerama
def test_regression_groupswitch(qtile):
    """Switching groups twice must clear the first group's screen binding."""
    self = qtile
    self.c.group["c"].toscreen()
    self.c.group["d"].toscreen()
    assert self.c.groups()["c"]["screen"] is None
@manager_config
@no_xinerama
def test_next_layout(qtile):
    """next_layout() cycles through all four configured layouts."""
    self = qtile
    self.testWindow("one")
    self.testWindow("two")
    assert len(self.c.layout.info()["stacks"]) == 1
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.c.next_layout()
    self.c.next_layout()
    self.c.next_layout()
    # four steps wrap back to the first (single-stack) layout
    assert len(self.c.layout.info()["stacks"]) == 1
@manager_config
@no_xinerama
def test_setlayout(qtile):
    """group.setlayout() switches directly to the named layout."""
    self = qtile
    assert not self.c.layout.info()["name"] == "max"
    self.c.group.setlayout("max")
    assert self.c.layout.info()["name"] == "max"
@manager_config
@no_xinerama
def test_adddelgroup(qtile):
    """addgroup()/delgroup() manage groups; windows are never orphaned."""
    self = qtile
    self.testWindow("one")
    self.c.addgroup("dummygroup")
    self.c.addgroup("testgroup")
    assert "testgroup" in self.c.groups().keys()
    self.c.window.togroup("testgroup")
    self.c.delgroup("testgroup")
    assert "testgroup" not in self.c.groups().keys()
    # Assert that the test window is still a member of some group.
    assert sum(len(i["windows"]) for i in self.c.groups().values())
    for i in list(self.c.groups().keys())[:-1]:
        self.c.delgroup(i)
    # deleting the last remaining group must fail
    with pytest.raises(libqtile.command.CommandException):
        self.c.delgroup(list(self.c.groups().keys())[0])
    # Assert that setting layout via cmd_addgroup works
    self.c.addgroup("testgroup2", layout='max')
    assert self.c.groups()["testgroup2"]['layout'] == 'max'
@manager_config
@no_xinerama
def test_delgroup(qtile):
    """Deleting the final group raises CommandException."""
    self = qtile
    self.testWindow("one")
    for i in ['a', 'd', 'c']:
        self.c.delgroup(i)
    with pytest.raises(libqtile.command.CommandException):
        self.c.delgroup('b')
@manager_config
@no_xinerama
def test_nextprevgroup(qtile):
    """screen.next_group()/prev_group() cycle and report the new group."""
    self = qtile
    start = self.c.group.info()["name"]
    ret = self.c.screen.next_group()
    assert self.c.group.info()["name"] != start
    assert self.c.group.info()["name"] == ret
    ret = self.c.screen.prev_group()
    assert self.c.group.info()["name"] == start
@manager_config
@no_xinerama
def test_toggle_group(qtile):
    """toggle_group() flips between a group and the previous one."""
    self = qtile
    self.c.group["a"].toscreen()
    self.c.group["b"].toscreen()
    self.c.screen.toggle_group("c")
    assert self.c.group.info()["name"] == "c"
    self.c.screen.toggle_group("c")
    assert self.c.group.info()["name"] == "b"
    # with no argument it returns to the last-visited group
    self.c.screen.toggle_group()
    assert self.c.group.info()["name"] == "c"
@manager_config
@no_xinerama
def test_inspect_xeyes(qtile):
    """window.inspect() returns data for a plain client."""
    self = qtile
    self.testXeyes()
    assert self.c.window.inspect()
@manager_config
@no_xinerama
def test_inspect_xclock(qtile):
    """window.inspect() exposes the client's WM_CLASS."""
    self = qtile
    self.testXclock()
    assert self.c.window.inspect()["wm_class"]
@manager_config
@no_xinerama
def test_static(qtile):
    """A window can be made static on screen 0 without error."""
    self = qtile
    self.testXeyes()
    self.testWindow("one")
    self.c.window[self.c.window.info()["id"]].static(0, 0, 0, 100, 100)
@manager_config
@no_xinerama
def test_match(qtile):
    """window.match() matches on window name and rejects non-matches."""
    self = qtile
    self.testXeyes()
    assert self.c.window.match(wname="xeyes")
    assert not self.c.window.match(wname="nonexistent")
@manager_config
@no_xinerama
def test_default_float(qtile):
    """xclock matches the float rule; float moves keep its size."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXclock()
    assert self.c.group.info()['focus'] == 'xclock'
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 318
    assert self.c.window.info()['y'] == 208
    assert self.c.window.info()['floating'] is True
    # relative move: position shifts by (10, 20), size unchanged
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 328
    assert self.c.window.info()['y'] == 228
    assert self.c.window.info()['floating'] is True
    # absolute move: position becomes (10, 20), size unchanged
    self.c.window.set_position_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 164
    assert self.c.window.info()['height'] == 164
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    assert self.c.window.info()['floating'] is True
@manager_config
@no_xinerama
def test_last_float_size(qtile):
    """Re-floating a window reuses its previous floated size,
    even across maximize/minimize round-trips."""
    self = qtile
    self.testXeyes()
    assert self.c.window.info()['name'] == 'xeyes'
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    # float and it moves
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 150
    assert self.c.window.info()['height'] == 100
    # resize
    self.c.window.set_size_floating(50, 90, 42, 42)
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    # back to not floating
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    # float again, should use last float size
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    # make sure it works through min and max
    self.c.window.toggle_maximize()
    self.c.window.toggle_minimize()
    self.c.window.toggle_minimize()
    self.c.window.toggle_floating()
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
@manager_config
@no_xinerama
def test_float_max_min_combo(qtile):
    """Maximize, then minimize, then un-float restores tiled geometry."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    assert self.c.window.info()['floating'] is False
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['maximized'] is True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['minimized'] is True
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    # un-floating clears both the maximized and minimized flags
    self.c.window.toggle_floating()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['minimized'] is False
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_fullscreen(qtile):
    """toggle_fullscreen() covers the whole screen and restores tiling."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'width': 150, 'height': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_fullscreen()
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['fullscreen'] is True
    # fullscreen ignores the bar: full 800x600
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 600
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_fullscreen()
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['fullscreen'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_max(qtile):
    """toggle_maximize() fills the area above the bar and restores tiling."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'width': 150, 'height': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['maximized'] is True
    # maximize leaves the 20px bar visible: 800x580
    assert self.c.window.info()['width'] == 800
    assert self.c.window.info()['height'] == 580
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_maximize()
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['maximized'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_min(qtile):
    """toggle_minimize() floats/minimizes without changing geometry."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['float_info'] == {
        'y': 0, 'x': 400, 'width': 150, 'height': 100}
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is True
    assert self.c.window.info()['minimized'] is True
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_minimize()
    assert self.c.group.info()['focus'] == 'xeyes'
    assert self.c.window.info()['floating'] is False
    assert self.c.window.info()['minimized'] is False
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 400
    assert self.c.window.info()['y'] == 0
@manager_config
@no_xinerama
def test_toggle_floating(qtile):
    """toggle_floating() flips state and survives a layout change."""
    self = qtile
    self.testXeyes()
    assert self.c.window.info()['floating'] is False
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is True
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is False
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is True
    # change layout (should still be floating)
    self.c.next_layout()
    assert self.c.window.info()['floating'] is True
@manager_config
@no_xinerama
def test_floating_focus(qtile):
    """A floating window takes part in group focus cycling, but the
    stack layout never considers it its current window."""
    self = qtile
    # change to 2 col stack
    self.c.next_layout()
    assert len(self.c.layout.info()["stacks"]) == 2
    self.testXcalc()
    self.testXeyes()
    # self.testWindow("one")
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    self.c.window.toggle_floating()
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['name'] == 'xeyes'
    assert self.c.group.info()['focus'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # change focus to xcalc
    self.c.group.next_window()
    assert self.c.window.info()['width'] == 398
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['name'] != 'xeyes'
    assert self.c.group.info()['focus'] != 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # focus back to xeyes
    self.c.group.next_window()
    assert self.c.window.info()['name'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # now focusing via layout is borked (won't go to float)
    self.c.layout.up()
    assert self.c.window.info()['name'] != 'xeyes'
    self.c.layout.up()
    assert self.c.window.info()['name'] != 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
    # focus back to xeyes
    self.c.group.next_window()
    assert self.c.window.info()['name'] == 'xeyes'
    # check what stack thinks is focus
    assert [x['current'] for x in self.c.layout.info()['stacks']] == [0, 0]
@manager_config
@no_xinerama
def test_move_floating(qtile):
    """move/set_size/resize floating commands update geometry as expected."""
    self = qtile
    self.testXeyes()
    # self.testWindow("one")
    assert self.c.window.info()['width'] == 798
    assert self.c.window.info()['height'] == 578
    assert self.c.window.info()['x'] == 0
    assert self.c.window.info()['y'] == 0
    self.c.window.toggle_floating()
    assert self.c.window.info()['floating'] is True
    # floating starts at the default 150x100 float size
    self.c.window.move_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 150
    assert self.c.window.info()['height'] == 100
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    self.c.window.set_size_floating(50, 90, 42, 42)
    assert self.c.window.info()['width'] == 50
    assert self.c.window.info()['height'] == 90
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    # resize is relative: grows by (10, 20)
    self.c.window.resize_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 60
    assert self.c.window.info()['height'] == 110
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    self.c.window.set_size_floating(10, 20, 42, 42)
    assert self.c.window.info()['width'] == 10
    assert self.c.window.info()['height'] == 20
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
    # change layout (x, y should be same)
    self.c.next_layout()
    assert self.c.window.info()['width'] == 10
    assert self.c.window.info()['height'] == 20
    assert self.c.window.info()['x'] == 10
    assert self.c.window.info()['y'] == 20
@manager_config
@no_xinerama
def test_screens(qtile):
    """At least one screen is configured."""
    self = qtile
    assert len(self.c.screens())
@manager_config
@no_xinerama
def test_rotate(qtile):
    """An xrandr rotation swaps the reported screen width and height."""
    self = qtile
    self.testWindow("one")
    s = self.c.screens()[0]
    height, width = s["height"], s["width"]
    subprocess.call(
        [
            "xrandr",
            "--output", "default",
            "-display", self.display,
            "--rotate", "left"
        ],
        stderr=subprocess.PIPE,
        stdout=subprocess.PIPE
    )
    # rotation is asynchronous; poll until qtile sees the new geometry
    @retry(ignore_exceptions=(AssertionError,), fail_msg="Screen did not rotate")
    def run():
        s = self.c.screens()[0]
        assert s['width'] == height
        assert s['height'] == width
        return True
    run()
# TODO: see note on test_resize
@manager_config
@no_xinerama
def test_resize_(qtile):
    """An xrandr resolution change propagates to screen.info()."""
    self = qtile
    self.testWindow("one")
    subprocess.call(
        [
            "xrandr",
            "-s", "480x640",
            "-display", self.display
        ]
    )
    # the mode change is asynchronous; poll until qtile sees it
    @retry(ignore_exceptions=(AssertionError,), fail_msg="Screen did not resize")
    def run():
        d = self.c.screen.info()
        assert d['width'] == 480
        assert d['height'] == 640
        return True
    run()
@manager_config
@no_xinerama
def test_focus_stays_on_layout_switch(qtile):
    """The focused window is preserved across layout toggles."""
    qtile.testWindow("one")
    qtile.testWindow("two")
    # switch to a double stack layout
    qtile.c.next_layout()
    # focus on a different window than the default
    qtile.c.layout.next()
    # toggle the layout
    qtile.c.next_layout()
    qtile.c.prev_layout()
    assert qtile.c.window.info()['name'] == 'one'
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xeyes(qtile):
    """xeyes starts under both configs, with and without xinerama."""
    qtile.testXeyes()
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xcalc(qtile):
    """xcalc starts under both configs, with and without xinerama."""
    qtile.testXcalc()
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_xcalc_kill_window(qtile):
    """Killing xcalc removes it under every config combination."""
    self = qtile
    self.testXcalc()
    window_info = self.c.window.info()
    self.c.window.kill()
    assert_window_died(self.c, window_info)
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_map_request(qtile):
    """Newly mapped windows join the current group and take focus."""
    self = qtile
    self.testWindow("one")
    info = self.c.groups()["a"]
    assert "one" in info["windows"]
    assert info["focus"] == "one"
    self.testWindow("two")
    info = self.c.groups()["a"]
    assert "two" in info["windows"]
    assert info["focus"] == "two"
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_unmap(qtile):
    """Unmapping windows hands focus back in reverse creation order."""
    self = qtile
    one = self.testWindow("one")
    two = self.testWindow("two")
    three = self.testWindow("three")
    info = self.c.groups()["a"]
    assert info["focus"] == "three"
    assert len(self.c.windows()) == 3
    self.kill_window(three)
    assert len(self.c.windows()) == 2
    info = self.c.groups()["a"]
    assert info["focus"] == "two"
    self.kill_window(two)
    assert len(self.c.windows()) == 1
    info = self.c.groups()["a"]
    assert info["focus"] == "one"
    self.kill_window(one)
    assert len(self.c.windows()) == 0
    info = self.c.groups()["a"]
    assert info["focus"] is None
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_setgroup(qtile):
    """group.toscreen() rebinds groups to screens consistently."""
    self = qtile
    self.testWindow("one")
    self.c.group["b"].toscreen()
    self.groupconsistency()
    if len(self.c.screens()) == 1:
        assert self.c.groups()["a"]["screen"] is None
    else:
        # with xinerama the displaced group moves to the other screen
        assert self.c.groups()["a"]["screen"] == 1
    assert self.c.groups()["b"]["screen"] == 0
    self.c.group["c"].toscreen()
    self.groupconsistency()
    assert self.c.groups()["c"]["screen"] == 0
@pytest.mark.parametrize("qtile", [BareConfig, ManagerConfig], indirect=True)
@pytest.mark.parametrize("xephyr", [{"xinerama": True}, {"xinerama": False}], indirect=True)
def test_unmap_noscreen(qtile):
    """Windows in an off-screen group can still be killed cleanly."""
    self = qtile
    self.testWindow("one")
    pid = self.testWindow("two")
    assert len(self.c.windows()) == 2
    self.c.group["c"].toscreen()
    self.groupconsistency()
    self.c.status()
    assert len(self.c.windows()) == 2
    self.kill_window(pid)
    assert len(self.c.windows()) == 1
    assert self.c.groups()["a"]["focus"] == "one"
def test_init():
    """Invalid key names or modifiers raise QtileError at config time."""
    with pytest.raises(libqtile.manager.QtileError):
        libqtile.config.Key([], "unknown", libqtile.command._Call("base", None, "foo"))
    with pytest.raises(libqtile.manager.QtileError):
        libqtile.config.Key(["unknown"], "x", libqtile.command._Call("base", None, "foo"))
class TScreen(libqtile.config.Screen):
    """Screen stub whose group switching is a no-op, so the geometry
    properties below can be tested without a running manager."""
    def setGroup(self, x, save_prev=True):
        pass
def test_dx():
    """A left gap shifts the drawable x origin."""
    s = TScreen(left=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dx == 10
def test_dwidth():
    """Left and right gaps both shrink the drawable width."""
    s = TScreen(left=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dwidth == 90
    s.right = libqtile.bar.Gap(10)
    assert s.dwidth == 80
def test_dy():
    """A top gap shifts the drawable y origin."""
    s = TScreen(top=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dy == 10
def test_dheight():
    """Top and bottom gaps both shrink the drawable height."""
    s = TScreen(top=libqtile.bar.Gap(10))
    s._configure(None, 0, 0, 0, 100, 100, None)
    assert s.dheight == 90
    s.bottom = libqtile.bar.Gap(10)
    assert s.dheight == 80
class _Config(object):
    """Minimal base config shared by the hook-driven test configs below."""
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        libqtile.layout.stack.Stack(num_stacks=1),
        libqtile.layout.stack.Stack(num_stacks=2)
    ]
    floating_layout = libqtile.layout.floating.Floating()
    keys = [
        libqtile.config.Key(
            ["control"],
            "k",
            libqtile.command._Call([("layout", None)], "up")
        ),
        libqtile.config.Key(
            ["control"],
            "j",
            libqtile.command._Call([("layout", None)], "down")
        ),
    ]
    mouse = []
    screens = [libqtile.config.Screen(
        bottom=libqtile.bar.Bar(
            [
                libqtile.widget.GroupBox(),
            ],
            20
        ),
    )]
    auto_fullscreen = True
class ClientNewStaticConfig(_Config):
    """Config whose client_new hook makes every new client static."""
    @staticmethod
    def main(c):
        def client_new(c):
            c.static(0)
        libqtile.hook.subscribe.client_new(client_new)
# Decorator parametrizing the ``qtile`` fixture with the hook config above.
clientnew_config = pytest.mark.parametrize("qtile", [ClientNewStaticConfig], indirect=True)
@clientnew_config
def test_minimal_(qtile):
    """A window can be created and killed while the static hook is active."""
    self = qtile
    a = self.testWindow("one")
    self.kill_window(a)
@pytest.mark.skipif(whereis("gkrellm") is None, reason="gkrellm not found")
@clientnew_config
def test_gkrellm(qtile):
    """gkrellm survives being made static by the client_new hook."""
    qtile.testGkrellm()
    time.sleep(0.1)
class ToGroupConfig(_Config):
    """Config whose client_new hook sends every new client to group 'd'."""
    @staticmethod
    def main(c):
        def client_new(c):
            c.togroup("d")
        libqtile.hook.subscribe.client_new(client_new)
# Decorator parametrizing the ``qtile`` fixture with the hook config above.
togroup_config = pytest.mark.parametrize("qtile", [ToGroupConfig], indirect=True)
@togroup_config
def test_minimal__(qtile):
    """New windows land in group 'd' via the togroup hook."""
    qtile.c.group["d"].toscreen()
    qtile.c.group["a"].toscreen()
    a = qtile.testWindow("one")
    assert len(qtile.c.group["d"].info()["windows"]) == 1
    qtile.kill_window(a)
@manager_config
def test_colorPixel(qtile):
    # test for #394 -- evaluating colorPixel on a hex string should not raise
    qtile.c.eval("self.colorPixel(\"ffffff\")")
@manager_config
def test_change_loglevel(qtile):
    # cycle through every log-level command and verify both the numeric
    # level and its name are updated
    assert qtile.c.loglevel() == logging.INFO
    assert qtile.c.loglevelname() == 'INFO'
    qtile.c.debug()
    assert qtile.c.loglevel() == logging.DEBUG
    assert qtile.c.loglevelname() == 'DEBUG'
    qtile.c.info()
    assert qtile.c.loglevel() == logging.INFO
    assert qtile.c.loglevelname() == 'INFO'
    qtile.c.warning()
    assert qtile.c.loglevel() == logging.WARNING
    assert qtile.c.loglevelname() == 'WARNING'
    qtile.c.error()
    assert qtile.c.loglevel() == logging.ERROR
    assert qtile.c.loglevelname() == 'ERROR'
    qtile.c.critical()
    assert qtile.c.loglevel() == logging.CRITICAL
    assert qtile.c.loglevelname() == 'CRITICAL'
|
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample: linearly separable 2-feature problem, class -1 on the left,
# class +1 on the right
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# fixed seed so the permutations (and the hard-coded regression values in
# the tests below) are reproducible
rng = np.random.RandomState(0)

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset."""
    for loss in ('deviance', 'exponential'):
        clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
                                         random_state=1)
        # predicting before fit must raise
        assert_raises(ValueError, clf.predict, T)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf.estimators_))

        # NOTE(review): np.any only checks that *some* stage improves,
        # despite the "monotonically" wording in the message
        deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
        assert np.any(deviance_decrease >= 0.0), \
            "Train deviance does not monotonically decrease."
def test_parameter_checks():
    """Check input parameter validation."""
    # n_estimators must be a positive integer
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(n_estimators=-1).fit, X, y)
    # learning_rate must be strictly positive
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
    # unknown loss name
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='foobar').fit, X, y)
    # min_samples_split / min_samples_leaf must be positive
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
    # min_weight_fraction_leaf must lie in [0, 0.5]
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
                  X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
                  X, y)
    # subsample must lie in (0, 1]
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=0.0).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=1.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(subsample=-0.1).fit, X, y)
    # max_depth must be a positive integer
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(max_depth=0).fit, X, y)
    # init must expose fit/predict; a plain dict is rejected
    assert_raises(ValueError,
                  GradientBoostingClassifier(init={}).fit, X, y)

    # test fit before feature importance
    assert_raises(ValueError,
                  lambda: GradientBoostingClassifier().feature_importances_)

    # deviance requires ``n_classes >= 2``.
    assert_raises(ValueError,
                  lambda X, y: GradientBoostingClassifier(
                      loss='deviance').fit(X, y),
                  X, [0, 0, 0, 0])
def test_loss_function():
    """Regression losses must be rejected by the classifier and vice versa."""
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='ls').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='lad').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='quantile').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingClassifier(loss='huber').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingRegressor(loss='deviance').fit, X, y)
    assert_raises(ValueError,
                  GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
    """Test GradientBoostingClassifier on synthetic dataset used by
    Hastie et al. in ESLII Example 12.7. """
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]

    for loss in ('deviance', 'exponential'):
        gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
                                          max_depth=1, loss=loss,
                                          learning_rate=1.0, random_state=0)
        gbrt.fit(X_train, y_train)
        error_rate = (1.0 - gbrt.score(X_test, y_test))
        assert error_rate < 0.09, \
            "GB(loss={}) failed with error {}".format(loss, error_rate)

        # stochastic variant (subsample=0.5); NOTE(review): this estimator
        # uses the default loss -- the loop's ``loss`` only appears in the
        # failure message
        gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
                                          max_depth=1,
                                          learning_rate=1.0, subsample=0.5,
                                          random_state=0)
        gbrt.fit(X_train, y_train)
        error_rate = (1.0 - gbrt.score(X_test, y_test))
        assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
                                   "failed with error {}".format(loss, error_rate))
def test_boston():
    """Check consistency on dataset boston house prices with least squares
    and least absolute deviation. """
    for loss in ("ls", "lad", "huber"):
        for subsample in (1.0, 0.5):
            last_y_pred = None
            # uniform weightings (None, all ones, all twos) must yield
            # identical predictions
            for i, sample_weight in enumerate((None, np.ones(len(boston.target)),
                                               2 * np.ones(len(boston.target)))):
                clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
                                                max_depth=4, subsample=subsample,
                                                min_samples_split=1,
                                                random_state=1)
                # predicting before fit must raise
                assert_raises(ValueError, clf.predict, boston.data)
                clf.fit(boston.data, boston.target,
                        sample_weight=sample_weight)
                y_pred = clf.predict(boston.data)
                mse = mean_squared_error(boston.target, y_pred)
                assert mse < 6.0, "Failed with loss %s and " \
                    "mse = %.4f" % (loss, mse)
                if last_y_pred is not None:
                    np.testing.assert_array_almost_equal(
                        last_y_pred, y_pred,
                        err_msg='pred_%d doesnt match last pred_%d for loss %r and subsample %r. '
                        % (i, i - 1, loss, subsample))
                last_y_pred = y_pred
def test_iris():
    """Check consistency on dataset iris."""
    for subsample in (1.0, 0.5):
        # None and uniform weights should both fit well
        for sample_weight in (None, np.ones(len(iris.target))):
            clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                             random_state=1, subsample=subsample)
            clf.fit(iris.data, iris.target, sample_weight=sample_weight)
            score = clf.score(iris.data, iris.target)
            assert score > 0.9, "Failed with subsample %.1f " \
                "and score = %f" % (subsample, score)
def test_regression_synthetic():
    """Test on synthetic regression datasets used in Leo Breiman,
    `Bagging Predictors`. Machine Learning 24(2): 123-140 (1996). """
    random_state = check_random_state(1)
    regression_params = {'n_estimators': 100, 'max_depth': 4,
                         'min_samples_split': 1, 'learning_rate': 0.1,
                         'loss': 'ls'}

    # Friedman1
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    # bug fix: the shared regression_params were defined for all three
    # datasets but were not passed for Friedman1 (a default-configured
    # GradientBoostingRegressor() was fitted instead)
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse

    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse

    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
    """Fitted GBRT exposes feature_importances_; transform() with
    threshold="mean" keeps exactly the above-mean-importance columns."""
    X = np.array(boston.data, dtype=np.float32)
    y = np.array(boston.target, dtype=np.float32)

    clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
                                    min_samples_split=1, random_state=1)
    clf.fit(X, y)
    #feature_importances = clf.feature_importances_
    assert_true(hasattr(clf, 'feature_importances_'))

    X_new = clf.transform(X, threshold="mean")
    assert_less(X_new.shape[1], X.shape[1])
    # transform must select exactly the features whose importance exceeds
    # the mean importance
    feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
    assert_array_almost_equal(X_new, X[:, feature_mask])

    # true feature importance ranking
    # true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
    # assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
    """Predict probabilities."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # predict_proba before fit must raise
    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)

    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_check_inputs():
    """Test input checks (shape and type of X and y)."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # mismatched lengths of X and y
    assert_raises(ValueError, clf.fit, X, y + [0, 1])

    # sparse input is not supported by this implementation
    from scipy import sparse
    X_sparse = sparse.csr_matrix(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(TypeError, clf.fit, X_sparse, y)

    clf = GradientBoostingClassifier().fit(X, y)
    assert_raises(TypeError, clf.predict, X_sparse)

    # sample_weight length must match y
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    assert_raises(ValueError, clf.fit, X, y,
                  sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
    """X has wrong shape """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)

    # wrong number of features (1 instead of 2) must raise
    x = np.array([1.0, 2.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)

    x = np.array([])
    assert_raises(ValueError, clf.predict, x)

    x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)

    # same checks for the regressor
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, rng.rand(len(X)))

    x = np.array([1.0, 2.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)

    x = np.array([])
    assert_raises(ValueError, clf.predict, x)

    x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
    assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
    """test if max_features is valid. """
    # max_features=0 is invalid
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                    max_features=0)
    assert_raises(ValueError, clf.fit, X, y)

    # more features than the data has is invalid
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                    max_features=(len(X[0]) + 1))
    assert_raises(ValueError, clf.fit, X, y)

    # negative fraction is invalid
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
                                    max_features=-0.1)
    assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
    """Test to make sure random state is set properly. """
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)

    X_train, X_test = X[:2000], X[2000:]
    y_train, y_test = y[:2000], y[2000:]

    # with max_features < n_features, feature subsampling uses the RNG;
    # the fixed random_state pins the resulting deviance
    gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
                                      max_depth=2, learning_rate=.1,
                                      max_features=2, random_state=1)
    gbrt.fit(X_train, y_train)
    deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
    assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
    """Test if max features is set properly for floats and str. """
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape

    X_train = X[:2000]
    y_train = y[:2000]

    # 'auto' means sqrt(n_features) for classification ...
    gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))

    # ... but all features for regression
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, n_features)

    # a float is interpreted as a fraction of n_features
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(n_features * 0.3))

    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))

    gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, int(np.log2(n_features)))

    # a tiny fraction is clipped up to at least one feature
    gbrt = GradientBoostingRegressor(n_estimators=1,
                                     max_features=0.01 / X.shape[1])
    gbrt.fit(X_train, y_train)
    assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
    """Test whether staged decision function eventually gives
    the same prediction.
    """
    X, y = datasets.make_friedman1(n_samples=1200,
                                   random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()
    # test raise ValueError if not fitted
    assert_raises(ValueError, lambda X: np.fromiter(
        clf.staged_predict(X), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    # test if prediction for last stage equals ``predict``
    # (every stage's prediction also has the final shape)
    for y in clf.staged_predict(X_test):
        assert_equal(y.shape, y_pred.shape)

    assert_array_equal(y_pred, y)
def test_staged_predict_proba():
    """Test whether staged predict proba eventually gives
    the same prediction.
    """
    X, y = datasets.make_hastie_10_2(n_samples=1200,
                                     random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # test raise NotFittedError if not fitted
    assert_raises(NotFittedError, lambda X: np.fromiter(
        clf.staged_predict_proba(X), dtype=np.float64), X_test)

    clf.fit(X_train, y_train)

    # test if prediction for last stage equals ``predict``
    for y_pred in clf.staged_predict(X_test):
        assert_equal(y_test.shape, y_pred.shape)

    assert_array_equal(clf.predict(X_test), y_pred)

    # test if prediction for last stage equals ``predict_proba``
    # (binary problem: two probability columns per stage)
    for staged_proba in clf.staged_predict_proba(X_test):
        assert_equal(y_test.shape[0], staged_proba.shape[0])
        assert_equal(2, staged_proba.shape[1])

    assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
    """Staged prediction methods must yield defensive copies: mutating one
    yielded array must not corrupt previously yielded results."""
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(10, 3))
    # bug fix: np.int was a deprecated alias of the builtin int and was
    # removed in NumPy 1.24; use int directly
    y = (4 * X[:, 0]).astype(int) + 1  # don't predict zeros
    for estimator in [GradientBoostingRegressor(),
                      GradientBoostingClassifier()]:
        estimator.fit(X, y)
        for func in ['predict', 'decision_function', 'predict_proba']:
            staged_func = getattr(estimator, "staged_" + func, None)
            if staged_func is None:
                # regressor has no staged_predict_proba
                continue
            staged_result = list(staged_func(X))
            staged_result[1][:] = 0
            # the earlier stage's result must be untouched by the mutation
            assert_true(np.all(staged_result[0] != 0))
def test_serialization():
    """Check model serialization."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    # prefer the C pickle on Python 2; plain pickle otherwise
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    # round-trip through pickle and verify identical predictions
    serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    clf = pickle.loads(serialized_clf)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
    """Check if we can fit even though all targets are equal. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    # classifier should raise exception
    assert_raises(ValueError, clf.fit, X, np.ones(len(X)))

    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, np.ones(len(X)))
    # NOTE(review): rng.rand(2) is a 1-D array treated as a single
    # 2-feature sample by this sklearn version
    clf.predict(rng.rand(2))
    assert_array_equal(np.ones((1,), dtype=np.float64),
                       clf.predict(rng.rand(2)))
def test_quantile_loss():
    """Check if quantile loss with alpha=0.5 equals lad. """
    # the 0.5-quantile (median) objective coincides with least absolute
    # deviation, so both models should predict (almost) identically
    clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
                                             max_depth=4, alpha=0.5,
                                             random_state=7)

    clf_quantile.fit(boston.data, boston.target)
    y_quantile = clf_quantile.predict(boston.data)

    clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
                                        max_depth=4, random_state=7)

    clf_lad.fit(boston.data, boston.target)
    y_lad = clf_lad.predict(boston.data)
    assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
    """Test with non-integer class labels. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    # string labels ('-1', '1') instead of integers
    symbol_y = tosequence(map(str, y))

    clf.fit(X, symbol_y)
    assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
    assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
    """Test with float class labels. """
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    float_y = np.asarray(y, dtype=np.float32)

    clf.fit(X, float_y)
    # predictions come back in the same float dtype as the labels
    assert_array_equal(clf.predict(T),
                       np.asarray(true_result, dtype=np.float32))
    assert_equal(100, len(clf.estimators_))
def test_shape_y():
    """A column-vector y triggers a DataConversionWarning but still fits."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)

    y_ = np.asarray(y, dtype=np.int32)
    y_ = y_[:, np.newaxis]

    # This will raise a DataConversionWarning that we want to
    # "always" raise, elsewhere the warnings gets ignored in the
    # later tests, and the tests that check for this warning fail
    assert_warns(DataConversionWarning, clf.fit, X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_mem_layout():
    """Test with different memory layouts of X and y"""
    # Fortran-ordered X
    X_ = np.asfortranarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    # C-ordered X
    X_ = np.ascontiguousarray(X)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X_, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    # C-ordered y
    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.ascontiguousarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))

    # Fortran-ordered y (no-op for 1-D but exercised for completeness)
    y_ = np.asarray(y, dtype=np.int32)
    y_ = np.asfortranarray(y_)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y_)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
    """Test if oob improvement has correct shape and regression test. """
    # subsample < 1 enables out-of-bag estimates, one entry per stage
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=0.5)
    clf.fit(X, y)
    assert clf.oob_improvement_.shape[0] == 100
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(clf.oob_improvement_[:5],
                              np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
                              decimal=2)
def test_oob_improvement_raise():
    """oob_improvement_ is unavailable (raises) when subsample=1.0."""
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     subsample=1.0)
    clf.fit(X, y)
    # without subsampling there is no out-of-bag set
    assert_raises(AttributeError, lambda: clf.oob_improvement_)
# NOTE(review): "multilcass" is a typo for "multiclass"; the name is kept
# so external test selections keep matching.
def test_oob_multilcass_iris():
    """Check OOB improvement on multi-class dataset."""
    clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
                                     random_state=1, subsample=0.5)
    clf.fit(iris.data, iris.target)
    score = clf.score(iris.data, iris.target)
    assert score > 0.9, "Failed with subsample %.1f " \
        "and score = %f" % (0.5, score)

    assert clf.oob_improvement_.shape[0] == clf.n_estimators

    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    """Check verbose=1 does not cause error. """
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture stdout to inspect the progress report
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=1, subsample=0.8)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout

    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # with OOB (subsample < 1 adds the "OOB Improve" column)
    true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
        'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
    assert_equal(true_header, header)

    n_lines = sum(1 for l in verbose_output.readlines())
    # one for 1-10 and then 9 for 20-100
    assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
    """Check verbose=2 does not cause error. """
    from sklearn.externals.six.moves import cStringIO as StringIO
    import sys
    # capture stdout to inspect the progress report
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
                                     verbose=2)
    clf.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout

    # check output
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # no OOB (subsample defaults to 1.0, so no "OOB Improve" column)
    true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
        'Iter', 'Train Loss', 'Remaining Time')
    assert_equal(true_header, header)

    n_lines = sum(1 for l in verbose_output.readlines())
    # 100 lines for n_estimators==100 (verbose=2 reports every iteration)
    assert_equal(100, n_lines)
def test_warm_start():
    """Test if warm start equals fit. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        # one-shot fit with 200 stages ...
        est = Cls(n_estimators=200, max_depth=1)
        est.fit(X, y)

        # ... must match 100 stages warm-started up to 200
        est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=200)
        est_ws.fit(X, y)

        assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
    """Test if warm start equals fit - set n_estimators. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=300, max_depth=1)
        est.fit(X, y)

        # growing 100 -> 300 via warm start must match the one-shot fit
        est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=300)
        est_ws.fit(X, y)

        assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
    """Test if possible to fit trees of different depth in ensemble. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # grow 10 extra stages with a deeper tree setting
        est.set_params(n_estimators=110, max_depth=2)
        est.fit(X, y)

        # last 10 trees have different depth
        assert est.estimators_[0, 0].max_depth == 1
        for i in range(1, 11):
            assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
    """Test if fit clears state. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)

        est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est_2.fit(X, y)  # inits state
        est_2.set_params(warm_start=False)
        est_2.fit(X, y)  # clears old state and equals est

        assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
    """Test if warm start with zero n_estimators raises error """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # refitting with n_estimators=0 is invalid
        est.set_params(n_estimators=0)
        assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Test if warm start with smaller n_estimators raises error """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # warm start can only grow the ensemble, never shrink it
        est.set_params(n_estimators=99)
        assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
    """Test if warm start with equal n_estimators does nothing """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1)
        est.fit(X, y)

        # refitting a clone with the same n_estimators adds no stages,
        # so predictions stay identical
        est2 = clone(est)
        est2.set_params(n_estimators=est.n_estimators, warm_start=True)
        est2.fit(X, y)

        assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
    """Test if oob can be turned on during warm start. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=100, max_depth=1, warm_start=True)
        est.fit(X, y)
        # enable subsampling (and thus OOB bookkeeping) for 10 extra stages
        est.set_params(n_estimators=110, subsample=0.5)
        est.fit(X, y)

        # the first 100 stages were fitted without subsampling, so no OOB
        # improvement was recorded for them
        assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
        # the last 10 are not zeros
        # bug fix: np.bool was a deprecated alias of the builtin bool and
        # was removed in NumPy 1.24; use bool directly
        assert_array_equal(est.oob_improvement_[-10:] == 0.0,
                           np.zeros(10, dtype=bool))
def test_warm_start_oob():
    """Test if warm start OOB equals fit. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
                  random_state=1)
        est.fit(X, y)

        est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
                     random_state=1, warm_start=True)
        est_ws.fit(X, y)
        est_ws.set_params(n_estimators=200)
        est_ws.fit(X, y)

        # the first 100 OOB improvements must match the one-shot fit
        assert_array_almost_equal(est_ws.oob_improvement_[:100],
                                  est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback for gradient boosting ``fit``: request early
    stopping once the 10th iteration (i == 9) is reached.

    NOTE: the ``locals`` parameter shadows the builtin; the name is kept
    for interface compatibility with the fit(monitor=...) call sites.
    """
    return i == 9
def test_monitor_early_stopping():
    """Test if monitor return value works. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)

    for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
        # monitor returns True at i == 9, so only 10 stages are fitted
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)  # this is not altered
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)

        # try refit
        est.set_params(n_estimators=30)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.train_score_.shape[0], 30)

        # same scenario again, but with warm_start enabled
        est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
                  warm_start=True)
        est.fit(X, y, monitor=early_stopping_monitor)
        assert_equal(est.n_estimators, 20)
        assert_equal(est.estimators_.shape[0], 10)
        assert_equal(est.train_score_.shape[0], 10)
        assert_equal(est.oob_improvement_.shape[0], 10)

        # try refit
        est.set_params(n_estimators=30, warm_start=False)
        est.fit(X, y)
        assert_equal(est.n_estimators, 30)
        assert_equal(est.train_score_.shape[0], 30)
        assert_equal(est.estimators_.shape[0], 30)
        assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
    """Test greedy trees with max_depth + 1 leafs. """
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4

    est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
                                     random_state=1, max_leaf_nodes=k+1)
    est.fit(X, y)

    # with k+1 leaf nodes, best-first growth yields depth k
    tree = est.estimators_[0, 0].tree_
    assert_equal(tree.max_depth, k)
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)
def test_complete_regression():
    """Test greedy trees with max_depth + 1 leafs. """
    from sklearn.tree._tree import TREE_LEAF
    k = 4

    est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
                                    random_state=1, max_leaf_nodes=k+1)
    est.fit(boston.data, boston.target)

    # max_leaf_nodes=k+1 caps every tree at exactly k+1 leaves
    tree = est.estimators_[-1, 0].tree_
    assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
                 k + 1)
def test_zero_estimator_reg():
    """Test if ZeroEstimator works for regression. """
    # explicit ZeroEstimator instance as the initial model
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init=ZeroEstimator())
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)

    # the string alias 'zero' must behave identically
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='zero')
    est.fit(boston.data, boston.target)
    y_pred = est.predict(boston.data)
    mse = mean_squared_error(boston.target, y_pred)
    assert_almost_equal(mse, 33.0, decimal=0)

    # unknown init alias must raise
    est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    """Test if ZeroEstimator works for classification. """
    X = iris.data
    y = np.array(iris.target)

    # explicit ZeroEstimator instance as the initial model
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init=ZeroEstimator())
    est.fit(X, y)

    assert est.score(X, y) > 0.96

    # the string alias 'zero' must behave identically
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)

    assert est.score(X, y) > 0.96

    # binary clf (collapse the three iris classes to two)
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    est.fit(X, y)
    assert est.score(X, y) > 0.96

    # unknown init alias must raise
    est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """Test precedence of max_leaf_nodes over max_depth. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    all_estimators = [GradientBoostingRegressor,
                      GradientBoostingClassifier]
    k = 4
    for GBEstimator in all_estimators:
        # when max_leaf_nodes is set, max_depth is ignored and trees can
        # grow deeper than 1
        est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        tree = est.estimators_[0, 0].tree_
        assert_greater(tree.max_depth, 1)

        # without max_leaf_nodes, max_depth is honored
        est = GBEstimator(max_depth=1).fit(X, y)
        tree = est.estimators_[0, 0].tree_
        assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
    """Test if warm_start does nothing if n_estimators is not changed.
    Regression test for #3513.
    """
    clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    clf.fit([[0, 1], [2, 3]], [0, 1])
    assert clf.estimators_.shape[0] == 10
    # a second fit with unchanged n_estimators must not add stages
    clf.fit([[0, 1], [2, 3]], [0, 1])
    assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
    """Predict probabilities."""
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)

    # predict_proba before fit must raise
    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)

    # for exponential loss, P(y=1|x) = sigmoid(2 * decision_function)
    score = clf.decision_function(T).ravel()
    assert_array_equal(y_proba[:, 1],
                       1.0 / (1.0 + np.exp(-2 * score)))

    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    """Zero-weighted samples must be ignored, for every regression loss."""
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1],
         ]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('ls', 'huber', 'lad', 'quantile'):
        # bug fix: the loop variable was never passed to the estimator,
        # so every iteration silently tested the default 'ls' loss
        gb = GradientBoostingRegressor(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        # with the zero-weight samples gone, [1, 0] maps to target 1
        assert_true(gb.predict([[1, 0]])[0] > 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
    """Zero-weighted samples must be ignored, for every classification loss."""
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1],
         ]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        # bug fix: the loop variable was never passed to the estimator,
        # so every iteration silently tested the default 'deviance' loss
        gb = GradientBoostingClassifier(n_estimators=5, loss=loss)
        gb.fit(X, y, sample_weight=sample_weight)
        # with the zero-weight samples gone, [1, 0] belongs to class 1
        assert_array_equal(gb.predict([[1, 0]]), [1])
if __name__ == "__main__":
    # allow running this test module directly through the nose runner
    import nose
    nose.runmodule()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import re
import json
import ctypes
import distutils.dir_util as dir_util
from os.path import expanduser
from zipfile import *
import webbrowser
try:
from Tkinter import Tk
except ImportError:
pass
def define_operation_system():
    """Classify the running OS from sys.platform.

    Returns 'windows' when the platform string contains 'win',
    'linux' when it contains 'linux', and None otherwise.
    (Note: 'darwin' contains 'win', so macOS reports 'windows' —
    behavior preserved from the original.)
    """
    platform_name = sys.platform
    for pattern, label in ((r'win', 'windows'), (r'linux', 'linux')):
        if re.search(pattern, platform_name, re.IGNORECASE):
            return label
def list_dir(_dir):
    """Return the names of the entries in directory `_dir` (os.listdir)."""
    entries = os.listdir(_dir)
    return entries
def rename_file(_old_name, _new_name):
    """Rename `_old_name` to `_new_name`.

    Returns True on success, False when the source does not exist.
    """
    if not is_exits(_old_name):
        return False
    os.rename(_old_name, _new_name)
    return True
def remove_file(_path):
    """Delete the file at `_path`; silently do nothing if it is absent."""
    if not os.path.exists(_path):
        return
    os.unlink(_path)
def remove_directory(_path):
    """Delete the directory at `_path`; do nothing if it is absent.

    Tries the cheap os.rmdir first (empty directory); falls back to a
    recursive delete when the directory is not empty.
    """
    if os.path.exists(_path):
        try:
            os.rmdir(_path)
        except OSError:
            # Directory not empty: delete the whole tree. shutil.rmtree
            # replaces the deprecated distutils.dir_util.remove_tree.
            import shutil
            shutil.rmtree(_path)
def create_file(_file_name, _content=''):
    """Create (or overwrite) `_file_name` with `_content`.

    Removed the commented-out "create only if missing" variant (dead
    code) and opens in plain write mode: "w+" additionally allowed
    reading, which was never used.
    """
    with open(_file_name, "w") as handler:
        handler.write(_content)
def create_directory(_dir_name):
    """Create a single directory; no-op when it already exists."""
    if os.path.exists(_dir_name):
        return
    os.mkdir(_dir_name)
def hide_file(_path):
    # Set the Windows "hidden" attribute on _path.
    # No-op on non-Windows platforms or when _path does not exist
    # (__check_environment returns a falsy value in those cases).
    # NOTE: Python 2 only — `print e.message` is a print statement.
    if __check_environment(_path):
        try:
            __hide(_path)
        except Exception as e:
            print e.message
def show_file(_path):
    # Clear the Windows "hidden" attribute on _path (see __show).
    # No-op on non-Windows platforms or when _path does not exist.
    # NOTE: Python 2 only — `print e.message` is a print statement.
    if __check_environment(_path):
        try:
            __show(_path)
        except Exception as e:
            print e.message
def hide_directory(_path):
    # Directories share the same hidden-attribute mechanism as files.
    hide_file(_path)
def show_directory(_path):
    # Directories share the same attribute-clearing mechanism as files.
    show_file(_path)
def __hide(_name):
    # Mark a file hidden via the Win32 API (Windows only).
    # NOTE: Python 2 only — the ur'' literal is invalid syntax in Python 3.
    FILE_ATTRIBUTE_HIDDEN = 0x02
    ret = ctypes.windll.kernel32.SetFileAttributesW(ur''+_name, FILE_ATTRIBUTE_HIDDEN)
    if ret:
        print #'attribute set to Hidden'
    else: # return code of zero indicates failure, raise Windows error
        raise WinError()
def __show(_name):
    # Intended to make a hidden file visible again (Windows only).
    # NOTE(review): 0x04 is actually FILE_ATTRIBUTE_SYSTEM in the Win32
    # API; clearing "hidden" normally sets FILE_ATTRIBUTE_NORMAL (0x80).
    # Confirm against the Win32 file-attribute constants before relying
    # on this. Python 2 only — the ur'' literal is invalid in Python 3.
    FILE_ATTRIBUTE_VISIBLE = 0x04
    ret = ctypes.windll.kernel32.SetFileAttributesW(ur''+_name, FILE_ATTRIBUTE_VISIBLE)
    if ret:
        print #'attribute set to visible'
    else: # return code of zero indicates failure, raise Windows error
        raise WinError()
def __check_environment(_path):
    """Return True when running on Windows and `_path` exists.

    Falls through (returning None) in every other case, matching the
    original's truthiness-based contract.
    """
    if define_operation_system() != 'windows':
        return
    if os.path.exists(_path):
        return True
def get_home():
    """Return the current user's home directory."""
    return os.path.expanduser("~")
def get_current_path():
    """Return the absolute path of the current working directory."""
    # os.getcwd() is exactly os.path.abspath(".")
    return os.getcwd()
def object_to_json(_json_object):
    """Serialize `_json_object` to a pretty-printed JSON string (4-space indent)."""
    text = json.dumps(_json_object, indent=4)
    return text
def generate_json_file(_filename, _pkg_list):
    """Write a manifest of the form {"require": _pkg_list} to `_filename` as JSON."""
    payload = object_to_json({"require": _pkg_list})
    create_file(_filename, payload)
def separator():
    """Return the platform path separator (os.sep)."""
    return os.sep
def create_tree(_tree):
    """Create a nested directory path (mkdir -p style; raises if it exists)."""
    os.makedirs(_tree)
def is_exits(_path):
    """Return whether `_path` exists (misspelled name kept for callers)."""
    return os.path.exists(_path)
def decompress_zipfile(_filename, _to_path):
    # Extract every member of the zip archive `_filename` into `_to_path`.
    # Errors are reported to stdout rather than raised (best-effort).
    # NOTE: Python 2 only — `except IOError as (nerror, strerror)` tuple
    # unpacking and the print statements are invalid syntax in Python 3.
    try:
        zip_archive = ZipFile(_filename, mode="r", compression=ZIP_STORED, allowZip64=False)
        zip_archive.extractall(path=_to_path)
        zip_archive.close()
    except IOError as (nerror, strerror):
        print nerror, ' ', strerror
    except BadZipfile as e:
        print e.message
def compress_folder(_foldername, _zipfilename):
    # Recursively add every file under `_foldername` to a new zip archive
    # `_zipfilename`. Errors are printed, not raised (best-effort).
    # NOTE: Python 2 only — `except IOError as (nerror, strerror)` tuple
    # unpacking and the print statements are invalid syntax in Python 3.
    try:
        zip_archive = ZipFile(_zipfilename, mode="w")
        for root, dirs, files in os.walk(_foldername):
            for file in files:
                zip_archive.write(os.path.join(root, file))
        zip_archive.close()
    except IOError as (nerror, strerror):
        print nerror, ' ', strerror
    except BadZipfile as e:
        print e.message
def get_folder_size(_path="."):
    """Total the sizes of all files under `_path` (recursive).

    Returns {'mb': ..., 'kb': ...} (sizes are in bytes despite the
    helper names), or None when `_path` does not exist.
    """
    if not is_exits(_path):
        return
    total_bytes = 0
    for dirpath, _dirnames, filenames in os.walk(_path):
        for name in filenames:
            total_bytes += os.path.getsize(os.path.join(dirpath, name))
    return {'mb': to_mb(total_bytes), 'kb': to_kb(total_bytes)}
def to_mb(_bits):
    """Convert a byte count to mebibytes (float).

    The parameter is named `_bits` historically but the callers pass
    byte counts from os.path.getsize.
    """
    return _bits / 1048576.0
def to_kb(_bits):
    """Convert a byte count to kibibytes (float); see to_mb for naming note."""
    return _bits / 1024.0
def get_file_size(_path):
    """Size of one file as {'mb': ..., 'kb': ...}; None when missing."""
    if not is_exits(_path):
        return
    nbytes = os.path.getsize(_path)
    return {'mb': nbytes / (1024 * 1024.0), 'kb': nbytes / 1024.0}
def open_url(_url):
    """Open `_url` in the system's default web browser.

    :param _url: address to open
    :return: None
    """
    webbrowser.open(_url)
def copy_to_clipboard(_str):
    """Copy `_str` to the system clipboard via a hidden Tk window.

    Requires Tkinter (imported at module top; on platforms where that
    import failed, Tk is undefined and this raises NameError).
    """
    root = Tk()
    root.withdraw()          # never show the helper window
    root.clipboard_clear()
    root.clipboard_append(_str)
    root.destroy()
# WinError is only needed by __hide/__show to surface SetFileAttributesW
# failures; import it lazily so non-Windows platforms still load this module.
if define_operation_system() == 'windows':
    from ctypes import WinError
|
|
###
# Copyright (c) 2017 Diamond Light Source Ltd.
#
# Contributors:
# Charles Mita - initial API and implementation and/or initial documentation
#
###
import itertools
from scanpointgenerator.compat import np
class Dimension(object):
    """
    An unrolled set of generators joined by excluders.
    Represents a single dimension within a scan.
    """
    def __init__(self, generators, excluders=None):
        self.generators = list(generators)
        self.excluders = list(excluders) if excluders is not None else []
        self.axes = list(axis for g in self.generators for axis in g.axes)
        """list(str): Unrolled axes within the dimension"""
        self.size = None
        """int: Size of the dimension"""
        self.upper = [g.positions[a].max((0,)) for g in self.generators for a in g.axes]
        """list(float): Upper bound for the dimension"""
        self.lower = [g.positions[a].min((0,)) for g in self.generators for a in g.axes]
        """list(float): Lower bound for the dimension"""
        # The outermost generator decides whether the whole dimension
        # alternates direction on successive passes.
        self.alternate = self.generators[0].alternate
        self._prepared = False
        self.indices = []
        # validate alternating constraints
        # we currently do not allow a non-alternating generator inside an
        # alternating one due to potentially "surprising" behaviour of the
        # non-alternating generator (the dimension itself will be alternating)
        started_alternating = False
        for g in self.generators:
            if started_alternating and not g.alternate:
                raise ValueError(
                        "Cannot nest non-alternating generators in "
                        "alternating generators within a Dimension "
                        "due to inconsistent output paths")
            started_alternating = started_alternating or g.alternate
    def apply_excluder(self, excluder):
        """Add an excluder to the current Dimension"""
        # Excluders cannot be added after prepare(): the mask is already built.
        if self._prepared:
            raise ValueError("Dimension already prepared")
        # Every axis the excluder filters on must belong to this dimension.
        if not set(excluder.axes) <= set(self.axes):
            raise ValueError("Excluder axes '%s' do not apply to Dimension axes '%s'" \
                % (excluder.axes, self.axes))
        self.excluders.append(excluder)
    def get_positions(self, axis):
        """
        Retrieve the positions for a given axis within the dimension.
        Args:
            axis (str): axis to get positions for
        Returns:
            Positions (np.array): Array of positions
        """
        # check that this dimension is prepared
        if not self._prepared:
            raise ValueError("Must call prepare first")
        return self.positions[axis]
    def get_mesh_map(self, axis):
        """
        Retrieve the mesh map (indices) for a given axis within the dimension.
        Args:
            axis (str): axis to get positions for
        Returns:
            Positions (np.array): Array of mesh indices
        """
        # the points for this axis must be scaled and then indexed
        if not self._prepared:
            raise ValueError("Must call prepare first")
        # scale up points for axis
        gen = [g for g in self.generators if axis in g.axes][0]
        points = gen.positions[axis]
        # just get index of points instead of actual point value
        points = np.arange(len(points))
        if gen.alternate:
            # a forward pass followed by its mirror image
            points = np.append(points, points[::-1])
        # tile may be fractional (0.5) for alternating dimensions because
        # the doubled forward+reverse array above already covers two passes
        tile = 0.5 if self.alternate else 1
        repeat = 1
        # generators outside `gen` multiply the tiling; inner ones the repeats
        for g in self.generators[:self.generators.index(gen)]:
            tile *= g.size
        for g in self.generators[self.generators.index(gen) + 1:]:
            repeat *= g.size
        points = np.repeat(points, repeat)
        if tile % 1 != 0:
            # fractional tiling: whole tiles plus a final half (forward) pass
            p = np.tile(points, int(tile))
            points = np.append(p, points[:int(len(points)//2)])
        else:
            points = np.tile(points, int(tile))
        # apply the excluder mask so only surviving points are returned
        return points[self.indices]
    def get_point(self, idx):
        # Return {axis: position} for the idx'th surviving point.
        if not self._prepared:
            raise ValueError("Must call prepare first")
        axis_points = {axis:self.positions[axis][idx] for axis in self.positions}
        return axis_points
    def get_bounds(self, idx, reverse=False):
        # Return (lower, upper) bound dicts for the idx'th point; with
        # reverse=True the roles are swapped (used on reverse passes).
        if not self._prepared:
            raise ValueError("Must call prepare first")
        if not reverse:
            axis_upper, axis_lower = self.upper_bounds, self.lower_bounds
        else:
            axis_upper, axis_lower = self.lower_bounds, self.upper_bounds
        lower = {axis:axis_lower[axis][idx] for axis in axis_lower}
        upper = {axis:axis_upper[axis][idx] for axis in axis_upper}
        return lower, upper
    def prepare(self):
        """
        Prepare data structures required to determine size and
        filtered positions of the dimension.
        Must be called before get_positions or get_mesh_map are called.
        """
        axis_positions = {}
        axis_bounds_lower = {}
        axis_bounds_upper = {}
        masks = []
        # scale up all position arrays
        # inner generators are tiled by the size of out generators
        # outer generators have positions repeated by the size of inner generators
        repeats, tilings, dim_size = 1, 1, 1
        for g in self.generators:
            repeats *= g.size
            dim_size *= g.size
        for gen in self.generators:
            # NOTE(review): under Python 3 this true division makes
            # `repeats` a float; confirm np.repeat accepts that with the
            # numpy version in use (older versions warn, newer may raise).
            repeats /= gen.size
            for axis in gen.axes:
                positions = gen.positions[axis]
                if gen.alternate:
                    # forward + mirrored pass, then scale to the dimension
                    positions = np.append(positions, positions[::-1])
                    positions = np.repeat(positions, repeats)
                    p = np.tile(positions, (tilings // 2))
                    if tilings % 2 != 0:
                        # odd tiling: finish with a lone forward half-pass
                        positions = np.append(p, positions[:int(len(positions)//2)])
                    else:
                        positions = p
                else:
                    positions = np.repeat(positions, repeats)
                    positions = np.tile(positions, tilings)
                axis_positions[axis] = positions
            tilings *= gen.size
        # produce excluder masks
        for excl in self.excluders:
            arrays = [axis_positions[axis] for axis in excl.axes]
            excluder_mask = excl.create_mask(*arrays)
            masks.append(excluder_mask)
        # AND all masks together (empty mask is all values selected)
        mask = masks[0] if len(masks) else np.full(dim_size, True, dtype=np.int8)
        for m in masks[1:]:
            mask &= m
        # only the innermost generator can contribute per-point bounds
        gen = self.generators[-1]
        if getattr(gen, "bounds", None):
            tilings = np.prod(np.array([g.size for g in self.generators[:-1]]))
            if gen.alternate:
                # forward+reverse doubling means half as many whole tiles
                tilings /= 2.
            for axis in gen.axes:
                upper_base = gen.bounds[axis][1:]
                lower_base = gen.bounds[axis][:-1]
                upper, lower = upper_base, lower_base
                if gen.alternate:
                    # on the reverse pass upper/lower swap roles
                    upper = np.append(upper_base, lower_base[::-1])
                    lower = np.append(lower_base, upper_base[::-1])
                upper = np.tile(upper, int(tilings))
                lower = np.tile(lower, int(tilings))
                if tilings % 1 != 0:
                    # fractional tiling: append the final forward half-pass
                    upper = np.append(upper, upper_base)
                    lower = np.append(lower, lower_base)
                axis_bounds_upper[axis] = upper
                axis_bounds_lower[axis] = lower
        self.mask = mask
        self.indices = self.mask.nonzero()[0]
        self.size = len(self.indices)
        self.positions = {axis:axis_positions[axis][self.indices] for axis in axis_positions}
        # default bounds equal the positions; overridden below where the
        # innermost generator supplied real bounds
        self.upper_bounds = {axis:self.positions[axis] for axis in self.positions}
        self.lower_bounds = {axis:self.positions[axis] for axis in self.positions}
        for axis in axis_bounds_lower:
            self.upper_bounds[axis] = axis_bounds_upper[axis][self.indices]
            self.lower_bounds[axis] = axis_bounds_lower[axis][self.indices]
        self._prepared = True
    @staticmethod
    def merge_dimensions(dimensions):
        # Combine several Dimensions into one, concatenating their
        # generators and excluders in order. The result is unprepared.
        generators = itertools.chain.from_iterable(d.generators for d in dimensions)
        excluders = itertools.chain.from_iterable(d.excluders for d in dimensions)
        return Dimension(generators, excluders)
|
|
#!/usr/bin/env python
#
# Setup script for Review Board.
#
# A big thanks to Django project for some of the fixes used in here for
# MacOS X and data files installation.
import json
import os
import subprocess
import sys
from distutils.command.install import INSTALL_SCHEMES
from distutils.core import Command
from setuptools import setup, find_packages
from setuptools.command.develop import develop
from setuptools.command.egg_info import egg_info
from reviewboard import get_package_version, VERSION
from reviewboard.dependencies import (build_dependency_list,
package_dependencies)
# Make sure this is a version of Python we are compatible with. This should
# prevent people on older versions from unintentionally trying to install
# the source tarball, and failing.
# Refuse to install on interpreters older than Python 2.7.
if sys.hexversion < 0x02070000:
    sys.stderr.write(
        'Review Board %s is incompatible with your version of Python.\n'
        'Please install Review Board 2.5.x or upgrade Python to at least '
        '2.7.\n' % get_package_version())
    sys.exit(1)
# Make sure we're actually in the directory containing setup.py.
root_dir = os.path.dirname(__file__)
if root_dir != '':
    os.chdir(root_dir)
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']
class BuildEggInfoCommand(egg_info):
    """Build the egg information for the package.

    When invoked as part of building a distribution (source, egg, or
    wheel) or installing from source, static media and localization
    files are built first.
    """
    def run(self):
        """Build the egg information."""
        triggers = ('sdist', 'bdist_egg', 'bdist_wheel', 'install')
        if any(command in sys.argv for command in triggers):
            self.run_command('build_media')
            self.run_command('build_i18n')
        egg_info.run(self)
class DevelopCommand(develop):
    """Installs Review Board in developer mode.
    This will install all standard and development dependencies (using Python
    wheels and node.js packages from npm) and add the source tree to the
    Python module search path. That includes updating the versions of pip
    and setuptools on the system.
    To speed up subsequent runs, callers can pass ``--no-npm`` to prevent
    installing node.js packages.
    """
    user_options = develop.user_options + [
        ('no-npm', None, "Don't install packages from npm"),
        ('use-npm-cache', None, 'Use npm-cache to install packages'),
        ('with-doc-deps', None, 'Install documentation-related dependencies'),
    ]
    boolean_options = develop.boolean_options + [
        'no-npm',
        'use-npm-cache',
        'with-doc-deps',
    ]
    def initialize_options(self):
        """Initialize options for the command."""
        develop.initialize_options(self)
        # Custom flags default to unset; setuptools fills them from argv.
        self.no_npm = None
        self.with_doc_deps = None
        self.use_npm_cache = None
    def install_for_development(self):
        """Install the package for development.
        This takes care of the work of installing all dependencies.
        """
        if self.no_deps:
            # In this case, we don't want to install any of the dependencies
            # below. However, it's really unlikely that a user is going to
            # want to pass --no-deps.
            #
            # Instead, what this really does is give us a way to know we've
            # been called by `pip install -e .`. That will call us with
            # --no-deps, as it's going to actually handle all dependency
            # installation, rather than having easy_install do it.
            develop.install_for_development(self)
            return
        # Install the latest pip and setuptools. Note that the order here
        # matters, as otherwise a stale setuptools can be left behind,
        # causing installation errors.
        self._run_pip(['install', '-U', 'setuptools'])
        self._run_pip(['install', '-U', 'pip'])
        # Install the dependencies using pip instead of easy_install. This
        # will use wheels instead of eggs, which are ideal for our users.
        if sys.platform == 'darwin':
            # We're building on macOS, and some of our dependencies
            # (hi there, mercurial!) won't compile using gcc (their default
            # in some cases), so we want to force the proper compiler.
            os.putenv(b'CC', b'clang')
        self._run_pip(['install', '-e', '.'])
        self._run_pip(['install', '-r', 'dev-requirements.txt'])
        if self.with_doc_deps:
            self._run_pip(['install', '-r', 'doc-requirements.txt'])
        if not self.no_npm:
            if self.use_npm_cache:
                # Forward the flag to the install_node_deps sub-command.
                self.distribution.command_options['install_node_deps'] = {
                    'use_npm_cache': ('install_node_deps', 1),
                }
            self.run_command('install_node_deps')
    def _run_pip(self, args):
        """Run pip.
        Args:
            args (list):
                Arguments to pass to :command:`pip`.
        Raises:
            RuntimeError:
                The :command:`pip` command returned a non-zero exit code.
        """
        # Invoke pip through the current interpreter so the right
        # environment is updated, even inside virtualenvs.
        cmd = subprocess.list2cmdline([sys.executable, '-m', 'pip'] + args)
        ret = os.system(cmd)
        if ret != 0:
            raise RuntimeError('Failed to run `%s`' % cmd)
class BuildMediaCommand(Command):
    """Builds static media files for the package.

    The node.js dependencies must be installed beforehand.
    """
    user_options = []
    def initialize_options(self):
        """Initialize options (no-op; required by the Command API)."""
        pass
    def finalize_options(self):
        """Finalize options (no-op; required by the Command API)."""
        pass
    def run(self):
        """Invoke the media build script.

        Raises:
            RuntimeError:
                Static media failed to build.
        """
        result = subprocess.call(
            [sys.executable, 'contrib/internal/build-media.py'])
        if result != 0:
            raise RuntimeError('Failed to build media files')
class BuildI18nCommand(Command):
    """Builds string localization files."""
    description = 'Compile message catalogs to .mo'
    user_options = []
    def initialize_options(self):
        """Initialize options (no-op; required by the Command API)."""
        pass
    def finalize_options(self):
        """Finalize options (no-op; required by the Command API)."""
        pass
    def run(self):
        """Invoke the i18n build script.

        Raises:
            RuntimeError:
                Localization files failed to build.
        """
        result = subprocess.call(
            [sys.executable, 'contrib/internal/build-i18n.py'])
        if result != 0:
            raise RuntimeError('Failed to build i18n files')
class ListNodeDependenciesCommand(Command):
    """Write all node.js dependencies to standard output."""
    description = 'Generate a package.json that lists node.js dependencies'
    user_options = [
        ('to-stdout', None,
         'Write to standard output instead of a package.json file.')
    ]
    # BUG FIX: this previously listed 'to-file', which is not a declared
    # option; the boolean flag declared above is 'to-stdout'.
    boolean_options = ['to-stdout']
    def initialize_options(self):
        """Set the command's option defaults."""
        self.to_stdout = False
    def finalize_options(self):
        """Post-process command options.
        This method intentionally left blank.
        """
        pass
    def run(self):
        """Run the command, writing to stdout or to package.json."""
        if self.to_stdout:
            self._write_deps(sys.stdout)
        else:
            with open('package.json', 'w') as f:
                self._write_deps(f)
    def _write_deps(self, f):
        """Write the package.json to the given file handle.
        Args:
            f (file):
                The file handle to write to.
        """
        # Djblets owns the canonical npm dependency list; reuse it.
        from djblets.dependencies import npm_dependencies
        f.write(json.dumps(
            {
                'name': 'reviewboard',
                'private': 'true',
                'devDependencies': {},
                'dependencies': npm_dependencies,
            },
            indent=2))
        f.write('\n')
class InstallNodeDependenciesCommand(Command):
    """Install all node.js dependencies from npm.
    If ``--use-npm-cache`` is passed, this will use :command:`npm-cache`
    to install the packages, which is best for Continuous Integration setups.
    Otherwise, :command:`npm` is used.
    """
    description = \
        'Install the node packages required for building static media.'
    user_options = [
        ('use-npm-cache', None, 'Use npm-cache to install packages'),
    ]
    boolean_options = ['use-npm-cache']
    def initialize_options(self):
        """Initialize options for the command."""
        self.use_npm_cache = None
    def finalize_options(self):
        """Finalize options for the command.
        This is required, but does not actually do anything.
        """
        pass
    def run(self):
        """Run the commands to install packages from npm.
        Raises:
            RuntimeError:
                There was an error finding or invoking the package manager.
        """
        if self.use_npm_cache:
            npm_command = 'npm-cache'
        else:
            npm_command = 'npm'
        # Probe for the package manager before doing any real work.
        try:
            subprocess.check_call([npm_command, '--version'])
        except subprocess.CalledProcessError:
            raise RuntimeError(
                'Unable to locate %s in the path, which is needed to '
                'install dependencies required to build this package.'
                % npm_command)
        # By this point, dependencies should be installed for us. We're also
        # using the same exact dependencies as Djblets, so no need to
        # duplicate that list.
        # Generate package.json, install from it, then clean it up.
        # NOTE: Python 2 only — the line below is a print statement.
        self.run_command('list_node_deps')
        print 'Installing node.js modules...'
        result = os.system('%s install' % npm_command)
        os.unlink('package.json')
        if result != 0:
            raise RuntimeError(
                'One or more node.js modules could not be installed.')
def build_entrypoints(prefix, entrypoints):
    """Build and return a list of entrypoint strings.

    Helper for constructing the ``entry_points`` argument to
    :py:func:`~setuptools.setup` from a module prefix and a condensed
    list of (entrypoint name, relative module/class path) tuples.

    Args:
        prefix (unicode):
            The prefix for each module path.
        entrypoints (list of tuple):
            Tuples of (entrypoint ID, relative path to append to the
            prefix).

    Returns:
        list of unicode:
        A list of entrypoint items.
    """
    def _join(entrypoint_id, rel_class_name):
        # A relative name containing ':' already carries its own
        # module:class split, so the prefix joins with '.'; otherwise the
        # prefix itself is the module and ':' introduces the class name.
        sep = '.' if ':' in rel_class_name else ':'
        return '%s = %s%s%s' % (entrypoint_id, prefix, sep, rel_class_name)
    return [_join(eid, rel) for eid, rel in entrypoints]
PACKAGE_NAME = 'ReviewBoard'
# Package definition. Entry points are built with build_entrypoints(),
# which joins each relative name onto its module prefix.
setup(
    name=PACKAGE_NAME,
    version=get_package_version(),
    license='MIT',
    description=(
        'Review Board, a fully-featured web-based code and document '
        'review tool made with love <3'
    ),
    author='Beanbag, Inc.',
    author_email='reviewboard@googlegroups.com',
    url='https://www.reviewboard.org/',
    download_url=('https://downloads.reviewboard.org/releases/%s/%s.%s/'
                  % (PACKAGE_NAME, VERSION[0], VERSION[1])),
    packages=find_packages(exclude=['tests']),
    entry_points={
        # Command-line tools shipped with Review Board.
        'console_scripts': build_entrypoints(
            'reviewboard.cmdline',
            [
                ('rb-site', 'rbsite:main'),
                ('rbext', 'rbext:main'),
                ('rbssh', 'rbssh:main'),
            ]
        ),
        # Built-in hosting service integrations.
        'reviewboard.hosting_services': build_entrypoints(
            'reviewboard.hostingsvcs',
            [
                ('assembla', 'assembla:Assembla'),
                ('beanstalk', 'beanstalk:Beanstalk'),
                ('bitbucket', 'bitbucket:Bitbucket'),
                ('bugzilla', 'bugzilla:Bugzilla'),
                ('codebasehq', 'codebasehq:CodebaseHQ'),
                ('fedorahosted', 'fedorahosted:FedoraHosted'),
                ('fogbugz', 'fogbugz:FogBugz'),
                ('github', 'github:GitHub'),
                ('gitlab', 'gitlab:GitLab'),
                ('gitorious', 'gitorious:Gitorious'),
                ('googlecode', 'googlecode:GoogleCode'),
                ('jira', 'jira:JIRA'),
                ('kiln', 'kiln:Kiln'),
                ('rbgateway', 'rbgateway:ReviewBoardGateway'),
                ('redmine', 'redmine:Redmine'),
                ('sourceforge', 'sourceforge:SourceForge'),
                ('splat', 'splat:Splat'),
                ('trac', 'trac:Trac'),
                ('unfuddle', 'unfuddle:Unfuddle'),
                ('versionone', 'versionone:VersionOne'),
            ]
        ),
        # Source code management tool backends.
        'reviewboard.scmtools': build_entrypoints(
            'reviewboard.scmtools',
            [
                ('bzr', 'bzr:BZRTool'),
                ('clearcase', 'clearcase:ClearCaseTool'),
                ('cvs', 'cvs:CVSTool'),
                ('git', 'git:GitTool'),
                ('hg', 'hg:HgTool'),
                ('perforce', 'perforce:PerforceTool'),
                ('plastic', 'plastic:PlasticTool'),
                ('svn', 'svn:SVNTool'),
            ]
        ),
        # Authentication backends.
        'reviewboard.auth_backends': build_entrypoints(
            'reviewboard.accounts.backends',
            [
                ('ad', 'ActiveDirectoryBackend'),
                ('ldap', 'LDAPBackend'),
                ('nis', 'NISBackend'),
                ('x509', 'X509Backend'),
                ('digest', 'HTTPDigestBackend'),
            ]
        ),
    },
    install_requires=build_dependency_list(package_dependencies),
    include_package_data=True,
    zip_safe=False,
    # Custom build/install commands defined above.
    cmdclass={
        'develop': DevelopCommand,
        'egg_info': BuildEggInfoCommand,
        'build_media': BuildMediaCommand,
        'build_i18n': BuildI18nCommand,
        'install_node_deps': InstallNodeDependenciesCommand,
        'list_node_deps': ListNodeDependenciesCommand,
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development',
        'Topic :: Software Development :: Quality Assurance',
    ],
)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
from shadowsocks import usagelog
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
# flag passed to sendto() for TCP Fast Open (Linux)
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
#    local:   connected to the client
#    remote:  connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# for each handler, we have 2 stream directions:
#    upstream:    from client to server direction
#                 read local and write to remote
#    downstream:  from server to client direction
#                 read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
# per-read buffer size for socket recv()
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
    def __init__(self, server, fd_to_handlers, loop, local_sock, config,
                 dns_resolver, is_local, local_address):
        # Handles one client connection for a TCP relay: holds both the
        # local (client-facing) and remote (server-facing) sockets and the
        # per-direction buffering/state.
        self._nread = 0          # bytes read from the local socket
        self._nwrite = 0         # bytes written toward the local socket
        self._server = server
        self._fd_to_handlers = fd_to_handlers
        self._loop = loop
        self._local_sock = local_sock
        self._remote_sock = None  # created later, after DNS resolution
        self._config = config
        self._dns_resolver = dns_resolver
        self._local_address = local_address
        # TCP Relay works as either sslocal or ssserver
        # if is_local, this is sslocal
        self._is_local = is_local
        self._stage = STAGE_INIT
        self._encryptor = encrypt.Encryptor(config['password'],
                                            config['method'])
        self._fastopen_connected = False
        self._data_to_write_to_local = []
        self._data_to_write_to_remote = []
        self._upstream_status = WAIT_STATUS_READING
        self._downstream_status = WAIT_STATUS_INIT
        self._client_address = local_sock.getpeername()[:2]
        self._remote_address = None
        if 'forbidden_ip' in config:
            self._forbidden_iplist = config['forbidden_ip']
        else:
            self._forbidden_iplist = None
        if is_local:
            # sslocal picks its upstream shadowsocks server up front
            self._chosen_server = self._get_a_server()
        # register with the event loop; order matters: the socket must be
        # non-blocking before it is added to the loop
        fd_to_handlers[local_sock.fileno()] = self
        local_sock.setblocking(False)
        local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
        loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
                 self._server)
        self.last_activity = 0
        self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def usage(self):
usage = {
'server_port': self._server._listen_port,
'nread': self._nread,
'nwrite': self._nwrite,
'client': self._local_address,
'remote': self.remote_address
}
return usage
    @property
    def remote_address(self):
        # (host, port) of the destination, set once the address header has
        # been parsed in _handle_stage_addr; None before that.
        return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
    def _update_activity(self, data_len=0):
        # tell the TCP Relay we have activities recently
        # else it will think we are inactive and timed out
        # data_len is forwarded so the relay can account traffic as well.
        self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
    def _write_to_sock(self, data, sock):
        # write data to sock
        # if only some of the data are written, put remaining in the buffer
        # and update the stream to wait for writing
        # Returns False when nothing was written (or the handler was
        # destroyed), True otherwise.
        if not data or not sock:
            return False
        uncomplete = False
        try:
            l = len(data)
            s = sock.send(data)
            if s < l:
                # partial send: keep only the unsent tail
                data = data[s:]
                uncomplete = True
        except (OSError, IOError) as e:
            error_no = eventloop.errno_from_exception(e)
            if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                            errno.EWOULDBLOCK):
                # transient: buffer everything and wait for POLL_OUT
                uncomplete = True
            else:
                shell.print_exception(e)
                self.destroy()
                return False
        if uncomplete:
            # queue the remainder on the matching direction and switch
            # that stream to wait-for-writing
            if sock == self._local_sock:
                self._data_to_write_to_local.append(data)
                self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
            elif sock == self._remote_sock:
                self._data_to_write_to_remote.append(data)
                self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
            else:
                logging.error('write_all_to_sock:unknown socket')
        else:
            if sock == self._local_sock:
                self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
            elif sock == self._remote_sock:
                self._update_stream(STREAM_UP, WAIT_STATUS_READING)
            else:
                logging.error('write_all_to_sock:unknown socket')
        if sock == self._local_sock:
            # NOTE(review): after a partial send `data` holds only the
            # unsent remainder, so this counts the buffered bytes rather
            # than the bytes actually sent — confirm whether _nwrite is
            # meant to track sent or attempted bytes.
            self._nwrite += len(data)
        return True
    def _handle_stage_connecting(self, data):
        # Stage 4: remote connection in progress; buffer incoming data
        # (encrypting first when acting as sslocal).
        if self._is_local:
            data = self._encryptor.encrypt(data)
        self._data_to_write_to_remote.append(data)
        if self._is_local and not self._fastopen_connected and \
                self._config['fast_open']:
            # for sslocal and fastopen, we basically wait for data and use
            # sendto to connect
            try:
                # only connect once
                self._fastopen_connected = True
                remote_sock = \
                    self._create_remote_socket(self._chosen_server[0],
                                               self._chosen_server[1])
                self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
                # TCP Fast Open: carry the buffered payload in the SYN
                data = b''.join(self._data_to_write_to_remote)
                l = len(data)
                s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
                if s < l:
                    # partial send: keep the unsent tail buffered
                    data = data[s:]
                    self._data_to_write_to_remote = [data]
                else:
                    self._data_to_write_to_remote = []
                self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
            except (OSError, IOError) as e:
                if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
                    # in this case data is not sent at all
                    self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
                    logging.error('fast open not supported on this OS')
                    self._config['fast_open'] = False
                    self.destroy()
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()
                    self.destroy()
    def _handle_stage_addr(self, data):
        # Stage 1: parse the destination address. As sslocal this is the
        # SOCKS5 request (VER CMD RSV DST...); as ssserver it is the
        # decrypted shadowsocks address header.
        try:
            if self._is_local:
                cmd = common.ord(data[1])
                if cmd == CMD_UDP_ASSOCIATE:
                    logging.debug('UDP associate')
                    # reply with the address family matching the local socket
                    if self._local_sock.family == socket.AF_INET6:
                        header = b'\x05\x00\x00\x04'
                    else:
                        header = b'\x05\x00\x00\x01'
                    addr, port = self._local_sock.getsockname()[:2]
                    addr_to_send = socket.inet_pton(self._local_sock.family,
                                                    addr)
                    port_to_send = struct.pack('>H', port)
                    self._write_to_sock(header + addr_to_send + port_to_send,
                                        self._local_sock)
                    self._stage = STAGE_UDP_ASSOC
                    # just wait for the client to disconnect
                    return
                elif cmd == CMD_CONNECT:
                    # just trim VER CMD RSV
                    data = data[3:]
                else:
                    logging.error('unknown command %d', cmd)
                    self.destroy()
                    return
            header_result = parse_header(data)
            if header_result is None:
                raise Exception('can not parse header')
            addrtype, remote_addr, remote_port, header_length = header_result
            logging.info('connecting %s:%d from %s:%d' %
                         (common.to_str(remote_addr), remote_port,
                          self._client_address[0], self._client_address[1]))
            self._remote_address = (common.to_str(remote_addr), remote_port)
            # pause reading
            self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
            self._stage = STAGE_DNS
            if self._is_local:
                # forward address to remote
                # immediate SOCKS5 success reply with a dummy bind address
                self._write_to_sock((b'\x05\x00\x00\x01'
                                     b'\x00\x00\x00\x00\x10\x10'),
                                    self._local_sock)
                data_to_send = self._encryptor.encrypt(data)
                self._data_to_write_to_remote.append(data_to_send)
                # notice here may go into _handle_dns_resolved directly
                self._dns_resolver.resolve(self._chosen_server[0],
                                           self._handle_dns_resolved)
            else:
                # any payload past the header goes straight to the buffer
                if len(data) > header_length:
                    self._data_to_write_to_remote.append(data[header_length:])
                # notice here may go into _handle_dns_resolved directly
                self._dns_resolver.resolve(remote_addr,
                                           self._handle_dns_resolved)
        except Exception as e:
            self._log_error(e)
            if self._config['verbose']:
                traceback.print_exc()
            self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
    def _handle_dns_resolved(self, result, error):
        """Callback from the DNS resolver.

        On success, either starts a non-blocking connect to the resolved
        address, or (local side with fast_open) defers the connect until
        data is available. Any failure path destroys the handler.
        """
        if error:
            self._log_error(error)
            self.destroy()
            return
        if result:
            ip = result[1]
            if ip:
                try:
                    self._stage = STAGE_CONNECTING
                    remote_addr = ip
                    if self._is_local:
                        remote_port = self._chosen_server[1]
                    else:
                        remote_port = self._remote_address[1]
                    if self._is_local and self._config['fast_open']:
                        # for fastopen:
                        # wait for more data to arrive and send them in one SYN
                        self._stage = STAGE_CONNECTING
                        # we don't have to wait for remote since it's not
                        # created
                        self._update_stream(STREAM_UP, WAIT_STATUS_READING)
                        # TODO when there is already data in this packet
                    else:
                        # else do connect
                        remote_sock = self._create_remote_socket(remote_addr,
                                                                 remote_port)
                        try:
                            remote_sock.connect((remote_addr, remote_port))
                        except (OSError, IOError) as e:
                            # EINPROGRESS is the normal result of a
                            # non-blocking connect. Note that any other
                            # OS/IO error is also swallowed here; it will
                            # surface later as a POLL_ERR on the socket.
                            if eventloop.errno_from_exception(e) == \
                                    errno.EINPROGRESS:
                                pass
                        self._loop.add(remote_sock,
                                       eventloop.POLL_ERR | eventloop.POLL_OUT,
                                       self._server)
                        self._stage = STAGE_CONNECTING
                        self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
                        self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
                    return
                except Exception as e:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()
        # Empty result / empty ip / exception above: tear down.
        self.destroy()
    def _on_local_read(self):
        """Handle a read event on the local socket and dispatch the data
        to the appropriate stage handler."""
        # handle all local read events and dispatch them to methods for
        # each stage
        if not self._local_sock:
            return
        is_local = self._is_local
        data = None
        try:
            data = self._local_sock.recv(BUF_SIZE)
        except (OSError, IOError) as e:
            # Transient errors: try again on the next poll.
            if eventloop.errno_from_exception(e) in \
                    (errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
                return
        if not data:
            # EOF from the peer: tear the connection pair down.
            self.destroy()
            return
        self._nread += len(data)
        self._update_activity(len(data))
        if not is_local:
            # Server side: data arriving from the client is encrypted.
            data = self._encryptor.decrypt(data)
            if not data:
                return
        if self._stage == STAGE_STREAM:
            if self._is_local:
                data = self._encryptor.encrypt(data)
            self._write_to_sock(data, self._remote_sock)
            return
        elif is_local and self._stage == STAGE_INIT:
            # SOCKS5 method-selection reply: no authentication.
            # TODO check auth method
            self._write_to_sock(b'\x05\00', self._local_sock)
            self._stage = STAGE_ADDR
            return
        elif self._stage == STAGE_CONNECTING:
            self._handle_stage_connecting(data)
        elif (is_local and self._stage == STAGE_ADDR) or \
                (not is_local and self._stage == STAGE_INIT):
            self._handle_stage_addr(data)
    def _on_remote_read(self):
        """Handle a read event on the remote socket: transform the data
        (decrypt on local, encrypt on server) and relay it downstream."""
        # handle all remote read events
        data = None
        try:
            data = self._remote_sock.recv(BUF_SIZE)
        except (OSError, IOError) as e:
            # Transient errors: try again on the next poll.
            if eventloop.errno_from_exception(e) in \
                    (errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
                return
        if not data:
            # EOF from the remote: tear the connection pair down.
            self.destroy()
            return
        self._update_activity(len(data))
        if self._is_local:
            # Data from the shadowsocks server is encrypted.
            data = self._encryptor.decrypt(data)
        else:
            # Data going back to the client must be encrypted.
            data = self._encryptor.encrypt(data)
        try:
            self._write_to_sock(data, self._local_sock)
        except Exception as e:
            shell.print_exception(e)
            if self._config['verbose']:
                traceback.print_exc()
            # TODO use logging when debug completed
            self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._nwrite += len(data)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
    def handle_event(self, sock, event):
        """Dispatch an event loop notification for one of this handler's
        two sockets.

        The order of the checks is deliberate: errors before reads before
        writes, re-checking for destruction after each step so we never
        touch a handler that a previous step tore down.
        """
        # handle all events in this handler and dispatch them to methods
        if self._stage == STAGE_DESTROYED:
            logging.debug('ignore handle_event: destroyed')
            return
        # order is important
        if sock == self._remote_sock:
            if event & eventloop.POLL_ERR:
                self._on_remote_error()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
                self._on_remote_read()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & eventloop.POLL_OUT:
                self._on_remote_write()
        elif sock == self._local_sock:
            if event & eventloop.POLL_ERR:
                self._on_local_error()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
                self._on_local_read()
                if self._stage == STAGE_DESTROYED:
                    return
            if event & eventloop.POLL_OUT:
                self._on_local_write()
        else:
            logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
    def destroy(self):
        """Destroy the handler and release all of its resources.

        Promises:
          1. destroy won't make another destroy() call inside
          2. destroy releases resources so it prevents future call to destroy
          3. destroy won't raise any exceptions
        If any of the promises are broken, it indicates a bug has been
        introduced! Most likely memory leaks, etc.
        """
        if self._stage == STAGE_DESTROYED:
            # this couldn't happen
            logging.debug('already destroyed')
            return
        # Flip the stage first so re-entrant calls bail out above.
        self._stage = STAGE_DESTROYED
        if self._remote_address:
            logging.debug('destroy: %s:%d' %
                          self._remote_address)
        else:
            logging.debug('destroy')
        if self._remote_sock:
            logging.debug('destroying remote')
            self._loop.remove(self._remote_sock)
            del self._fd_to_handlers[self._remote_sock.fileno()]
            self._remote_sock.close()
            self._remote_sock = None
        if self._local_sock:
            logging.debug('destroying local')
            self._loop.remove(self._local_sock)
            del self._fd_to_handlers[self._local_sock.fileno()]
            self._local_sock.close()
            self._local_sock = None
        # Deregister from the resolver and the owning relay last.
        self._dns_resolver.remove_callback(self._handle_dns_resolved)
        self._server.remove_handler(self)
class TCPForwardRelayHandler(TCPRelayHandler):
    """Relay handler that skips the SOCKS5 handshake and always connects
    to a fixed target given by config['local-forward'] ("host:port").
    Only meaningful on the local side.
    """

    # Lazily-built SOCKS5 CONNECT request (bytes) for the forward target,
    # shared by all instances.
    FORWARD_PREFIX = None

    def __init__(self, server, fd_to_handlers, loop, local_sock, config,
                 dns_resolver, is_local, local_address):
        super(TCPForwardRelayHandler, self).__init__(
            server, fd_to_handlers, loop, local_sock, config,
            dns_resolver, is_local, local_address)
        # Skip STAGE_INIT: no SOCKS5 negotiation happens on this handler.
        self._stage = STAGE_ADDR
        if TCPForwardRelayHandler.FORWARD_PREFIX is None:
            # init FORWARD_PREFIX the first time a handler is created
            forward_config = config.get('local-forward', None)
            if (forward_config is None) or (':' not in forward_config):
                # BUGFIX: this used the undefined name 'forward_coinfig',
                # which raised NameError instead of reporting the error.
                logging.critical('invalid forward: %s', forward_config)
                exit(1)
            socks5_atyp = 0x01  # IPv4 by default
            forward_address, forward_port = forward_config.split(':')
            try:
                # inet_aton already yields the packed 4-byte address.
                forward_address = socket.inet_aton(forward_address)
            except socket.error:
                # Not a dotted quad: treat as a domain name, which SOCKS5
                # encodes length-prefixed with ATYP 0x03.
                socks5_atyp = 0x03
                if not isinstance(forward_address, bytes):
                    forward_address = forward_address.encode('utf-8')
                forward_address = (struct.pack('!B', len(forward_address)) +
                                   forward_address)
            forward_port = struct.pack('!H', int(forward_port))
            # VER CMD RSV ATYP ADDR PORT. BUGFIX: all pieces must be bytes
            # (the old code joined str objects with b''.join and used
            # chr(), which raises TypeError on Python 3).
            TCPForwardRelayHandler.FORWARD_PREFIX = b''.join([
                b'\x05\x01\x00',
                struct.pack('!B', socks5_atyp),
                forward_address,
                forward_port
            ])
        self._handle_stage_addr(TCPForwardRelayHandler.FORWARD_PREFIX)

    def _handle_stage_addr(self, data):
        """Like the base class, but |data| is always FORWARD_PREFIX and
        only CMD_CONNECT is supported."""
        try:
            if self._is_local:
                # common.ord handles both py2 str and py3 bytes indexing,
                # consistent with the base class.
                cmd = common.ord(data[1])
                if cmd == CMD_CONNECT:
                    # trim VER CMD RSV
                    data = data[3:]
                else:
                    logging.error('unknown command %d', cmd)
                    self.destroy()
                    return
                header_result = parse_header(data)
                if header_result is None:
                    raise Exception("could not parse header")
                addrtype, remote_addr, remote_port, header_length = header_result
                logging.info('connecting: %s:%d from %s:%d',
                             common.to_str(remote_addr), remote_port,
                             self._client_address[0], self._client_address[1])
                self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
                self._stage = STAGE_DNS
                data_to_send = self._encryptor.encrypt(data)
                self._data_to_write_to_remote.append(data_to_send)
                self._dns_resolver.resolve(self._chosen_server[0],
                                           self._handle_dns_resolved)
            else:
                # this will only work on local
                self.destroy()
        except Exception as e:
            self._log_error(e)
            if self._config['verbose']:
                traceback.print_exc()
            self.destroy()
class TCPRelay(object):
    """Owns the listening TCP socket and all live relay handlers.

    Accepts connections, creates one RelayHandler per connection, and
    sweeps idle handlers using a sorted-by-last-activity queue.
    """

    def __init__(self, config, dns_resolver, is_local, stat_callback=None,
                 RelayHandler=TCPRelayHandler):
        self._config = config
        self._is_local = is_local
        self._dns_resolver = dns_resolver
        self._closed = False
        self._eventloop = None
        self._fd_to_handlers = {}
        self._timeout = config['timeout']
        self._timeouts = []  # a list for all the handlers
        # we trim the timeouts once a while
        self._timeout_offset = 0  # last checked position for timeout
        self._handler_to_timeouts = {}  # key: handler value: index in timeouts
        if is_local:
            listen_addr = config['local_address']
            listen_port = config['local_port']
        else:
            listen_addr = config['server']
            listen_port = config['server_port']
        self._listen_port = listen_port
        addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
                                   socket.SOCK_STREAM, socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" %
                            (listen_addr, listen_port))
        af, socktype, proto, canonname, sa = addrs[0]
        server_socket = socket.socket(af, socktype, proto)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(sa)
        server_socket.setblocking(False)
        if config['fast_open']:
            try:
                # Option 23 with qlen 5 - presumably TCP_FASTOPEN on
                # Linux; confirm on other platforms.
                server_socket.setsockopt(socket.SOL_TCP, 23, 5)
            except socket.error:
                logging.error('warning: fast open is not available')
                self._config['fast_open'] = False
        server_socket.listen(1024)
        self._server_socket = server_socket
        self._stat_callback = stat_callback
        # The handler class to instantiate per accepted connection.
        self.RelayHandler = RelayHandler

    def add_to_loop(self, loop):
        """Register the listening socket and the periodic sweep on |loop|."""
        if self._eventloop:
            raise Exception('already add to loop')
        if self._closed:
            raise Exception('already closed')
        self._eventloop = loop
        self._eventloop.add(self._server_socket,
                            eventloop.POLL_IN | eventloop.POLL_ERR, self)
        self._eventloop.add_periodic(self.handle_periodic)

    def remove_handler(self, handler):
        """Drop |handler| from the timeout bookkeeping."""
        index = self._handler_to_timeouts.get(hash(handler), -1)
        if index >= 0:
            # delete is O(n), so we just set it to None
            # NOTE(review): 'usagelog' and 'handler.usage' are not defined
            # anywhere in this file - presumably provided elsewhere in the
            # project; confirm, otherwise this raises NameError.
            usagelog.log(handler.usage)
            self._timeouts[index] = None
            del self._handler_to_timeouts[hash(handler)]

    def update_activity(self, handler, data_len):
        """Record traffic for |handler| and move it to the tail of the
        timeout queue (at most once per TIMEOUT_PRECISION seconds)."""
        if data_len and self._stat_callback:
            self._stat_callback(self._listen_port, data_len)
        # set handler to active
        now = int(time.time())
        if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
            # thus we can lower timeout modification frequency
            return
        handler.last_activity = now
        index = self._handler_to_timeouts.get(hash(handler), -1)
        if index >= 0:
            # delete is O(n), so we just set it to None
            self._timeouts[index] = None
        length = len(self._timeouts)
        self._timeouts.append(handler)
        self._handler_to_timeouts[hash(handler)] = length

    def _sweep_timeout(self):
        """Destroy handlers idle for longer than the configured timeout.

        The queue is ordered by last_activity, so sweeping stops at the
        first still-active handler.
        """
        # tornado's timeout memory management is more flexible than we need
        # we just need a sorted last_activity queue and it's faster than heapq
        # in fact we can do O(1) insertion/remove so we invent our own
        if self._timeouts:
            logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
            now = time.time()
            length = len(self._timeouts)
            pos = self._timeout_offset
            while pos < length:
                handler = self._timeouts[pos]
                if handler:
                    if now - handler.last_activity < self._timeout:
                        break
                    else:
                        if handler.remote_address:
                            logging.warn('timed out: %s:%d' %
                                         handler.remote_address)
                        else:
                            logging.warn('timed out')
                        handler.destroy()
                        self._timeouts[pos] = None  # free memory
                        pos += 1
                else:
                    pos += 1
            if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # clean up the timeout queue when it gets larger than half
                # of the queue
                self._timeouts = self._timeouts[pos:]
                for key in self._handler_to_timeouts:
                    self._handler_to_timeouts[key] -= pos
                pos = 0
            self._timeout_offset = pos

    def handle_event(self, sock, fd, event):
        """Handle an event loop notification: accept new connections on
        the server socket, otherwise dispatch to the handler owning |fd|."""
        # handle events and dispatch to handlers
        if sock:
            logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
                        eventloop.EVENT_NAMES.get(event, event))
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                # TODO
                raise Exception('server_socket error')
            try:
                logging.debug('accept')
                conn = self._server_socket.accept()
                self.RelayHandler(self, self._fd_to_handlers,
                                  self._eventloop, conn[0], self._config,
                                  self._dns_resolver, self._is_local, local_address=conn[1])
            except (OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()
        else:
            if sock:
                handler = self._fd_to_handlers.get(fd, None)
                if handler:
                    handler.handle_event(sock, event)
                else:
                    logging.warn('poll removed fd')

    def handle_periodic(self):
        """Periodic callback: finish shutting down once closed, then sweep
        idle handlers."""
        if self._closed:
            if self._server_socket:
                self._eventloop.remove(self._server_socket)
                self._server_socket.close()
                self._server_socket = None
                logging.info('closed TCP port %d', self._listen_port)
            if not self._fd_to_handlers:
                logging.info('stopping')
                self._eventloop.stop()
        self._sweep_timeout()

    def close(self, next_tick=False):
        """Stop accepting connections; unless |next_tick|, also tear down
        the socket and every live handler immediately."""
        logging.debug('TCP close')
        self._closed = True
        if not next_tick:
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            self._server_socket.close()
            for handler in list(self._fd_to_handlers.values()):
                handler.destroy()
|
|
# encoding: utf-8
#
# Copyright (C) 2011, 2012 Google Inc.
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from future.utils import PY2, native
import tempfile
import os
import sys
import signal
import socket
import stat
import subprocess
# Creation flag to disable creating a console window on Windows. See
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
CREATE_NO_WINDOW = 0x08000000
# Executable extensions used on Windows
WIN_EXECUTABLE_EXTS = [ '.exe', '.bat', '.cmd' ]
# Don't use this! Call PathToCreatedTempDir() instead. This exists for the sake
# of tests.
RAW_PATH_TO_TEMP_DIR = os.path.join( tempfile.gettempdir(), 'ycm_temp' )
# Readable, writable and executable by everyone.
ACCESSIBLE_TO_ALL_MASK = ( stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH |
stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP )
# Python 3 complains on the common open(path).read() idiom because the file
# doesn't get closed. So, a helper func.
# Also, all files we read are UTF-8.
def ReadFile( filepath ):
  """Return the entire contents of |filepath| decoded as UTF-8."""
  with open( filepath, encoding = 'utf8' ) as handle:
    contents = handle.read()
  return contents
# Returns a file object that can be used to replace sys.stdout or sys.stderr
def OpenForStdHandle( filepath ):
  """Open |filepath| for use as a sys.stdout/sys.stderr replacement.

  Returns a binary unbuffered file on py2 and a text line-buffered file on
  py3, so log output is never delayed."""
  # Need to open the file in binary mode on py2 because of bytes vs unicode.
  # If we open in text mode (default), then third-party code that uses `print`
  # (we're replacing sys.stdout!) with an `str` object on py2 will cause
  # tracebacks because text mode insists on unicode objects. (Don't forget,
  # `open` is actually `io.open` because of future builtins.)
  # Since this function is used for logging purposes, we don't want the output
  # to be delayed. This means no buffering for binary mode and line buffering
  # for text mode. See https://docs.python.org/2/library/io.html#io.open
  if PY2:
    return open( filepath, mode = 'wb', buffering = 0 )
  return open( filepath, mode = 'w', buffering = 1 )
# Given an object, returns a str object that's utf-8 encoded. This is meant to
# be used exclusively when producing strings to be passed to the C++ Python
# plugins. For other code, you likely want to use ToBytes below.
def ToCppStringCompatible( value ):
  """Return |value| as a native, UTF-8 encoded str for the C++ plugins.

  Anything that is not already str/bytes goes through str() first. For
  other code, use ToBytes instead."""
  if isinstance( value, str ):
    return native( value.encode( 'utf8' ) )
  if isinstance( value, bytes ):
    # Already encoded; just strip the python-future wrapper type.
    return native( value )
  return native( str( value ).encode( 'utf8' ) )
# Returns a unicode type; either the new python-future str type or the real
# unicode type. The difference shouldn't matter.
def ToUnicode( value ):
  """Coerce |value| to the unicode string type.

  Falsy values (None, '', 0, ...) yield the empty string, bytes are
  decoded as UTF-8, and anything else goes through str()."""
  if not value:
    return str()
  if isinstance( value, str ):
    return value
  # All incoming byte strings should be UTF-8 encoded.
  return str( value, 'utf8' ) if isinstance( value, bytes ) else str( value )
# Consistently returns the new bytes() type from python-future. Assumes incoming
# strings are either UTF-8 or unicode (which is converted to UTF-8).
def ToBytes( value ):
  """Coerce |value| to the python-future bytes type, encoding text as UTF-8.

  Falsy values yield b''; non-string/bytes values go through str() first."""
  if not value:
    return bytes()
  # This is tricky. On py2, the bytes type from builtins (from python-future) is
  # a subclass of str. So all of the following are true:
  #   isinstance(str(), bytes)
  #   isinstance(bytes(), str)
  # But they don't behave the same in one important aspect: iterating over a
  # bytes instance yields ints, while iterating over a (raw, py2) str yields
  # chars. We want consistent behavior so we force the use of bytes().
  if type( value ) == bytes:
    return value
  # This is meant to catch Python 2's native str type.
  if isinstance( value, bytes ):
    return bytes( value, encoding = 'utf8' )
  if isinstance( value, str ):
    # On py2, with `from builtins import *` imported, the following is true:
    #
    #   bytes(str(u'abc'), 'utf8') == b"b'abc'"
    #
    # Obviously this is a bug in python-future. So we work around it. Also filed
    # upstream at: https://github.com/PythonCharmers/python-future/issues/193
    # We can't just return value.encode( 'utf8' ) on both py2 & py3 because on
    # py2 that *sometimes* returns the built-in str type instead of the newbytes
    # type from python-future.
    if PY2:
      return bytes( value.encode( 'utf8' ), encoding = 'utf8' )
    else:
      return bytes( value, encoding = 'utf8' )
  # This is meant to catch `int` and similar non-string/bytes types.
  return ToBytes( str( value ) )
def ByteOffsetToCodepointOffset( line_value, byte_offset ):
  """Convert a 1-based UTF-8 |byte_offset| into |line_value| to a 1-based
  unicode codepoint offset.

  The API talks in byte offsets into the UTF-8 encoded buffer, while ycmd
  internally walks unicode strings, so semantic-trigger checks and the
  like need this conversion."""
  prefix = ToBytes( line_value )[ : byte_offset - 1 ]
  return len( ToUnicode( prefix ) ) + 1
def CodepointOffsetToByteOffset( unicode_line_value, codepoint_offset ):
  """Convert a 1-based unicode |codepoint_offset| into |unicode_line_value|
  to a 1-based byte offset into its UTF-8 encoding.

  Inverse of ByteOffsetToCodepointOffset. The ToUnicode call is normally a
  no-op, but guards against being handed a bytes instance."""
  unicode_prefix = ToUnicode( unicode_line_value )[ : codepoint_offset - 1 ]
  return len( ToBytes( unicode_prefix ) ) + 1
def PathToCreatedTempDir( tempdir = RAW_PATH_TO_TEMP_DIR ):
  """Create |tempdir| if needed, make it world-accessible, and return it."""
  try:
    os.makedirs( tempdir )
    # Needed to support multiple users working on the same machine;
    # see issue 606.
    MakeFolderAccessibleToAll( tempdir )
  except OSError:
    # Folder already exists, skip folder creation.
    # NOTE(review): this also swallows other OSErrors (e.g. permission
    # denied) - presumably acceptable here; confirm.
    pass
  return tempdir
def MakeFolderAccessibleToAll( path_to_folder ):
  """Add read/write/execute permission bits for group and others on
  |path_to_folder|, keeping the existing mode bits."""
  mode = os.stat( path_to_folder ).st_mode
  os.chmod( path_to_folder, mode | ACCESSIBLE_TO_ALL_MASK )
def GetUnusedLocalhostPort():
  """Ask the OS for a currently free TCP port and return its number."""
  sock = socket.socket()
  try:
    # Binding to port 0 makes the OS pick any free port for us.
    sock.bind( ( '', 0 ) )
    return sock.getsockname()[ 1 ]
  finally:
    sock.close()
def RemoveIfExists( filename ):
  """Delete |filename|, silently ignoring the error if it does not exist
  (or cannot be removed)."""
  try:
    os.remove( filename )
  except OSError:
    pass
def PathToFirstExistingExecutable( executable_name_list ):
  """Return the resolved path of the first name in |executable_name_list|
  that FindExecutable can locate, or None if none can be found."""
  return next(
    ( path for path in map( FindExecutable, executable_name_list ) if path ),
    None )
# On Windows, distutils.spawn.find_executable only works for .exe files
# but .bat and .cmd files are also executables, so we use our own
# implementation.
def FindExecutable( executable ):
  """Locate |executable| as a file path or on PATH, returning the path or
  None.

  On Windows, when |executable| does not already carry an executable
  extension, every extension in WIN_EXECUTABLE_EXTS is tried in turn
  (distutils.spawn.find_executable only handles .exe)."""
  search_paths = os.environ[ 'PATH' ].split( os.pathsep )
  _base, ext = os.path.splitext( executable )
  if OnWindows() and ext.lower() not in WIN_EXECUTABLE_EXTS:
    candidate_exts = WIN_EXECUTABLE_EXTS
  else:
    candidate_exts = [ '' ]
  for candidate_ext in candidate_exts:
    candidate = executable + candidate_ext
    # A direct (relative or absolute) path wins over a PATH lookup.
    if os.path.isfile( candidate ):
      return candidate
    for directory in search_paths:
      full_path = os.path.join( directory, candidate )
      if os.path.isfile( full_path ):
        return full_path
  return None
def ExecutableName( executable ):
  """Append '.exe' to |executable| on Windows; otherwise return it as-is."""
  if OnWindows():
    return executable + '.exe'
  return executable
def OnWindows():
  """Return True when running on native Windows (not Cygwin)."""
  return sys.platform == 'win32'


def OnCygwin():
  """Return True when running under Cygwin."""
  return sys.platform == 'cygwin'


def OnMac():
  """Return True when running on macOS."""
  return sys.platform == 'darwin'
def ProcessIsRunning( handle ):
  """Return True if the subprocess |handle| refers to is still alive.

  A None handle means no process was started."""
  if handle is None:
    return False
  # poll() is None exactly while the child has not yet exited.
  return handle.poll() is None
# From here: http://stackoverflow.com/a/8536476/1672783
def TerminateProcess( pid ):
  """Forcefully terminate the process identified by |pid|.

  Uses the Win32 TerminateProcess API via ctypes on Windows and SIGTERM
  elsewhere. From here: http://stackoverflow.com/a/8536476/1672783"""
  if OnWindows():
    import ctypes
    PROCESS_TERMINATE = 1
    handle = ctypes.windll.kernel32.OpenProcess( PROCESS_TERMINATE,
                                                 False,
                                                 pid )
    ctypes.windll.kernel32.TerminateProcess( handle, -1 )
    ctypes.windll.kernel32.CloseHandle( handle )
  else:
    os.kill( pid, signal.SIGTERM )
def PathsToAllParentFolders( path ):
  """Generate |path| itself (only if it is a directory) followed by every
  ancestor folder up to and including the filesystem root."""
  current = os.path.normpath( path )
  if os.path.isdir( current ):
    yield current
  parent = os.path.dirname( current )
  # dirname() reaches a fixpoint at the root, which ends the walk.
  while parent != current:
    yield parent
    current = parent
    parent = os.path.dirname( current )
def ForceSemanticCompletion( request_data ):
  """Return True when the request explicitly asks for semantic completion
  via a truthy 'force_semantic' entry."""
  return bool( request_data.get( 'force_semantic', False ) )
# A wrapper for subprocess.Popen that fixes quirks on Windows.
def SafePopen( args, **kwargs ):
  """subprocess.Popen wrapper that fixes quirks on Windows.

  Accepts an extra 'stdin_windows' kwarg which becomes stdin only on
  Windows (and is dropped everywhere); also suppresses console windows
  and converts args to short paths on py2."""
  if OnWindows():
    # We need this to start the server otherwise bad things happen.
    # See issue #637.
    if kwargs.get( 'stdin_windows' ) is subprocess.PIPE:
      kwargs[ 'stdin' ] = subprocess.PIPE
    # Do not create a console window
    kwargs[ 'creationflags' ] = CREATE_NO_WINDOW
    # Python 2 fails to spawn a process from a command containing unicode
    # characters on Windows. See https://bugs.python.org/issue19264 and
    # http://bugs.python.org/issue1759845.
    # Since paths are likely to contains such characters, we convert them to
    # short ones to obtain paths with only ascii characters.
    if PY2:
      args = ConvertArgsToShortPath( args )
  # Always dropped so subprocess.Popen never sees the custom kwarg.
  kwargs.pop( 'stdin_windows', None )
  return subprocess.Popen( args, **kwargs )
# We need to convert environment variables to native strings on Windows and
# Python 2 to prevent a TypeError when passing them to a subprocess.
def SetEnviron( environ, variable, value ):
  """Set |variable| to |value| in the mapping |environ|.

  On Windows + py2, both key and value are converted to native byte
  strings to prevent a TypeError when the mapping is later passed to a
  subprocess."""
  if OnWindows() and PY2:
    environ[ native( ToBytes( variable ) ) ] = native( ToBytes( value ) )
  else:
    environ[ variable ] = value
# Convert paths in arguments command to short path ones
def ConvertArgsToShortPath( args ):
  """Convert any existing-path arguments to Windows short paths.

  |args| may be a single string/bytes command or a list of arguments;
  the same shape is returned."""
  def _MaybeShorten( arg ):
    # Only rewrite arguments that name an existing filesystem entry.
    return GetShortPathName( arg ) if os.path.exists( arg ) else arg

  if isinstance( args, ( str, bytes ) ):
    return _MaybeShorten( args )
  return [ _MaybeShorten( arg ) for arg in args ]
# Get the Windows short path name.
# Based on http://stackoverflow.com/a/23598461/200291
def GetShortPathName( path ):
  """Return the Windows 8.3 short form of |path| (|path| unchanged off
  Windows). Based on http://stackoverflow.com/a/23598461/200291"""
  if not OnWindows():
    return path
  from ctypes import windll, wintypes, create_unicode_buffer
  # Set the GetShortPathNameW prototype
  _GetShortPathNameW = windll.kernel32.GetShortPathNameW
  _GetShortPathNameW.argtypes = [ wintypes.LPCWSTR,
                                  wintypes.LPWSTR,
                                  wintypes.DWORD]
  _GetShortPathNameW.restype = wintypes.DWORD
  output_buf_size = 0
  while True:
    output_buf = create_unicode_buffer( output_buf_size )
    needed = _GetShortPathNameW( path, output_buf, output_buf_size )
    # When the buffer is too small, the call returns the size required,
    # so the loop converges on the second iteration.
    if output_buf_size >= needed:
      return output_buf.value
    else:
      output_buf_size = needed
# Shim for imp.load_source so that it works on both Py2 & Py3. See upstream
# Python docs for info on what this does.
def LoadPythonSource( name, pathname ):
  """Load the Python source file |pathname| as module |name| and return it.

  Shim over imp.load_source (py2) and importlib's SourceFileLoader (py3).
  See the upstream Python docs for the semantics."""
  if PY2:
    import imp
    return imp.load_source( name, pathname )
  else:
    # Import the submodule explicitly: a bare 'import importlib' is not
    # guaranteed to bind importlib.machinery, which made this an
    # AttributeError on some interpreter versions.
    import importlib.machinery
    return importlib.machinery.SourceFileLoader( name, pathname ).load_module()
def SplitLines( contents ):
  """Return the list of indexable lines in the unicode string |contents|.

  Equivalent to str.splitlines except that:
    - the empty string yields [ '' ] instead of []
    - a trailing newline produces a final empty line, so
      SplitLines( '\\n' ) == [ '', '' ]
  """
  # str.splitlines maps '' to [] and drops the "line" after a trailing
  # terminator; neither matches how an editor indexes buffer lines, and
  # str.split( '\n' ) mishandles Windows line endings on unix. Hence this
  # wrapper.
  if contents == '':
    return [ '' ]
  lines = contents.splitlines()
  # Universal newlines ('\n', '\r\n', '\r') all end with '\r' or '\n'
  # ('\r\n' ends with '\n'), so one endswith check covers every
  # terminator; re-instate the trailing empty line splitlines dropped.
  if contents.endswith( ( '\r', '\n' ) ):
    lines.append( '' )
  return lines
|
|
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import subprocess
from collections import defaultdict
import re
from imagemounter import _util
from imagemounter.exceptions import ArgumentError, SubsystemError, ModuleNotFoundError
logger = logging.getLogger(__name__)
class VolumeSystem(object):
"""A VolumeSystem is a collection of volumes. Every :class:`Disk` contains exactly one VolumeSystem. Each
system contains several :class:`Volumes`, which, in turn, may contain additional volume systems.
"""
    def __init__(self, parent, vstype='', volume_detector=''):
        """Creates a VolumeSystem.

        :param parent: the parent may either be a :class:`Disk` or a :class:`Volume` that contains this VolumeSystem.
        :param str vstype: the volume system type to use.
        :param str volume_detector: the volume system detection method to use
        """
        self.parent = parent
        # The Disk this system ultimately belongs to; a parent Volume also
        # exposes a .disk attribute, a Disk is its own disk.
        self.disk = parent.disk if hasattr(parent, 'disk') else parent
        # Resolve the volume system type: explicit argument beats a
        # per-index override from the parser, which beats the '*'
        # wildcard override, which beats autodetection.
        if vstype:
            self.vstype = vstype
        elif self.parent.index in self.disk.parser.vstypes:
            self.vstype = self.disk.parser.vstypes[self.parent.index]
        elif '*' in self.disk.parser.vstypes:
            self.vstype = self.disk.parser.vstypes['*']
        else:
            self.vstype = "detect"
        # 'auto' (or empty) lets the environment decide the detector.
        if volume_detector == 'auto' or not volume_detector:
            self.volume_detector = VolumeSystem._determine_auto_detection_method()
        else:
            self.volume_detector = volume_detector
        self.volume_source = ""  # how volumes were found ('single'/'multi')
        self.volumes = []  # detected Volume objects, in detection order
        self.has_detected = False  # guards against re-running detection
        self._disktype = defaultdict(dict)
def __iter__(self):
for v in self.volumes:
yield v
    def __len__(self):
        """Return the number of volumes in this volume system."""
        return len(self.volumes)
def __getitem__(self, item):
item_suffix = ".{}".format(item)
for v in self.volumes:
if v.index.endswith(item_suffix) or v.index == str(item):
return v
raise KeyError
    def _make_subvolume(self, **args):
        """Creates a subvolume, adds it to this class and returns it."""
        # Imported here rather than at module level - presumably to avoid
        # a circular import with imagemounter.volume; confirm.
        from imagemounter.volume import Volume
        v = Volume(disk=self.disk, parent=self.parent,
                   volume_detector=self.volume_detector,
                   **args)  # vstype is not passed down, let it decide for itself.
        self.volumes.append(v)
        return v
def _make_single_subvolume(self, only_one=True, **args):
"""Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it.
:param bool only_one: if this volume system already has at least one volume, it is returned instead.
"""
if only_one and self.volumes:
return self.volumes[0]
if self.parent.index is None:
index = '0'
else:
index = '{0}.0'.format(self.parent.index)
volume = self._make_subvolume(index=index, **args)
return volume
    def detect_volumes(self, vstype=None, method=None, force=False):
        """Iterator for detecting volumes within this volume system.

        :param str vstype: The volume system type to use. If None, uses :attr:`vstype`
        :param str method: The detection method to use. If None, uses :attr:`volume_detector`
        :param bool force: Specify if you want to force running the detection even if :attr:`has_detected` is True.
        :raises ArgumentError: if no viable detection method can be determined
        """
        if self.has_detected and not force:
            logger.warning("Detection already ran.")
            return
        if vstype is None:
            vstype = self.vstype
        if method is None:
            method = self.volume_detector
        if method == 'auto':
            method = VolumeSystem._determine_auto_detection_method()
        # LVM and VSS are special-cased volume system types; everything
        # else dispatches on the chosen detection method.
        if vstype == 'lvm':
            for v in self._detect_lvm_volumes(self.parent.info.get('volume_group')):
                yield v
        elif vstype == 'vss':
            for v in self._detect_vss_volumes(self.parent._paths['vss']):
                yield v
        elif method == 'single':  # dummy method for Disk
            for v in self._detect_single_volume():
                yield v
        elif method == 'mmls':
            for v in self._detect_mmls_volumes(vstype):
                yield v
        elif method == 'parted':
            for v in self._detect_parted_volumes(vstype):
                yield v
        elif method == 'pytsk3':
            for v in self._detect_pytsk3_volumes(vstype):
                yield v
        else:
            logger.error("No viable detection method found")
            raise ArgumentError("No viable detection method found")
        self.has_detected = True
@staticmethod
def _determine_auto_detection_method():
"""Return the detection method to use when the detection method is 'auto'"""
if _util.module_exists('pytsk3'):
return 'pytsk3'
elif _util.command_exists('mmls'):
return 'mmls'
else:
return 'parted'
def _format_index(self, idx):
"""Returns a formatted index given the disk index idx."""
if self.parent.index is not None:
return '{0}.{1}'.format(self.parent.index, idx)
else:
return str(idx)
    def _detect_single_volume(self):
        """'Detects' a single volume spanning the entire parent image.
        It should not be called other than from a :class:`Disk`."""
        volume = self._make_single_subvolume(offset=0)
        is_directory = os.path.isdir(self.parent.get_raw_path())
        if is_directory:
            # du's last output line holds the grand total in bytes.
            filesize = _util.check_output_(['du', '-scDb', self.parent.get_raw_path()]).strip()
            if filesize:
                volume.size = int(filesize.splitlines()[-1].split()[0])
        else:
            # Use file(1) to describe the image contents.
            description = _util.check_output_(['file', '-sL', self.parent.get_raw_path()]).strip()
            if description:
                # description is the part after the :, until the first comma
                volume.info['fsdescription'] = description.split(': ', 1)[1].split(',', 1)[0].strip()
                if 'size' in description:
                    volume.size = int(re.findall(r'size:? (\d+)', description)[0])
                else:
                    # No size in the description: fall back to the file size.
                    volume.size = os.path.getsize(self.parent.get_raw_path())
        volume.flag = 'alloc'
        self.volume_source = 'single'
        self._assign_disktype_data(volume)
        yield volume
    def _find_pytsk3_volumes(self, vstype='detect'):
        """Finds all volumes based on the pytsk3 library.

        Returns a pytsk3 Volume_Info (iterable of partitions) or [] when
        the image cannot be read.

        :raises ModuleNotFoundError: if pytsk3 is not installed (note: this
            is the imagemounter exception, which shadows the builtin)
        :raises SubsystemError: if TSK fails to read the volume info
        """
        try:
            # noinspection PyUnresolvedReferences
            import pytsk3
        except ImportError:
            logger.error("pytsk3 not installed, could not detect volumes")
            raise ModuleNotFoundError("pytsk3")
        baseimage = None
        try:
            # ewf raw image is now available on base mountpoint
            # either as ewf1 file or as .dd file
            raw_path = self.parent.get_raw_path()
            # noinspection PyBroadException
            try:
                baseimage = pytsk3.Img_Info(raw_path)
            except Exception:
                logger.error("Failed retrieving image info (possible empty image).", exc_info=True)
                return []
            try:
                volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_' + vstype.upper()),
                                             self.parent.offset // self.disk.block_size)
                self.volume_source = 'multi'
                return volumes
            except Exception as e:
                # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
                if "(GPT or DOS at 0)" in str(e) and vstype != 'gpt':
                    self.vstype = 'gpt'
                    # noinspection PyBroadException
                    try:
                        logger.warning("Error in retrieving volume info: TSK couldn't decide between GPT and DOS, "
                                       "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                        volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_GPT'))
                        self.volume_source = 'multi'
                        return volumes
                    except Exception as e:
                        logger.exception("Failed retrieving image info (possible empty image).")
                        raise SubsystemError(e)
                else:
                    logger.exception("Failed retrieving image info (possible empty image).")
                    raise SubsystemError(e)
        finally:
            # Always release the TSK image handle, even on error paths.
            if baseimage:
                baseimage.close()
                del baseimage
def _detect_pytsk3_volumes(self, vstype='detect'):
    """Generator that mounts every partition of this image and yields the mountpoint."""
    # Let _find_pytsk3_volumes run first so a missing pytsk3 still raises
    # ModuleNotFoundError exactly as before.
    found = self._find_pytsk3_volumes(vstype)
    # PERF FIX: the import was previously executed on every loop iteration;
    # hoist it — _find_pytsk3_volumes has already proven it is importable.
    import pytsk3

    # Loop over all volumes in image.
    for p in found:
        volume = self._make_subvolume(index=self._format_index(p.addr),
                                      offset=p.start * self.disk.block_size,
                                      size=p.len * self.disk.block_size)
        # Fill volume with more information
        volume.info['fsdescription'] = p.desc.strip().decode('utf-8')
        if p.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
            volume.flag = 'alloc'
            volume.slot = _util.determine_slot(p.table_num, p.slot_num)
            self._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(p.start, p.len,
                                                                                      volume.info['fsdescription']))
        elif p.flags == pytsk3.TSK_VS_PART_FLAG_UNALLOC:
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1} ".format(p.start, p.len))
        elif p.flags == pytsk3.TSK_VS_PART_FLAG_META:
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1} ".format(p.start, p.len))
        yield volume
def _detect_mmls_volumes(self, vstype='detect'):
    """Finds and mounts all volumes based on mmls.

    Generator yielding one subvolume per mmls output row, flagged as
    'alloc', 'unalloc' or 'meta'.

    :param vstype: volume system type passed to mmls with -t; 'detect' omits -t
    :raises SubsystemError: if the mmls command fails
    """
    try:
        cmd = ['mmls']
        if self.parent.offset:
            # mmls expects the offset in sectors, not in bytes.
            cmd.extend(['-o', str(self.parent.offset // self.disk.block_size)])
        if vstype != 'detect':
            cmd.extend(['-t', vstype])
        cmd.append(self.parent.get_raw_path())
        output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
        self.volume_source = 'multi'
    except Exception as e:
        # some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
        if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
            self.vstype = 'gpt'
            # noinspection PyBroadException
            try:
                logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
                               "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                cmd = ['mmls', '-t', 'gpt', self.parent.get_raw_path()]
                output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
                self.volume_source = 'multi'
            except Exception as e:
                logger.exception("Failed executing mmls command")
                raise SubsystemError(e)
        else:
            logger.exception("Failed executing mmls command")
            raise SubsystemError(e)

    # Keep only the partition table rows that follow the header line
    # (the header ends with the word "Description").
    output = output.split("Description", 1)[-1]
    for line in output.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            # Columns: index, slot, start, end, length[, description].
            values = line.split(None, 5)

            # sometimes there are only 5 elements available
            description = ''
            index, slot, start, end, length = values[0:5]
            if len(values) > 5:
                description = values[5]

            # The index column ends with a colon (e.g. "02:"), hence [:-1].
            volume = self._make_subvolume(index=self._format_index(int(index[:-1])),
                                          offset=int(start) * self.disk.block_size,
                                          size=int(length) * self.disk.block_size)
            volume.info['fsdescription'] = description
        except Exception:
            logger.exception("Error while parsing mmls output")
            continue

        if slot.lower() == 'meta':
            volume.flag = 'meta'
            logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
        elif slot.lower().startswith('-----'):
            volume.flag = 'unalloc'
            logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
        else:
            volume.flag = 'alloc'
            # The slot column is either "table:slot" or a bare slot number.
            if ":" in slot:
                volume.slot = _util.determine_slot(*slot.split(':'))
            else:
                volume.slot = _util.determine_slot(-1, slot)
            self._assign_disktype_data(volume)
            logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(start, length,
                                                                                      volume.info['fsdescription']))

        yield volume
def _detect_parted_volumes(self, vstype='detect'):
    """Finds and mounts all volumes based on parted.

    Generator yielding one subvolume per parted machine-readable output row.

    :param vstype: accepted for interface compatibility; parted cannot be
        told the volume system type, so it is ignored
    :raises SubsystemError: if the machine-readable parted command fails
    """
    # for some reason, parted does not properly return extended volume types in its machine
    # output, so we need to execute it twice.
    meta_volumes = []
    # noinspection PyBroadException
    try:
        output = _util.check_output_(['parted', self.parent.get_raw_path(), 'print'], stdin=subprocess.PIPE)
        for line in output.splitlines():
            if 'extended' in line:
                meta_volumes.append(int(line.split()[0]))
    except Exception:
        logger.exception("Failed executing parted command.")
        # skip detection of meta volumes

    # noinspection PyBroadException
    try:
        # parted does not support passing in the vstype. It either works, or it doesn't.
        cmd = ['parted', self.parent.get_raw_path(), '-sm', 'unit s', 'print free']
        output = _util.check_output_(cmd, stdin=subprocess.PIPE)
        self.volume_source = 'multi'
    except Exception as e:
        logger.exception("Failed executing parted command")
        raise SubsystemError(e)

    num = 0
    for line in output.splitlines():
        if line.startswith("Warning") or not line or ':' not in line or line.startswith(self.parent.get_raw_path()):
            continue
        line = line[:-1]  # remove last ;
        try:
            slot, start, end, length, description = line.split(':', 4)
            if ':' in description:
                description, label, flags = description.split(':', 2)
            else:
                description, label, flags = description, '', ''

            try:
                slot = int(slot)
            except ValueError:
                continue

            volume = self._make_subvolume(index=self._format_index(num),
                                          offset=int(start[:-1]) * self.disk.block_size,  # remove last s
                                          size=int(length[:-1]) * self.disk.block_size)
            volume.info['fsdescription'] = description
            if label:
                volume.info['label'] = label
            if flags:
                volume.info['parted_flags'] = flags

            # TODO: detection of meta volumes
            if description == 'free':
                volume.flag = 'unalloc'
                logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start[:-1],
                                                                                             length[:-1]))
            elif slot in meta_volumes:
                volume.flag = 'meta'
                volume.slot = slot
                logger.info("Found meta volume: block offset: {0}, length: {1}".format(start[:-1], length[:-1]))
            else:
                volume.flag = 'alloc'
                volume.slot = slot
                self._assign_disktype_data(volume)
                logger.info("Found allocated {2}: block offset: {0}, length: {1} "
                            .format(start[:-1], length[:-1], volume.info['fsdescription']))
        except Exception:
            # BUG FIX: only AttributeError was caught before, so a malformed
            # line (e.g. too few ':'-separated fields raising ValueError on
            # unpacking) aborted detection entirely. Skip the bad line
            # instead, matching the mmls parser's behavior.
            logger.exception("Error while parsing parted output")
            continue

        num += 1
        yield volume
def _detect_lvm_volumes(self, volume_group):
    """Gather information about lvolumes, gathering their label, size and raw path"""
    unit_multipliers = {'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3, 'TiB': 1024 ** 4}
    output = _util.check_output_(["lvm", "lvdisplay", volume_group])

    current = None
    for line in output.splitlines():
        if "--- Logical volume ---" in line:
            # Each logical volume section starts with this banner line.
            current = self._make_subvolume(index=self._format_index(len(self)), flag='alloc')
            current.info['fsdescription'] = 'Logical Volume'
        if "LV Name" in line:
            current.info['label'] = line.replace("LV Name", "").strip()
        if "LV Size" in line:
            # e.g. "LV Size  1,50 GiB" -- locale may use a decimal comma.
            amount, unit = line.replace("LV Size", "").strip().split(" ", 1)
            current.size = int(float(amount.replace(',', '.')) * unit_multipliers.get(unit, 1))
        if "LV Path" in line:
            current._paths['lv'] = line.replace("LV Path", "").strip()
            current.offset = 0

    logger.info("{0} volumes found".format(len(self)))
    self.volume_source = 'multi'
    return self.volumes
def _detect_vss_volumes(self, path):
    """Detect volume shadow copy volumes in the specified path.

    Parses ``vshadowinfo`` output, creating one subvolume per "Store:" entry.

    :param path: directory where the vss store device nodes live
    :raises SubsystemError: if vshadowinfo fails
    """
    try:
        volume_info = _util.check_output_(["vshadowinfo", "-o", str(self.parent.offset), self.parent.get_raw_path()])
    except Exception as e:
        logger.exception("Failed obtaining info from the volume shadow copies.")
        raise SubsystemError(e)

    current_store = None
    for line in volume_info.splitlines():
        line = line.strip()
        if line.startswith("Store:"):
            idx = line.split(":")[-1].strip()
            current_store = self._make_subvolume(index=self._format_index(idx), flag='alloc', offset=0)
            current_store._paths['vss_store'] = os.path.join(path, 'vss' + idx)
            current_store.info['fsdescription'] = 'VSS Store'
        elif line.startswith("Volume size"):
            # NOTE(review): assumes a "Store:" line always precedes the
            # attribute lines; otherwise current_store is None here — confirm
            # against vshadowinfo's output format.
            current_store.size = int(line.split(":")[-1].strip().split()[0])
        elif line.startswith("Creation time"):
            current_store.info['creation_time'] = line.split(":")[-1].strip()

    return self.volumes
def preload_volume_data(self):
    """Preloads volume data. It is used to call internal methods that contain information about a volume."""
    # Currently only the disktype output is cached; detected volumes pick it
    # up later through _assign_disktype_data().
    self._load_disktype_data()
def _load_disktype_data(self):
    """Calls the :command:`disktype` command and obtains the disk GUID from GPT volume systems. As we
    are running the tool anyway, the label is also extracted from the tool if it is not yet set.

    The disktype data is only loaded and not assigned to volumes yet.
    """
    if not _util.command_exists('disktype'):
        logger.warning("disktype not installed, could not detect volume type")
        return None

    disktype = _util.check_output_(['disktype', self.parent.get_raw_path()]).strip()

    current_partition = None
    for line in disktype.splitlines():
        if not line:
            continue
        # noinspection PyBroadException
        try:
            line = line.strip()
            # "Partition N:" lines introduce the partition that the following
            # indented attribute lines belong to.
            find_partition_nr = re.match(r"^Partition (\d+):", line)
            if find_partition_nr:
                current_partition = int(find_partition_nr.group(1))
            elif current_partition is not None:
                # NOTE(review): _disktype appears to be a defaultdict(dict)
                # keyed by partition number — confirm where it is initialized.
                if line.startswith("Type ") and "GUID" in line:
                    self._disktype[current_partition]['guid'] = \
                        line[line.index('GUID') + 5:-1].strip()  # output is between ()
                elif line.startswith("Partition Name "):
                    self._disktype[current_partition]['label'] = \
                        line[line.index('Name ') + 6:-1].strip()  # output is between ""
        except Exception:
            logger.exception("Error while parsing disktype output")
            return
def _assign_disktype_data(self, volume, slot=None):
    """Assigns cached disktype data (GUID and label) to a volume.

    Existing values on the volume are never overwritten.
    """
    key = volume.slot if slot is None else slot
    data = self._disktype.get(key) if hasattr(self._disktype, 'get') else None
    if key in self._disktype:
        data = self._disktype[key]
        for field in ('guid', 'label'):
            if field in data and not volume.info.get(field):
                volume.info[field] = data[field]
|
|
import configparser
import logging
import multiprocessing
import multiprocessing.managers
import multiprocessing.synchronize
import numbers
import os
import re
from typing import Optional, Set, Iterable, List, Tuple, Final, Iterator, Union
import appdirs
import numpy as np
from PyQt5 import QtCore
from .calculations.outliertest import OutlierMethod
from .h5io import ProcessingH5File
from .loader import Loader
from ..algorithms.matrixaverager import ErrorPropagationMethod
from ..dataclasses.exposure import QRangeMethod
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class ProcessingSettings(QtCore.QObject):
    """Settings of a data-processing session.

    Values are persisted in three places: class-level defaults, the per-user
    ``cpt4.conf`` ini file and the processing HDF5 file itself. Legacy
    ``.cpt`` / ``.cpt2`` ini files can also be imported via :meth:`load`.
    """

    # Human-readable names of the error propagation methods, used when
    # parsing legacy .cpt/.cpt2 files.
    # BUG FIX: this was a set literal, contradicting the List annotation and
    # giving nondeterministic iteration order; it is now a real list.
    _errorprop2str: Final[List[Tuple[ErrorPropagationMethod, str]]] = [
        (ErrorPropagationMethod.Conservative, 'Conservative'),
        (ErrorPropagationMethod.Linear, 'Average'),
        (ErrorPropagationMethod.Gaussian, 'Squared (Gaussian)'),
        (ErrorPropagationMethod.Weighted, 'Weighted'),
    ]
    rootpath: str = '.'
    eval2dsubpath: str = 'eval2d'
    masksubpath: str = 'mask'
    fsndigits: int = 5
    prefix: str = 'crd'
    ierrorprop: ErrorPropagationMethod = ErrorPropagationMethod.Gaussian
    qerrorprop: ErrorPropagationMethod = ErrorPropagationMethod.Gaussian
    outliermethod: OutlierMethod = OutlierMethod.IQR
    outlierthreshold: float = 1.5
    outlierlogcormat: bool = True
    bigmemorymode: bool = False
    # NOTE(review): at runtime this is a SyncManager RLock *proxy*, not a
    # multiprocessing.synchronize.RLock — annotation kept for compatibility.
    h5lock: multiprocessing.synchronize.RLock
    badfsns: Set[int]
    fsnranges: List[Tuple[int, int]]
    qrangemethod: QRangeMethod = QRangeMethod.Linear
    qcount: int = 0  # 0 means the same number as pixels
    settingsChanged = QtCore.pyqtSignal()
    badfsnsChanged = QtCore.pyqtSignal()
    _h5io: Optional[ProcessingH5File] = None
    _manager: multiprocessing.managers.SyncManager
    _modified: bool = False
    _loader: Optional[Loader] = None

    def __init__(self, filename: str):
        super().__init__()
        self.loadDefaults()
        self.filename = filename
        self.rootpath = os.getcwd()
        self._manager = multiprocessing.Manager()
        self.h5lock = self._manager.RLock()
        self.badfsns = set()
        self.fsnranges = []
        self.load(filename)

    def loadDefaults(self):
        """Load user-level default settings from the per-user cpt4.conf file."""
        cp = configparser.ConfigParser()
        cp.read([os.path.join(appdirs.user_config_dir('cct'), 'cpt4.conf')])
        # Section assignment goes through read_dict(), which stringifies the
        # values, so the non-string default for 'qrangecount' is acceptable.
        cp['DEFAULT'] = {'eval2dsubpath': 'eval2d',
                         'masksubpath': 'mask',
                         'fsndigits': '5',
                         'prefix': 'crd',
                         'ierrorprop': ErrorPropagationMethod.Conservative.name,
                         'qerrorprop': ErrorPropagationMethod.Conservative.name,
                         'outliermethod': OutlierMethod.IQR.value,
                         'outlierthreshold': '1.5',
                         'logcorrmat': 'yes',
                         'bigmemorymode': 'no',
                         'qrangemethod': QRangeMethod.Linear.name,
                         'qrangecount': 0,
                         }
        if not cp.has_section('cpt4'):
            cp.add_section('cpt4')
        cpt4section = cp['cpt4']
        self.eval2dsubpath = cpt4section.get('eval2dsubpath')
        self.masksubpath = cpt4section.get('masksubpath')
        self.fsndigits = cpt4section.getint('fsndigits')
        self.prefix = cpt4section.get('prefix')
        self.ierrorprop = ErrorPropagationMethod[cpt4section.get('ierrorprop')]
        self.qerrorprop = ErrorPropagationMethod[cpt4section.get('qerrorprop')]
        self.outliermethod = OutlierMethod(cpt4section.get('outliermethod'))
        self.outlierthreshold = cpt4section.getfloat('outlierthreshold')
        self.outlierlogcormat = cpt4section.getboolean('logcorrmat')
        self.bigmemorymode = cpt4section.getboolean('bigmemorymode')
        self.qrangemethod = QRangeMethod[cpt4section.get('qrangemethod')]
        self.qcount = cpt4section.getint('qrangecount')

    def saveDefaults(self):
        """Write the current settings back to the per-user cpt4.conf file."""
        cp = configparser.ConfigParser()
        cp.read(os.path.join(appdirs.user_config_dir('cct'), 'cpt4.conf'))
        if not cp.has_section('cpt4'):
            cp.add_section('cpt4')
        cpt4section = cp['cpt4']
        cpt4section['eval2dsubpath'] = self.eval2dsubpath
        cpt4section['masksubpath'] = self.masksubpath
        cpt4section['fsndigits'] = str(self.fsndigits)
        cpt4section['prefix'] = self.prefix
        cpt4section['ierrorprop'] = self.ierrorprop.name
        cpt4section['qerrorprop'] = self.qerrorprop.name
        cpt4section['outliermethod'] = self.outliermethod.value
        cpt4section['outlierthreshold'] = str(self.outlierthreshold)
        cpt4section['logcorrmat'] = 'yes' if self.outlierlogcormat else 'no'
        cpt4section['bigmemorymode'] = 'yes' if self.bigmemorymode else 'no'
        cpt4section['qrangecount'] = str(self.qcount)
        cpt4section['qrangemethod'] = self.qrangemethod.name
        os.makedirs(appdirs.user_config_dir('cct'), exist_ok=True)
        with open(os.path.join(appdirs.user_config_dir('cct'), 'cpt4.conf'), 'wt') as f:
            cp.write(f)

    def addBadFSNs(self, badfsns: Iterable[int]):
        """Add FSNs to the bad list, persist it and notify listeners."""
        newbadfsns = [b for b in badfsns if b not in self.badfsns]
        logger.debug(f'New bad fsns: {newbadfsns}')
        logger.debug(f'Old bad fsns: {self.badfsns}')
        self.badfsns = self.badfsns.union(newbadfsns)
        self.saveBadFSNs()
        self.badfsnsChanged.emit()

    def markAsBad(self, fsn: Union[int, Iterable[int]]):
        """Mark a single FSN or an iterable of FSNs as bad."""
        if isinstance(fsn, numbers.Number):
            fsn = [fsn]
        self.badfsns = self.badfsns.union(fsn)
        self.saveBadFSNs()
        self.badfsnsChanged.emit()

    def markAsGood(self, fsn: Union[int, Iterable[int]]):
        """Remove a single FSN or an iterable of FSNs from the bad list."""
        if isinstance(fsn, numbers.Number):
            fsn = [fsn]
        self.badfsns = self.badfsns.difference(fsn)
        self.saveBadFSNs()
        self.badfsnsChanged.emit()

    def saveBadFSNs(self):
        """Store the sorted bad FSN list in the HDF5 file."""
        with self.h5io.writer('cptsettings') as grp:
            try:
                del grp['badfsns']
            except KeyError:
                pass
            # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int selects the equivalent default dtype.
            grp.create_dataset('badfsns', data=np.array(sorted(self.badfsns)), dtype=int)
        logger.debug('BadFSNs list saved to HDF5 file.')

    def loadBadFSNs(self, filename: Optional[str] = None):
        """Load bad FSNs from the HDF5 file, falling back to a text file.

        :param filename: optional text file with one FSN per line, used when
            the HDF5 file has no (readable) badfsns dataset
        :return: the new set of bad FSNs, or None when nothing could be read
        """
        try:
            with self.h5io.reader('cptsettings') as grp:
                badfsns = set(np.array(grp['badfsns']).tolist())
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt / SystemExit.
            logger.warning('Cannot read badFSNS list from H5 file, trying to load it from the badfsns file instead.')
            if filename is not None:
                try:
                    # BUG FIX: np.int removed from NumPy; use the builtin.
                    badfsns = np.loadtxt(filename).astype(int)
                except IOError:
                    # BUG FIX: the message contained a dead literal instead of
                    # the offending file name.
                    logger.warning(f'Could not open badfsns file {filename}')
                    return
            else:
                badfsns = []
        self.badfsns = set(badfsns)
        self.badfsnsChanged.emit()
        return self.badfsns

    def load(self, filename: str):
        """Load settings from a legacy .cpt/.cpt2 ini file or a .h5/.cpt4 HDF5 file.

        :raises ValueError: for an unrecognized file extension
        """
        if filename.lower().endswith('.cpt') or filename.lower().endswith('.cpt2'):
            cp = configparser.ConfigParser()
            cp.read([filename])

            def parsefsnranges(s) -> List[Tuple[int, int]]:
                # Parses e.g. "[(1, 10), (20, 30)]" into [(1, 10), (20, 30)].
                if not (m := re.match(r'\[(\(\d+\s*,\s*\d+\))(?:,\s*(\(\d+\s*,\s*\d+\)))*\]', s)):
                    raise ValueError(f'Invalid FSN range designation: {s}.')
                logger.debug(str(m.groups()))
                return [tuple([int(g1) for g1 in re.match(r'\((\d+),\s*(\d+)\)', g).groups()]) for g in m.groups() if
                        g is not None]

            for attr, section, option, typeconversion in [
                ('filename', 'io', 'hdf5', lambda s: os.path.split(s)[-1]),
                ('rootpath', 'io', 'datadir', str),
                ('eval2dsubpath', 'io', 'eval2dsubpath', str),
                ('masksubpath', 'io', 'masksubpath', str),
                # BUG FIX: fsndigits is an int attribute but was converted
                # with str(), leaving a string in its place.
                ('fsndigits', 'io', 'fsndigits', int),
                ('ierrorprop', 'processing', 'errorpropagation',
                 lambda val: [ep for ep, s in self._errorprop2str if s == val][0]),
                ('qerrorprop', 'processing', 'abscissaerrorpropagation',
                 lambda val: [ep for ep, s in self._errorprop2str if s == val][0]),
                ('outliermethod', 'processing', 'outliermethod', OutlierMethod),
                ('outlierthreshold', 'processing', 'std_multiplier', float),
                ('outlierlogcormat', 'processing', 'logcorrelmatrix', lambda val: val.upper().strip() == 'TRUE'),
                ('fsnranges', 'io', 'fsnranges', parsefsnranges),
                ('qrangemethod', 'processing', 'qrangemethod', lambda x: QRangeMethod[x]),
                ('qcount', 'processing', 'qrangecount', int),
                ('badfsns', 'io', 'badfsnsfile', self.loadBadFSNs),
            ]:
                try:
                    setattr(self, attr, typeconversion(cp[section][option]))
                except KeyError:
                    logger.debug(f'Cannot read attribute from ini file: {attr}')
                    continue
            try:
                re_int = r'[+-]?\d+'
                re_float = r'[+-]?(\d*\.\d+|\d+\.\d*)([eE][+-]?\d+)?'
                re_number = f'({re_float}|{re_int})'
                # Re-create subtraction jobs stored as repr()-like records in
                # the legacy ini file as groups in the HDF5 output.
                for record in cp['processing']['subtraction'].split(';'):
                    if (m := re.match(r"SubtractionJobRecord\s*\("
                                      r"\s*sample\s*=\s*'(?P<samplename>[^']+)'\s*,"
                                      r"\s*background\s*=\s*'(?P<background>[^']+)'\s*,"
                                      r"\s*mode\s*=\s*'(?P<scalingmode>(None|Constant|Interval|Power-law))'\s*,"
                                      rf"\s*params\s*=\s*'\((?P<params>(\s*{re_number}\s*(\s*,\s*{re_number}\s*)*)?)\)'\)",
                                      record.strip())) is None:
                        raise ValueError(f'Cannot parse string {record.strip()}')
                    with self.h5io.writer(group='Samples') as grp:
                        g = grp.require_group(f'{m["samplename"]}-{m["background"]}')
                        g.attrs['subtraction_samplename'] = m["samplename"]
                        g.attrs['subtraction_background'] = m["background"]
                        g.attrs['subtraction_mode'] = m['scalingmode']
                        if not m['params']:
                            # No parameters at all: default scaling.
                            g.attrs['subtraction_factor'] = 1.0
                            g.attrs['subtraction_factor_unc'] = 0.0
                            g.attrs['subtraction_qmin'] = 0
                            g.attrs['subtraction_qmax'] = 1000
                            g.attrs['subtraction_qcount'] = 100
                        elif m['params'].count(',') == 0:
                            # A single value: constant scaling factor.
                            g.attrs['subtraction_factor'] = float(m['params'])
                            g.attrs['subtraction_factor_unc'] = 0.0
                            g.attrs['subtraction_qmin'] = 0
                            g.attrs['subtraction_qmax'] = 1000
                            g.attrs['subtraction_qcount'] = 100
                        elif m['params'].count(',') == 2:
                            # Three values: a q-range specification.
                            qmin, qmax, qcount = m['params'].split(',')
                            g.attrs['subtraction_factor'] = 1.0
                            g.attrs['subtraction_factor_unc'] = 0.0
                            g.attrs['subtraction_qmin'] = float(qmin)
                            g.attrs['subtraction_qmax'] = float(qmax)
                            g.attrs['subtraction_qcount'] = int(qcount)
                        else:
                            logger.warning(f'Cannot parse subtraction parameters: {m["params"]}')
                            g.attrs['subtraction_factor'] = 1.0
                            g.attrs['subtraction_factor_unc'] = 0.0
                            g.attrs['subtraction_qmin'] = 0
                            g.attrs['subtraction_qmax'] = 1000
                            g.attrs['subtraction_qcount'] = 100
            except KeyError:
                pass
            logger.debug(f'Loaded settings from ini file {filename}')
            return cp
        elif (filename.lower().endswith('.h5')) or (filename.lower().endswith('.cpt4')):
            self.filename = filename
            # Touch the h5io property: it reconstructs the ProcessingH5File
            # when the file name changed. (Don't use assert for this side
            # effect: asserts can be disabled with -O.)
            _ = self.h5io
            try:
                with self.h5io.reader('cptsettings') as grp:
                    identity = lambda a: a
                    for attrname, grpname, h5attrname, typeconversion in [
                        ('filename', 'io', 'hdf5', identity),
                        ('rootpath', 'io', 'datadir', identity),
                        ('eval2dsubpath', 'io', 'eval2dsubpath', identity),
                        ('masksubpath', 'io', 'masksubpath', identity),
                        ('fsndigits', 'io', 'fsndigits', identity),
                        ('ierrorprop', 'processing', 'errorpropagation', ErrorPropagationMethod),
                        ('qerrorprop', 'processing', 'qerrorpropagation', ErrorPropagationMethod),
                        ('outliermethod', 'processing', 'outliermethod', OutlierMethod),
                        ('outlierthreshold', 'processing', 'outlierthreshold', identity),
                        ('outlierlogcormat', 'processing', 'logcorrelmatrix', identity),
                        ('qrangemethod', 'processing', 'qrangemethod', lambda x: QRangeMethod[x]),
                        # BUG FIX: this row previously set a bogus 'count'
                        # attribute; the real attribute is 'qcount' (cf. the
                        # ini loader above).
                        ('qcount', 'processing', 'qrangecount', int),
                    ]:
                        try:
                            setattr(self, attrname, typeconversion(grp[grpname].attrs[h5attrname]))
                        except KeyError:
                            logger.debug(f'Cannot read attribute from h5 file: {attrname}')
                            continue
                    fsnrangesdata = grp['io']['fsnranges']
                    self.fsnranges = [(fsnrangesdata[i, 0], fsnrangesdata[i, 1]) for i in range(fsnrangesdata.shape[0])]
                self.loadBadFSNs()
                logger.info(f'Loaded config from H5 file {filename}')
            except (OSError, KeyError):
                logger.warning(f'Could not load config from H5 file {filename}')
        else:
            raise ValueError('Unknown file format.')

    def save(self, filename: Optional[str] = None):
        """Persist the settings to the user config file and the HDF5 file."""
        self.saveDefaults()
        with self.h5io.writer('cptsettings') as grp:
            iogrp = grp.require_group('io')
            iogrp.attrs['datadir'] = self.rootpath
            iogrp.attrs['eval2dsubpath'] = self.eval2dsubpath
            iogrp.attrs['masksubpath'] = self.masksubpath
            iogrp.attrs['fsndigits'] = self.fsndigits
            iogrp.attrs['prefix'] = self.prefix
            iogrp.attrs['bigmemorymode'] = self.bigmemorymode
            try:
                del iogrp['fsnranges']
            except KeyError:
                pass
            if self.fsnranges:
                iogrp.create_dataset('fsnranges', data=np.vstack(self.fsnranges))
            else:
                iogrp.create_dataset('fsnranges', data=np.array([]))
            processinggrp = grp.require_group('processing')
            processinggrp.attrs['errorpropagation'] = self.ierrorprop.value
            processinggrp.attrs['qerrorpropagation'] = self.qerrorprop.value
            processinggrp.attrs['outliermethod'] = self.outliermethod.value
            processinggrp.attrs['outlierthreshold'] = self.outlierthreshold
            processinggrp.attrs['logcorrelmatrix'] = self.outlierlogcormat
            processinggrp.attrs['qrangemethod'] = self.qrangemethod.name
            processinggrp.attrs['qrangecount'] = self.qcount
        logger.info(f'Saved settings to h5 file {self.h5io.filename}')

    @property
    def lockManager(self) -> multiprocessing.managers.SyncManager:
        """Multiprocessing manager used to create shared locks."""
        return self._manager

    @property
    def h5io(self) -> ProcessingH5File:
        """HDF5 I/O helper; rebuilt whenever self.filename changes."""
        if (self._h5io is None) or (self._h5io.filename != self.filename):
            with self.h5lock:
                self._h5io = ProcessingH5File(self.filename, self.h5lock)
        return self._h5io

    def fsns(self) -> Iterator[int]:
        """Iterate over all FSNs of the configured (inclusive) ranges."""
        for fmin, fmax in self.fsnranges:
            yield from range(fmin, fmax + 1)

    def emitSettingsChanged(self):
        """Save the settings, then notify listeners."""
        self.save()
        self.settingsChanged.emit()

    def loader(self) -> Loader:
        """Return a Loader matching the current I/O settings, cached between calls."""
        if self._loader is not None and (
                (self._loader.rootpath != self.rootpath) or (self._loader.eval2dsubpath != self.eval2dsubpath) or
                (self._loader.masksubpath != self.masksubpath) or (self._loader.fsndigits != self.fsndigits)):
            # Settings changed since the loader was built: drop the stale one.
            self._loader = None
        if self._loader is None:
            # BUG FIX: previously the fresh Loader was returned without being
            # stored in self._loader, so caching never took effect.
            self._loader = Loader(self.rootpath, self.eval2dsubpath, self.masksubpath, self.fsndigits)
        return self._loader
|
|
# Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import os
import re
import socket
import time
import six.moves.http_client as httplib
import boto
from boto import config, storage_uri_for_key
from boto.connection import AWSAuthConnection
from boto.exception import ResumableDownloadException
from boto.exception import ResumableTransferDisposition
from boto.s3.keyfile import KeyFile
from boto.gs.key import Key as GSKey
"""
Resumable download handler.
Resumable downloads will retry failed downloads, resuming at the byte count
completed by the last download attempt. If too many retries happen with no
progress (per configurable num_retries param), the download will be aborted.
The caller can optionally specify a tracker_file_name param in the
ResumableDownloadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
Note that resumable downloads work across providers (they depend only
on support Range GETs), but this code is in the boto.s3 package
because it is the wrong abstraction level to go in the top-level boto
package.
TODO: At some point we should refactor the code to have a storage_service
package where all these provider-independent files go.
"""
class ByteTranslatingCallbackHandler(object):
    """
    Proxy that adjusts progress callbacks made by boto.s3.Key.get_file()
    by the number of bytes that were already on disk when the download
    was resumed.
    """

    def __init__(self, proxied_cb, download_start_point):
        self.proxied_cb = proxied_cb
        self.download_start_point = download_start_point

    def call(self, total_bytes_uploaded, total_size):
        # Report absolute progress: bytes from earlier attempts plus the
        # bytes transferred during the current attempt.
        absolute_progress = self.download_start_point + total_bytes_uploaded
        self.proxied_cb(absolute_progress, total_size)
def get_cur_file_size(fp, position_to_eof=False):
    """
    Returns size of file, optionally leaving fp positioned at EOF.
    """
    if isinstance(fp, KeyFile) and not position_to_eof:
        # Avoid EOF seek for KeyFile case as it's very inefficient.
        return fp.getkey().size
    # Remember where we were so the position can be restored afterwards,
    # unless the caller explicitly wants fp left at EOF.
    restore_pos = None if position_to_eof else fp.tell()
    fp.seek(0, os.SEEK_END)
    size = fp.tell()
    if restore_pos is not None:
        fp.seek(restore_pos, os.SEEK_SET)
    return size
class ResumableDownloadHandler(object):
"""
Handler for resumable downloads.
"""
MIN_ETAG_LEN = 5
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
def __init__(self, tracker_file_name=None, num_retries=None):
    """
    Constructor. Instantiate once for each downloaded file.

    :type tracker_file_name: string
    :param tracker_file_name: optional file in which to persist tracking
        state for this download, so a later process (e.g. another gsutil
        run) can resume it. If the file already exists and holds valid
        state, this handler resumes that transfer; otherwise a new
        resumable download is started.

    :type num_retries: int
    :param num_retries: number of consecutive no-progress attempts allowed
        before aborting. The counter resets whenever progress is made, so
        a download may span far more retries overall.
    """
    self.tracker_file_name = tracker_file_name
    self.num_retries = num_retries
    # Replaced by the etag read from the tracker file, if one exists.
    self.etag_value_for_current_download = None
    # Byte offset at which this handler (re)started; lets the caller
    # compute how much was transferred across retries.
    self.download_start_point = None
    if tracker_file_name:
        self._load_tracker_file_etag()
def _load_tracker_file_etag(self):
f = None
try:
f = open(self.tracker_file_name, 'r')
self.etag_value_for_current_download = f.readline().rstrip('\n')
# We used to match an MD5-based regex to ensure that the etag was
# read correctly. Since ETags need not be MD5s, we now do a simple
# length sanity check instead.
if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN:
print('Couldn\'t read etag in tracker file (%s). Restarting '
'download from scratch.' % self.tracker_file_name)
except IOError as e:
# Ignore non-existent file (happens first time a download
# is attempted on an object), but warn user for other errors.
if e.errno != errno.ENOENT:
# Will restart because
# self.etag_value_for_current_download is None.
print('Couldn\'t read URI tracker file (%s): %s. Restarting '
'download from scratch.' %
(self.tracker_file_name, e.strerror))
finally:
if f:
f.close()
def _save_tracker_info(self, key):
    """Record the key's ETag in the tracker file so a later process can resume.

    Raises ResumableDownloadException (ABORT) if the tracker file cannot
    be written.
    """
    self.etag_value_for_current_download = key.etag.strip('"\'')
    if not self.tracker_file_name:
        return
    try:
        # 'with' guarantees the handle is closed, replacing the old
        # try/finally bookkeeping.
        with open(self.tracker_file_name, 'w') as f:
            f.write('%s\n' % self.etag_value_for_current_download)
    except IOError as e:
        raise ResumableDownloadException(
            # BUG FIX: the adjacent literals previously rendered as
            # "...This can happenif you're..." (missing separator).
            'Couldn\'t write tracker file (%s): %s.\nThis can happen\n'
            'if you\'re using an incorrectly configured download tool\n'
            '(e.g., gsutil configured to save tracker files to an '
            'unwritable directory)' %
            (self.tracker_file_name, e.strerror),
            ResumableTransferDisposition.ABORT)
def _remove_tracker_file(self):
    """Delete the tracker file, if one is configured and present."""
    tracker = self.tracker_file_name
    if tracker and os.path.exists(tracker):
        os.unlink(tracker)
def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
                                torrent, version_id, hash_algs):
    """
    Attempts a resumable download.

    Resumes from the current size of fp when the stored ETag matches the
    key's ETag; otherwise starts over (truncating fp and saving fresh
    tracker state).

    Raises ResumableDownloadException if any problems occur.
    """
    cur_file_size = get_cur_file_size(fp, position_to_eof=True)

    if (cur_file_size and
        self.etag_value_for_current_download and
        self.etag_value_for_current_download == key.etag.strip('"\'')):
        # Try to resume existing transfer.
        if cur_file_size > key.size:
            raise ResumableDownloadException(
                '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
                'if you re-try this download it will start from scratch' %
                (fp.name, cur_file_size, str(storage_uri_for_key(key)),
                 key.size), ResumableTransferDisposition.ABORT)
        elif cur_file_size == key.size:
            if key.bucket.connection.debug >= 1:
                print('Download complete.')
            return
        if key.bucket.connection.debug >= 1:
            print('Resuming download.')
        headers = headers.copy()
        # Range is inclusive on both ends, hence key.size - 1.
        headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
        # Shift progress callbacks by the bytes already present on disk.
        cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
        self.download_start_point = cur_file_size
    else:
        if key.bucket.connection.debug >= 1:
            print('Starting new resumable download.')
        self._save_tracker_info(key)
        self.download_start_point = 0
        # Truncate the file, in case a new resumable download is being
        # started atop an existing file.
        fp.truncate(0)

    # Disable AWSAuthConnection-level retry behavior, since that would
    # cause downloads to restart from scratch.
    if isinstance(key, GSKey):
        # Only the GS key variant accepts hash_algs.
        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                     override_num_retries=0, hash_algs=hash_algs)
    else:
        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                     override_num_retries=0)
    fp.flush()
def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
             version_id=None, hash_algs=None):
    """
    Retrieves a file from a Key, retrying with exponential backoff for as
    long as each attempt makes forward progress.

    :type key: :class:`boto.s3.key.Key` or subclass
    :param key: The Key object from which upload is to be downloaded

    :type fp: file
    :param fp: File pointer into which data should be downloaded

    :type headers: string
    :param: headers to send when retrieving the files

    :type cb: function
    :param cb: (optional) a callback function that will be called to report
        progress on the download. The callback should accept two integer
        parameters, the first representing the number of bytes that have
        been successfully transmitted from the storage service and
        the second representing the total number of bytes that need
        to be transmitted.

    :type num_cb: int
    :param num_cb: (optional) If a callback is specified with the cb
        parameter this parameter determines the granularity of the callback
        by defining the maximum number of times the callback will be
        called during the file transfer.

    :type torrent: bool
    :param torrent: Flag for whether to get a torrent for the file

    :type version_id: string
    :param version_id: The version ID (optional)

    :type hash_algs: dictionary
    :param hash_algs: (optional) Dictionary of hash algorithms and
        corresponding hashing class that implements update() and digest().
        Defaults to {'md5': hashlib/md5.md5}.

    Raises ResumableDownloadException if a problem occurs during
    the transfer.
    """
    debug = key.bucket.connection.debug

    if not headers:
        headers = {}

    # Use num-retries from constructor if one was provided; else check
    # for a value specified in the boto config file; else default to 6.
    if self.num_retries is None:
        self.num_retries = config.getint('Boto', 'num_retries', 6)
    progress_less_iterations = 0

    while True:  # Retry as long as we're making progress.
        # Snapshot the file size to detect progress after this attempt.
        had_file_bytes_before_attempt = get_cur_file_size(fp)
        try:
            self._attempt_resumable_download(key, fp, headers, cb, num_cb,
                                             torrent, version_id, hash_algs)
            # Download succceded, so remove the tracker file (if have one).
            self._remove_tracker_file()
            # Previously, check_final_md5() was called here to validate
            # downloaded file's checksum, however, to be consistent with
            # non-resumable downloads, this call was removed. Checksum
            # validation of file contents should be done by the caller.
            if debug >= 1:
                print('Resumable download complete.')
            return
        except self.RETRYABLE_EXCEPTIONS as e:
            if debug >= 1:
                print('Caught exception (%s)' % e.__repr__())
            if isinstance(e, IOError) and e.errno == errno.EPIPE:
                # Broken pipe error causes httplib to immediately
                # close the socket (http://bugs.python.org/issue5542),
                # so we need to close and reopen the key before resuming
                # the download.
                # NOTE(review): this branch immediately re-issues
                # key.get_file() inside the handler rather than letting the
                # loop retry — confirm this is the intended re-open dance.
                if isinstance(key, GSKey):
                    key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                                 override_num_retries=0, hash_algs=hash_algs)
                else:
                    key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                                 override_num_retries=0)
        except ResumableDownloadException as e:
            # NOTE(review): e.message assumes ResumableDownloadException
            # defines a .message attribute itself (BaseException.message was
            # removed in Python 3) — verify against the exception class.
            if (e.disposition ==
                ResumableTransferDisposition.ABORT_CUR_PROCESS):
                if debug >= 1:
                    print('Caught non-retryable ResumableDownloadException '
                          '(%s)' % e.message)
                raise
            elif (e.disposition ==
                  ResumableTransferDisposition.ABORT):
                if debug >= 1:
                    print('Caught non-retryable ResumableDownloadException '
                          '(%s); aborting and removing tracker file' %
                          e.message)
                self._remove_tracker_file()
                raise
            else:
                if debug >= 1:
                    print('Caught ResumableDownloadException (%s) - will '
                          'retry' % e.message)

        # At this point we had a re-tryable failure; see if made progress.
        if get_cur_file_size(fp) > had_file_bytes_before_attempt:
            progress_less_iterations = 0
        else:
            progress_less_iterations += 1

        if progress_less_iterations > self.num_retries:
            # Don't retry any longer in the current process.
            raise ResumableDownloadException(
                'Too many resumable download attempts failed without '
                'progress. You might try this download again later',
                ResumableTransferDisposition.ABORT_CUR_PROCESS)

        # Close the key, in case a previous download died partway
        # through and left data in the underlying key HTTP buffer.
        # Do this within a try/except block in case the connection is
        # closed (since key.close() attempts to do a final read, in which
        # case this read attempt would get an IncompleteRead exception,
        # which we can safely ignore.
        try:
            key.close()
        except httplib.IncompleteRead:
            pass

        # Exponential backoff: 2**n seconds after n progress-less attempts.
        sleep_time_secs = 2**progress_less_iterations
        if debug >= 1:
            print('Got retryable failure (%d progress-less in a row).\n'
                  'Sleeping %d seconds before re-trying' %
                  (progress_less_iterations, sleep_time_secs))
        time.sleep(sleep_time_secs)
|
|
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db import connection
from django.db.models import F
from django.db.models.fields import Field, IntegerField
from django.db.models.functions import Upper
from django.db.models.lookups import Contains, Exact
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.test.utils import CaptureQueriesContext, register_lookup
from django.urls import reverse
from django.utils import formats
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, EventAdmin,
FilteredChildAdmin, GroupAdmin, InvitationAdmin,
NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin, SwallowAdmin,
site as custom_site,
)
from .models import (
Band, CharPK, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser,
Event, Genre, Group, Invitation, Membership, Musician, OrderedObject,
Parent, Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def build_tbody_html(pk, href, extra_fields):
    """Return the expected <tbody> markup for one changelist row.

    The row contains the action checkbox (valued with *pk*), a "name"
    column linking to *href*, and then *extra_fields* verbatim.
    """
    checkbox_cell = (
        f'<td class="action-checkbox">'
        f'<input type="checkbox" name="_selected_action" value="{pk}" '
        f'class="action-select"></td>'
    )
    name_cell = f'<th class="field-name"><a href="{href}">name</a></th>'
    return (
        f'<tbody><tr class="row1">{checkbox_cell}{name_cell}'
        f'{extra_fields}</tr></tbody>'
    )
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
    # Per-test fixtures: a RequestFactory and a superuser reused below.
    self.factory = RequestFactory()
    self.superuser = User.objects.create_superuser(username='super', email='a@b.com', password='xxx')
def _create_superuser(self, username):
    """Create and return an additional superuser with the given username."""
    return User.objects.create_superuser(username=username, email='a@b.com', password='xxx')
def _mocked_authenticated_request(self, url, user):
    """Return a GET request for *url* with request.user set to *user*."""
    request = self.factory.get(url)
    request.user = user
    return request
def test_specified_ordering_by_f_expression(self):
    """Ordering declared with F() expressions plus asc()/desc() is reflected
    in the changelist's ordering-field column map."""
    class OrderedByFBandAdmin(admin.ModelAdmin):
        list_display = ['name', 'genres', 'nr_of_members']
        ordering = (
            F('nr_of_members').desc(nulls_last=True),
            Upper(F('name')).asc(),
            F('genres').asc(),
        )
    m = OrderedByFBandAdmin(Band, custom_site)
    request = self.factory.get('/band/')
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    # NOTE(review): keys look like 1-based list_display column indexes
    # (nr_of_members -> 3, name -> 2) — verify against get_ordering_field_columns().
    self.assertEqual(cl.get_ordering_field_columns(), {3: 'desc', 2: 'asc'})
def test_specified_ordering_by_f_expression_without_asc_desc(self):
    """F()/Upper() ordering without explicit asc()/desc() defaults to 'asc'."""
    class OrderedByFBandAdmin(admin.ModelAdmin):
        list_display = ['name', 'genres', 'nr_of_members']
        ordering = (F('nr_of_members'), Upper('name'), F('genres'))

    band_admin = OrderedByFBandAdmin(Band, custom_site)
    request = self._mocked_authenticated_request('/band/', self.superuser)
    changelist = band_admin.get_changelist_instance(request)
    self.assertEqual(
        changelist.get_ordering_field_columns(), {3: 'asc', 2: 'asc'})
def test_select_related_preserved(self):
    """
    Regression test for #10348: ChangeList.get_queryset() shouldn't
    overwrite a custom select_related provided by ModelAdmin.get_queryset().
    """
    m = ChildAdmin(Child, custom_site)
    request = self.factory.get('/child/')
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    # The custom select_related('parent') must survive into the queryset.
    self.assertEqual(cl.queryset.query.select_related, {'parent': {}})
def test_select_related_as_tuple(self):
    """list_select_related given as a tuple is applied to the queryset."""
    invitation_admin = InvitationAdmin(Invitation, custom_site)
    request = self._mocked_authenticated_request('/invitation/', self.superuser)
    changelist = invitation_admin.get_changelist_instance(request)
    self.assertEqual(changelist.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
    """An empty list_select_related tuple disables select_related entirely."""
    ia = InvitationAdmin(Invitation, custom_site)
    ia.list_select_related = ()
    request = self.factory.get('/invitation/')
    request.user = self.superuser
    cl = ia.get_changelist_instance(request)
    self.assertIs(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
    """A get_list_select_related() override drives the queryset's select_related."""
    class GetListSelectRelatedAdmin(admin.ModelAdmin):
        list_display = ('band', 'player')
        def get_list_select_related(self, request):
            return ('band', 'player')
    ia = GetListSelectRelatedAdmin(Invitation, custom_site)
    request = self.factory.get('/invitation/')
    request.user = self.superuser
    cl = ia.get_changelist_instance(request)
    self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
    """
    Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
    for relationship fields
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    request.user = self.superuser
    m = ChildAdmin(Child, custom_site)
    cl = m.get_changelist_instance(request)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl, 'opts': Child._meta})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    # The null FK must render as the default empty value '-'.
    row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">-</td>')
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
    """
    Empty value display can be set on AdminSite.
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    request.user = self.superuser
    # Set a new empty display value on AdminSite. admin.site is a process
    # global, so restore the original value afterwards — otherwise the
    # mutation would leak into every other test using admin.site.
    self.addCleanup(
        setattr, admin.site, 'empty_value_display',
        admin.site.empty_value_display,
    )
    admin.site.empty_value_display = '???'
    m = ChildAdmin(Child, admin.site)
    cl = m.get_changelist_instance(request)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl, 'opts': Child._meta})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">???</td>')
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
    """
    Empty value display can be set in ModelAdmin or individual fields.
    """
    new_child = Child.objects.create(name='name', parent=None)
    request = self.factory.get('/child/')
    request.user = self.superuser
    m = EmptyValueChildAdmin(Child, admin.site)
    cl = m.get_changelist_instance(request)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl, 'opts': Child._meta})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    # The per-field override renders '&dagger;', the ModelAdmin-level
    # override renders '-empty-' (both defined on EmptyValueChildAdmin).
    row_html = build_tbody_html(
        new_child.id,
        link,
        '<td class="field-age_display">&dagger;</td>'
        '<td class="field-age">-empty-</td>'
    )
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
    """
    Inclusion tag result_list generates a table when with default
    ModelAdmin settings.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    request.user = self.superuser
    m = ChildAdmin(Child, custom_site)
    cl = m.get_changelist_instance(request)
    cl.formset = None
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl, 'opts': Child._meta})
    table_output = template.render(context)
    link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
    row_html = build_tbody_html(new_child.id, link, '<td class="field-parent nowrap">%s</td>' % new_parent)
    self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
    """
    Regression tests for #11791: Inclusion tag result_list generates a
    table and this checks that the items are nested within the table
    element tags.
    Also a regression test for #13599, verifies that hidden fields
    when list_editable is enabled are rendered in a div outside the
    table.
    """
    new_parent = Parent.objects.create(name='parent')
    new_child = Child.objects.create(name='name', parent=new_parent)
    request = self.factory.get('/child/')
    request.user = self.superuser
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    cl = m.get_changelist_instance(request)
    FormSet = m.get_changelist_formset(request)
    cl.formset = FormSet(queryset=cl.result_list)
    template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
    context = Context({'cl': cl, 'opts': Child._meta})
    table_output = template.render(context)
    # make sure that hidden fields are in the correct place
    hiddenfields_div = (
        '<div class="hiddenfields">'
        '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">'
        '</div>'
    ) % new_child.id
    self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
    # make sure that list editable fields are rendered in divs correctly
    editable_name_field = (
        '<input name="form-0-name" value="name" class="vTextField" '
        'maxlength="30" type="text" id="id_form-0-name">'
    )
    self.assertInHTML(
        '<td class="field-name">%s</td>' % editable_name_field,
        table_output,
        msg_prefix='Failed to find "name" list_editable field',
    )
def test_result_list_editable(self):
    """
    Regression test for #14312: list_editable with pagination
    """
    new_parent = Parent.objects.create(name='parent')
    for i in range(200):
        Child.objects.create(name='name %s' % i, parent=new_parent)
    request = self.factory.get('/child/', data={'p': -1})  # Anything outside range
    request.user = self.superuser
    m = ChildAdmin(Child, custom_site)
    # Test with list_editable fields
    m.list_display = ['id', 'name', 'parent']
    m.list_display_links = ['id']
    m.list_editable = ['name']
    # An out-of-range page must raise rather than silently mis-paginate.
    with self.assertRaises(IncorrectLookupParameters):
        m.get_changelist_instance(request)
def test_custom_paginator(self):
    """A ModelAdmin with a custom paginator class uses it for the changelist."""
    parent = Parent.objects.create(name='parent')
    for index in range(200):
        Child.objects.create(name='name %s' % index, parent=parent)

    model_admin = CustomPaginationAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', self.superuser)
    changelist = model_admin.get_changelist_instance(request)
    changelist.get_results(request)
    self.assertIsInstance(changelist.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Basic ManyToMany.
    """
    blues = Genre.objects.create(name='Blues')
    band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
    # Adding the same genre twice is a no-op for the m2m table, but the
    # join used by the filter could still duplicate rows without distinct().
    band.genres.add(blues)
    band.genres.add(blues)
    m = BandAdmin(Band, custom_site)
    request = self.factory.get('/band/', data={'genres': blues.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one Band instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. With an intermediate model.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    # Two memberships for the same musician -> the join yields two rows.
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = GroupAdmin(Group, custom_site)
    request = self.factory.get('/group/', data={'members': lead.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one Group instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
    """
    When using a ManyToMany in list_filter at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    # Filter spans two relations: Concert -> group -> members.
    request = self.factory.get('/concert/', data={'group__members': lead.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one Concert instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Model managed in the
    admin inherits from the one that defines the relationship.
    """
    lead = Musician.objects.create(name='John')
    four = Quartet.objects.create(name='The Beatles')
    Membership.objects.create(group=four, music=lead, role='lead voice')
    Membership.objects.create(group=four, music=lead, role='guitar player')
    m = QuartetAdmin(Quartet, custom_site)
    request = self.factory.get('/quartet/', data={'members': lead.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one Quartet instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Target of the relationship
    inherits from another.
    """
    lead = ChordsMusician.objects.create(name='Player A')
    three = ChordsBand.objects.create(name='The Chords Trio')
    Invitation.objects.create(band=three, player=lead, instrument='guitar')
    Invitation.objects.create(band=three, player=lead, instrument='bass')
    m = ChordsBandAdmin(ChordsBand, custom_site)
    request = self.factory.get('/chordsband/', data={'members': lead.pk})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # There's only one ChordsBand instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
    """
    Regressions tests for #15819: If a field listed in list_filters
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Two children with the same name
    Child.objects.create(parent=parent, name='Daniel')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
    """
    Regressions tests for #15819: If a field listed in search_fields
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Both children match the 'daniel' search (icontains-style match).
    Child.objects.create(parent=parent, name='Danielle')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, custom_site)
    request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
    """
    When using a ManyToMany in search_fields at the second level behind a
    ForeignKey, distinct() must be called and results shouldn't appear more
    than once.
    """
    lead = Musician.objects.create(name='Vox')
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    Membership.objects.create(group=band, music=lead, role='lead voice')
    Membership.objects.create(group=band, music=lead, role='bass player')
    m = ConcertAdmin(Concert, custom_site)
    request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    # There's only one Concert instance
    self.assertEqual(cl.queryset.count(), 1)
def test_pk_in_search_fields(self):
    """Searching via a related 'pk' shortcut ('group__pk') matches by id."""
    band = Group.objects.create(name='The Hype')
    Concert.objects.create(name='Woodstock', group=band)
    concert_admin = ConcertAdmin(Concert, custom_site)
    concert_admin.search_fields = ['group__pk']

    request = self.factory.get('/concert/', data={SEARCH_VAR: band.pk})
    request.user = self.superuser
    changelist = concert_admin.get_changelist_instance(request)
    self.assertEqual(changelist.queryset.count(), 1)

    # A pk that matches no Group yields no results.
    request = self.factory.get('/concert/', data={SEARCH_VAR: band.pk + 5})
    request.user = self.superuser
    changelist = concert_admin.get_changelist_instance(request)
    self.assertEqual(changelist.queryset.count(), 0)
def test_builtin_lookup_in_search_fields(self):
    """A built-in lookup suffix (e.g. '__iexact') in search_fields is honored."""
    band = Group.objects.create(name='The Hype')
    concert = Concert.objects.create(name='Woodstock', group=band)
    m = ConcertAdmin(Concert, custom_site)
    m.search_fields = ['name__iexact']
    request = self.factory.get('/', data={SEARCH_VAR: 'woodstock'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    self.assertCountEqual(cl.queryset, [concert])
    # iexact is exact (case-insensitive), so a prefix must not match.
    request = self.factory.get('/', data={SEARCH_VAR: 'wood'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    self.assertCountEqual(cl.queryset, [])
def test_custom_lookup_in_search_fields(self):
    """A custom registered lookup ('cc' -> Contains) works in search_fields."""
    band = Group.objects.create(name='The Hype')
    concert = Concert.objects.create(name='Woodstock', group=band)
    m = ConcertAdmin(Concert, custom_site)
    m.search_fields = ['group__name__cc']
    with register_lookup(Field, Contains, lookup_name='cc'):
        request = self.factory.get('/', data={SEARCH_VAR: 'Hype'})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        self.assertCountEqual(cl.queryset, [concert])
        # Searching on the concert name must not match via group name.
        request = self.factory.get('/', data={SEARCH_VAR: 'Woodstock'})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        self.assertCountEqual(cl.queryset, [])
def test_spanning_relations_with_custom_lookup_in_search_fields(self):
    """A custom lookup at the end of a multi-hop relation path is resolved."""
    hype = Group.objects.create(name='The Hype')
    concert = Concert.objects.create(name='Woodstock', group=hype)
    vox = Musician.objects.create(name='Vox', age=20)
    Membership.objects.create(music=vox, group=hype)
    # Register a custom lookup on IntegerField to ensure that field
    # traversing logic in ModelAdmin.get_search_results() works.
    with register_lookup(IntegerField, Exact, lookup_name='exactly'):
        m = ConcertAdmin(Concert, custom_site)
        m.search_fields = ['group__members__age__exactly']
        request = self.factory.get('/', data={SEARCH_VAR: '20'})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        self.assertCountEqual(cl.queryset, [concert])
        request = self.factory.get('/', data={SEARCH_VAR: '21'})
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        self.assertCountEqual(cl.queryset, [])
def test_custom_lookup_with_pk_shortcut(self):
    """
    The 'pk' shortcut in search_fields resolves to the model's primary key
    even when the pk field isn't literally named 'pk'.
    """
    self.assertEqual(CharPK._meta.pk.name, 'char_pk')  # Not equal to 'pk'.
    # (Removed a dead `m = admin.ModelAdmin(CustomIdUser, custom_site)`
    # assignment that was immediately overwritten and never used.)
    abc = CharPK.objects.create(char_pk='abc')
    abcd = CharPK.objects.create(char_pk='abcd')
    m = admin.ModelAdmin(CharPK, custom_site)
    m.search_fields = ['pk__exact']
    request = self.factory.get('/', data={SEARCH_VAR: 'abc'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    self.assertCountEqual(cl.queryset, [abc])
    request = self.factory.get('/', data={SEARCH_VAR: 'abcd'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    self.assertCountEqual(cl.queryset, [abcd])
def test_no_distinct_for_m2m_in_list_filter_without_params(self):
    """
    If a ManyToManyField is in list_filter but isn't in any lookup params,
    the changelist's query shouldn't have distinct.
    """
    m = BandAdmin(Band, custom_site)
    for lookup_params in ({}, {'name': 'test'}):
        request = self.factory.get('/band/', lookup_params)
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        self.assertFalse(cl.queryset.query.distinct)
    # A ManyToManyField in params does have distinct applied.
    request = self.factory.get('/band/', {'genres': '0'})
    request.user = self.superuser
    cl = m.get_changelist_instance(request)
    self.assertTrue(cl.queryset.query.distinct)
def test_pagination(self):
    """
    Regression tests for #12893: Pagination in admins changelist doesn't
    use queryset set by modeladmin.
    """
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    request = self.factory.get('/child/')
    request.user = self.superuser
    # Test default queryset
    m = ChildAdmin(Child, custom_site)
    cl = m.get_changelist_instance(request)
    self.assertEqual(cl.queryset.count(), 60)
    self.assertEqual(cl.paginator.count, 60)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
    # Test custom queryset (FilteredChildAdmin narrows to 30 of the 60).
    m = FilteredChildAdmin(Child, custom_site)
    cl = m.get_changelist_instance(request)
    self.assertEqual(cl.queryset.count(), 30)
    self.assertEqual(cl.paginator.count, 30)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
    """
    Regression test for #13196: output of functions should be localized
    in the changelist.
    """
    self.client.force_login(self.superuser)
    event = Event.objects.create(date=datetime.date.today())
    response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
    # The localized date must appear; the raw ISO form must not.
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, str(event.date))
def test_dynamic_list_display(self):
    """
    Regression tests for #14206: dynamic list_display support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')
    # Test with user 'noparents' — the dynamic admin hides 'parent'.
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertNotContains(response, 'Parent object')
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ['name', 'age'])
    self.assertEqual(list_display_links, ['name'])
    # Test with user 'parents' — the 'parent' column is shown.
    m = DynamicListDisplayChildAdmin(Child, custom_site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
    custom_site.unregister(Child)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['parent'])
    # Test default implementation (plain ChildAdmin re-registered).
    custom_site.register(Child, ChildAdmin)
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
def test_show_all(self):
    """The ALL_VAR 'show all' parameter respects list_max_show_all."""
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    # Add "show all" parameter to request
    request = self.factory.get('/child/', data={ALL_VAR: ''})
    request.user = self.superuser
    # Test valid "show all" request (number of total objects is under max)
    m = ChildAdmin(Child, custom_site)
    m.list_max_show_all = 200
    # 200 is the max we'll pass to ChangeList
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 60)
    # Test invalid "show all" request (number of total objects over max)
    # falls back to paginated pages
    m = ChildAdmin(Child, custom_site)
    m.list_max_show_all = 30
    # 30 is the max we'll pass to ChangeList for this test
    cl = m.get_changelist_instance(request)
    cl.get_results(request)
    # NOTE(review): 10 is presumably ChildAdmin's list_per_page — verify.
    self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
    """
    Regression tests for #16257: dynamic list_display_links support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(1, 10):
        Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
    m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/child/', superuser)
    response = m.changelist_view(request)
    # Each row links to the change page via the dynamically chosen column
    # (age == id for the fixtures created above).
    for i in range(1, 10):
        link = reverse('admin:admin_changelist_child_change', args=(i,))
        self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
    """#15185 -- Allow no links from the 'change list' view grid."""
    parent = Parent.objects.create(name='parent')
    model_admin = NoListDisplayLinksParentAdmin(Parent, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/parent/', superuser)
    response = model_admin.changelist_view(request)
    change_url = reverse(
        'admin:admin_changelist_parent_change', args=(parent.pk,))
    self.assertNotContains(response, '<a href="%s">' % change_url)
def test_tuple_list_display(self):
    """Rendering a changelist whose model uses non-pk tuple-ish display
    fields and a reverse one-to-one column doesn't blow up."""
    swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
    swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
    model_admin = SwallowAdmin(Swallow, custom_site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/swallow/', superuser)
    response = model_admin.changelist_view(request)
    # just want to ensure it doesn't blow up during rendering
    self.assertContains(response, str(swallow.origin))
    self.assertContains(response, str(swallow.load))
    self.assertContains(response, str(swallow.speed))
    # Reverse one-to-one relations should work.
    self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
    self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
    def test_multiuser_edit(self):
        """
        Simultaneous edits of list_editable fields on the changelist by
        different users must not result in one user's edits creating a new
        object instead of modifying the correct existing object (#11313).
        """
        # To replicate this issue, simulate the following steps:
        # 1. User1 opens an admin changelist with list_editable fields.
        # 2. User2 edits object "Foo" such that it moves to another page in
        #    the pagination order and saves.
        # 3. User1 edits object "Foo" and saves.
        # 4. The edit made by User1 does not get applied to object "Foo" but
        #    instead is used to create a new object (bug).
        # For this test, order the changelist by the 'speed' attribute and
        # display 3 objects per page (SwallowAdmin.list_per_page = 3).
        # Setup the test to reflect the DB state after step 2 where User2 has
        # edited the first swallow object's speed from '4' to '1'.
        a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
        b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
        c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
        d = Swallow.objects.create(origin='Swallow D', load=9, speed=9)
        superuser = self._create_superuser('superuser')
        self.client.force_login(superuser)
        changelist_url = reverse('admin:admin_changelist_swallow_changelist')
        # Send the POST from User1 for step 3. It's still using the changelist
        # ordering from before User2's edits in step 2.
        data = {
            'form-TOTAL_FORMS': '3',
            'form-INITIAL_FORMS': '3',
            'form-MIN_NUM_FORMS': '0',
            'form-MAX_NUM_FORMS': '1000',
            'form-0-uuid': str(d.pk),
            'form-1-uuid': str(c.pk),
            'form-2-uuid': str(a.pk),
            'form-0-load': '9.0',
            'form-0-speed': '9.0',
            'form-1-load': '5.0',
            'form-1-speed': '5.0',
            'form-2-load': '5.0',
            'form-2-speed': '4.0',
            '_save': 'Save',
        }
        # NOTE(review): ``extra={'o': '-2'}`` is consumed by the test client's
        # **extra and becomes a WSGI environ entry, not a querystring
        # parameter -- the intended '-2' ordering likely never reaches the
        # view; confirm and switch to '?o=-2' if so.
        response = self.client.post(changelist_url, data, follow=True, extra={'o': '-2'})
        # The object User1 edited in step 3 is displayed on the changelist and
        # has the correct edits applied.
        self.assertContains(response, '1 swallow was changed successfully.')
        self.assertContains(response, a.origin)
        a.refresh_from_db()
        self.assertEqual(a.load, float(data['form-2-load']))
        self.assertEqual(a.speed, float(data['form-2-speed']))
        b.refresh_from_db()
        self.assertEqual(b.load, 2)
        self.assertEqual(b.speed, 2)
        c.refresh_from_db()
        self.assertEqual(c.load, float(data['form-1-load']))
        self.assertEqual(c.speed, float(data['form-1-speed']))
        d.refresh_from_db()
        self.assertEqual(d.load, float(data['form-0-load']))
        self.assertEqual(d.speed, float(data['form-0-speed']))
        # No new swallows were created.
        self.assertEqual(len(Swallow.objects.all()), 4)
    def test_get_edited_object_ids(self):
        """
        _get_edited_object_pks() collects the primary keys submitted in the
        list_editable formset POST data.
        """
        a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
        b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
        c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
        superuser = self._create_superuser('superuser')
        self.client.force_login(superuser)
        changelist_url = reverse('admin:admin_changelist_swallow_changelist')
        m = SwallowAdmin(Swallow, custom_site)
        data = {
            'form-TOTAL_FORMS': '3',
            'form-INITIAL_FORMS': '3',
            'form-MIN_NUM_FORMS': '0',
            'form-MAX_NUM_FORMS': '1000',
            'form-0-uuid': str(a.pk),
            'form-1-uuid': str(b.pk),
            'form-2-uuid': str(c.pk),
            'form-0-load': '9.0',
            'form-0-speed': '9.0',
            'form-1-load': '5.0',
            'form-1-speed': '5.0',
            'form-2-load': '5.0',
            'form-2-speed': '4.0',
            '_save': 'Save',
        }
        request = self.factory.post(changelist_url, data=data)
        pks = m._get_edited_object_pks(request, prefix='form')
        # Order is not guaranteed, so compare sorted.
        self.assertEqual(sorted(pks), sorted([str(a.pk), str(b.pk), str(c.pk)]))
    def test_get_list_editable_queryset(self):
        """
        _get_list_editable_queryset() narrows the queryset to the POSTed
        pks, and falls back to the full queryset on invalid pk data.
        """
        a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
        Swallow.objects.create(origin='Swallow B', load=2, speed=2)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '2',
            'form-MIN_NUM_FORMS': '0',
            'form-MAX_NUM_FORMS': '1000',
            'form-0-uuid': str(a.pk),
            'form-0-load': '10',
            '_save': 'Save',
        }
        superuser = self._create_superuser('superuser')
        self.client.force_login(superuser)
        changelist_url = reverse('admin:admin_changelist_swallow_changelist')
        m = SwallowAdmin(Swallow, custom_site)
        request = self.factory.post(changelist_url, data=data)
        queryset = m._get_list_editable_queryset(request, prefix='form')
        self.assertEqual(queryset.count(), 1)
        data['form-0-uuid'] = 'INVALD_PRIMARY_KEY'
        # The unfiltered queryset is returned if there's invalid data.
        request = self.factory.post(changelist_url, data=data)
        queryset = m._get_list_editable_queryset(request, prefix='form')
        self.assertEqual(queryset.count(), 2)
    def test_changelist_view_list_editable_changed_objects_uses_filter(self):
        """list_editable edits use a filtered queryset to limit memory usage."""
        a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
        Swallow.objects.create(origin='Swallow B', load=2, speed=2)
        data = {
            'form-TOTAL_FORMS': '2',
            'form-INITIAL_FORMS': '2',
            'form-MIN_NUM_FORMS': '0',
            'form-MAX_NUM_FORMS': '1000',
            'form-0-uuid': str(a.pk),
            'form-0-load': '10',
            '_save': 'Save',
        }
        superuser = self._create_superuser('superuser')
        self.client.force_login(superuser)
        changelist_url = reverse('admin:admin_changelist_swallow_changelist')
        with CaptureQueriesContext(connection) as context:
            response = self.client.post(changelist_url, data=data)
            self.assertEqual(response.status_code, 200)
            # NOTE(review): index 4 assumes a fixed query order for this
            # request (session/auth queries first); re-check if that changes.
            self.assertIn('WHERE', context.captured_queries[4]['sql'])
            self.assertIn('IN', context.captured_queries[4]['sql'])
            # Check only the first few characters since the UUID may have dashes.
            self.assertIn(str(a.pk)[:8], context.captured_queries[4]['sql'])
    def test_deterministic_order_for_unordered_model(self):
        """
        The primary key is used in the ordering of the changelist's results to
        guarantee a deterministic order, even when the model doesn't have any
        default ordering defined (#17198).
        """
        superuser = self._create_superuser('superuser')
        for counter in range(1, 51):
            UnorderedObject.objects.create(id=counter, bool=True)
        class UnorderedObjectAdmin(admin.ModelAdmin):
            list_per_page = 10
        def check_results_order(ascending=False):
            """Walk all five pages and check that ids arrive in strict order."""
            custom_site.register(UnorderedObject, UnorderedObjectAdmin)
            model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
            counter = 0 if ascending else 51
            for page in range(0, 5):
                request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
                response = model_admin.changelist_view(request)
                for result in response.context_data['cl'].result_list:
                    counter += 1 if ascending else -1
                    self.assertEqual(result.id, counter)
            custom_site.unregister(UnorderedObject)
        # When no order is defined at all, everything is ordered by '-pk'.
        check_results_order()
        # When an order field is defined but multiple records have the same
        # value for that field, make sure everything gets ordered by -pk as well.
        UnorderedObjectAdmin.ordering = ['bool']
        check_results_order()
        # When order fields are defined, including the pk itself, use them.
        UnorderedObjectAdmin.ordering = ['bool', '-pk']
        check_results_order()
        UnorderedObjectAdmin.ordering = ['bool', 'pk']
        check_results_order(ascending=True)
        UnorderedObjectAdmin.ordering = ['-id', 'bool']
        check_results_order()
        UnorderedObjectAdmin.ordering = ['id', 'bool']
        check_results_order(ascending=True)
    def test_deterministic_order_for_model_ordered_by_its_manager(self):
        """
        The primary key is used in the ordering of the changelist's results to
        guarantee a deterministic order, even when the model has a manager that
        defines a default ordering (#17198).
        """
        superuser = self._create_superuser('superuser')
        for counter in range(1, 51):
            OrderedObject.objects.create(id=counter, bool=True, number=counter)
        class OrderedObjectAdmin(admin.ModelAdmin):
            list_per_page = 10
        def check_results_order(ascending=False):
            """Walk all five pages and check that ids arrive in strict order."""
            custom_site.register(OrderedObject, OrderedObjectAdmin)
            model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
            counter = 0 if ascending else 51
            for page in range(0, 5):
                request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
                response = model_admin.changelist_view(request)
                for result in response.context_data['cl'].result_list:
                    counter += 1 if ascending else -1
                    self.assertEqual(result.id, counter)
            custom_site.unregister(OrderedObject)
        # When no order is defined at all, use the model's default ordering (i.e. 'number')
        check_results_order(ascending=True)
        # When an order field is defined but multiple records have the same
        # value for that field, make sure everything gets ordered by -pk as well.
        OrderedObjectAdmin.ordering = ['bool']
        check_results_order()
        # When order fields are defined, including the pk itself, use them.
        OrderedObjectAdmin.ordering = ['bool', '-pk']
        check_results_order()
        OrderedObjectAdmin.ordering = ['bool', 'pk']
        check_results_order(ascending=True)
        OrderedObjectAdmin.ordering = ['-id', 'bool']
        check_results_order()
        OrderedObjectAdmin.ordering = ['id', 'bool']
        check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
    def test_pagination_page_range(self):
        """
        Regression tests for ticket #15653: ensure the number of pages
        generated for changelist views are correct.
        """
        # instantiating and setting up ChangeList object
        m = GroupAdmin(Group, custom_site)
        request = self.factory.get('/group/')
        request.user = self.superuser
        cl = m.get_changelist_instance(request)
        per_page = cl.list_per_page = 10
        # '.' entries in the expected ranges presumably mark elided page
        # numbers (ellipsis) -- see the pagination() helper.
        for page_num, objects_count, expected_page_range in [
            (0, per_page, []),
            (0, per_page * 2, list(range(2))),
            (5, per_page * 11, list(range(11))),
            (5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
            (6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
            (6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
        ]:
            # assuming we have exactly `objects_count` objects
            Group.objects.all().delete()
            for i in range(objects_count):
                Group.objects.create(name='test band')
            # setting page number and calculating page range
            cl.page_num = page_num
            cl.get_results(request)
            real_page_range = pagination(cl)['page_range']
            self.assertEqual(expected_page_range, list(real_page_range))
def test_object_tools_displayed_no_add_permission(self):
"""
When ModelAdmin.has_add_permission() returns False, the object-tools
block is still shown.
"""
superuser = self._create_superuser('superuser')
m = EventAdmin(Event, custom_site)
request = self._mocked_authenticated_request('/event/', superuser)
self.assertFalse(m.has_add_permission(request))
response = m.changelist_view(request)
self.assertIn('<ul class="object-tools">', response.rendered_content)
# The "Add" button inside the object-tools shouldn't appear.
self.assertNotIn('Add ', response.rendered_content)
class GetAdminLogTests(TestCase):
    """Tests for the {% get_admin_log %} template tag's parsing and output."""
    def test_custom_user_pk_not_named_id(self):
        """
        {% get_admin_log %} works if the user model's primary key isn't named
        'id'.
        """
        context = Context({'user': CustomIdUser()})
        template = Template('{% load log %}{% get_admin_log 10 as admin_log for_user user %}')
        # Rendering produces no output; the tag only populates the context.
        self.assertEqual(template.render(context), '')
    def test_no_user(self):
        """{% get_admin_log %} works without specifying a user."""
        user = User(username='jondoe', password='secret', email='super@example.com')
        user.save()
        ct = ContentType.objects.get_for_model(User)
        # Log an entry with action flag 1 so there is something to render.
        LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
        t = Template(
            '{% load log %}'
            '{% get_admin_log 100 as admin_log %}'
            '{% for entry in admin_log %}'
            '{{ entry|safe }}'
            '{% endfor %}'
        )
        self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
    def test_missing_args(self):
        """Too few arguments raise TemplateSyntaxError at parse time."""
        msg = "'get_admin_log' statements require two arguments"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            Template('{% load log %}{% get_admin_log 10 as %}')
    def test_non_integer_limit(self):
        """The limit argument must be a literal integer."""
        msg = "First argument to 'get_admin_log' must be an integer"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            Template('{% load log %}{% get_admin_log "10" as admin_log for_user user %}')
    def test_without_as(self):
        """The second token must be the literal 'as'."""
        msg = "Second argument to 'get_admin_log' must be 'as'"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            Template('{% load log %}{% get_admin_log 10 ad admin_log for_user user %}')
    def test_without_for_user(self):
        """The fourth token must be the literal 'for_user'."""
        msg = "Fourth argument to 'get_admin_log' must be 'for_user'"
        with self.assertRaisesMessage(TemplateSyntaxError, msg):
            Template('{% load log %}{% get_admin_log 10 as admin_log foruser user %}')
@override_settings(ROOT_URLCONF='admin_changelist.urls')
class SeleniumTests(AdminSeleniumTestCase):
    """Browser-driven tests for changelist row-selection behavior."""
    available_apps = ['admin_changelist'] + AdminSeleniumTestCase.available_apps
    def setUp(self):
        # The superuser is both the login user and the single changelist row.
        User.objects.create_superuser(username='super', password='secret', email=None)
    def test_add_row_selection(self):
        """
        The status line for selected rows gets updated correctly (#22038).
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:auth_user_changelist'))
        form_id = '#changelist-form'
        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements_by_css_selector(
            '%s #result_list tbody tr' % form_id)
        self.assertEqual(len(rows), 1)
        # Test current selection
        selection_indicator = self.selenium.find_element_by_css_selector(
            '%s .action-counter' % form_id)
        self.assertEqual(selection_indicator.text, "0 of 1 selected")
        # Select a row and check again
        row_selector = self.selenium.find_element_by_css_selector(
            '%s #result_list tbody tr:first-child .action-select' % form_id)
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
|
|
# orm/__init__.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from . import exc
from .mapper import (
Mapper,
_mapper_registry,
class_mapper,
configure_mappers,
reconstructor,
validates
)
from .interfaces import (
EXT_CONTINUE,
EXT_STOP,
PropComparator,
)
from .deprecated_interfaces import (
MapperExtension,
SessionExtension,
AttributeExtension,
)
from .util import (
aliased,
join,
object_mapper,
outerjoin,
polymorphic_union,
was_deleted,
with_parent,
with_polymorphic,
)
from .properties import ColumnProperty
from .relationships import RelationshipProperty
from .descriptor_props import (
ComparableProperty,
CompositeProperty,
SynonymProperty,
)
from .relationships import (
foreign,
remote,
)
from .session import (
Session,
object_session,
sessionmaker,
make_transient,
make_transient_to_detached
)
from .scoping import (
scoped_session
)
from . import mapper as mapperlib
from .query import AliasOption, Query, Bundle
from ..util.langhelpers import public_factory
from .. import util as _sa_util
from . import strategies as _strategies
def create_session(bind=None, **kwargs):
    # Raw docstring: ``\*\*kwargs`` would otherwise be an invalid escape
    # sequence (DeprecationWarning, and a SyntaxWarning on newer Pythons).
    r"""Create a new :class:`.Session`
    with no automation enabled by default.

    This function is used primarily for testing.  The usual
    route to :class:`.Session` creation is via its constructor
    or the :func:`.sessionmaker` function.

    :param bind: optional, a single Connectable to use for all
      database access in the created
      :class:`~sqlalchemy.orm.session.Session`.

    :param \*\*kwargs: optional, passed through to the
      :class:`.Session` constructor.

    :returns: an :class:`~sqlalchemy.orm.session.Session` instance

    The defaults of create_session() are the opposite of that of
    :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
    False, ``autocommit`` is True.  In this sense the session acts
    more like the "classic" SQLAlchemy 0.3 session with these.

    Usage::

      >>> from sqlalchemy.orm import create_session
      >>> session = create_session()

    It is recommended to use :func:`sessionmaker` instead of
    create_session().

    """
    # Only fill in defaults the caller didn't supply explicitly.
    kwargs.setdefault('autoflush', False)
    kwargs.setdefault('autocommit', True)
    kwargs.setdefault('expire_on_commit', False)
    return Session(bind=bind, **kwargs)
relationship = public_factory(RelationshipProperty, ".orm.relationship")
def relation(*arg, **kw):
    """A synonym for :func:`relationship` (legacy name); forwards all
    arguments unchanged."""
    return relationship(*arg, **kw)
def dynamic_loader(argument, **kw):
    """Construct a dynamically-loading mapper property.

    Shorthand for passing ``lazy='dynamic'`` to :func:`relationship`::

        dynamic_loader(SomeClass)

        # is the same as

        relationship(SomeClass, lazy="dynamic")

    See the section :ref:`dynamic_relationship` for more details
    on dynamic loading.

    """
    # Force the dynamic strategy, overriding any caller-supplied 'lazy'.
    return relationship(argument, **dict(kw, lazy='dynamic'))
# Public factories exposing the column-based property constructors.
column_property = public_factory(ColumnProperty, ".orm.column_property")
composite = public_factory(CompositeProperty, ".orm.composite")
def backref(name, **kwargs):
    """Create a back reference with explicit keyword arguments, which are the
    same arguments one can send to :func:`relationship`.

    Used with the ``backref`` keyword argument to :func:`relationship` in
    place of a string argument, e.g.::

        'items':relationship(SomeItem, backref=backref('parent', lazy='subquery'))

    """
    # relationship() accepts a (name, options-dict) pair wherever a plain
    # backref name string is accepted.
    return name, kwargs
def deferred(*columns, **kw):
    # Raw docstring: ``\*columns`` / ``\**kw`` would otherwise be invalid
    # escape sequences (DeprecationWarning, SyntaxWarning on newer Pythons).
    r"""Indicate a column-based mapped attribute that by default will
    not load unless accessed.

    :param \*columns: columns to be mapped.  This is typically a single
     :class:`.Column` object, however a collection is supported in order
     to support multiple columns mapped under the same attribute.

    :param \**kw: additional keyword arguments passed to :class:`.ColumnProperty`.

    .. seealso::

        :ref:`deferred`

    """
    return ColumnProperty(deferred=True, *columns, **kw)
# Public factories for the mapper itself and the remaining property types.
mapper = public_factory(Mapper, ".orm.mapper")
synonym = public_factory(SynonymProperty, ".orm.synonym")
comparable_property = public_factory(ComparableProperty,
                                     ".orm.comparable_property")
@_sa_util.deprecated("0.7", message=":func:`.compile_mappers` "
                     "is renamed to :func:`.configure_mappers`")
def compile_mappers():
    """Initialize the inter-mapper relationships of all mappers that have
    been defined.

    """
    # Deprecated shim kept for backwards compatibility; delegates directly.
    configure_mappers()
def clear_mappers():
    """Remove all mappers from all classes.

    This function removes all instrumentation from classes and disposes
    of their associated mappers.  Once called, the classes are unmapped
    and can be later re-mapped with new mappers.

    :func:`.clear_mappers` is *not* for normal use, as there is literally no
    valid usage for it outside of very specific testing scenarios. Normally,
    mappers are permanent structural components of user-defined classes, and
    are never discarded independently of their class.  If a mapped class itself
    is garbage collected, its mapper is automatically disposed of as well. As
    such, :func:`.clear_mappers` is only for usage in test suites that re-use
    the same classes with different mappings, which is itself an extremely rare
    use case - the only such use case is in fact SQLAlchemy's own test suite,
    and possibly the test suites of other ORM extension libraries which
    intend to test various combinations of mapper construction upon a fixed
    set of classes.

    """
    # Serialize against configure_mappers() via the same mutex; the 'with'
    # statement guarantees release even if dispose() raises.
    with mapperlib._CONFIGURE_MUTEX:
        while _mapper_registry:
            try:
                # can't even reliably call list(weakdict) in jython
                mapper, b = _mapper_registry.popitem()
                mapper.dispose()
            except KeyError:
                # entry vanished concurrently (weak registry); keep draining
                pass
from . import strategy_options
joinedload = strategy_options.joinedload._unbound_fn
joinedload_all = strategy_options.joinedload._unbound_all_fn
contains_eager = strategy_options.contains_eager._unbound_fn
defer = strategy_options.defer._unbound_fn
undefer = strategy_options.undefer._unbound_fn
undefer_group = strategy_options.undefer_group._unbound_fn
load_only = strategy_options.load_only._unbound_fn
lazyload = strategy_options.lazyload._unbound_fn
lazyload_all = strategy_options.lazyload_all._unbound_all_fn
subqueryload = strategy_options.subqueryload._unbound_fn
subqueryload_all = strategy_options.subqueryload_all._unbound_all_fn
immediateload = strategy_options.immediateload._unbound_fn
noload = strategy_options.noload._unbound_fn
defaultload = strategy_options.defaultload._unbound_fn
from .strategy_options import Load
def eagerload(*args, **kwargs):
    """A synonym for :func:`joinedload()` (legacy name)."""
    return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
    """A synonym for :func:`joinedload_all()` (legacy name)."""
    return joinedload_all(*args, **kwargs)
contains_alias = public_factory(AliasOption, ".orm.contains_alias")
def __go(lcls):
    """Finish module setup: compute ``__all__`` and resolve deferred
    cross-module dependencies."""
    global __all__
    from .. import util as sa_util
    from . import dynamic
    from . import events
    import inspect as _inspect
    # The public API is every non-underscore, non-module name bound in the
    # module namespace at import time.
    __all__ = sorted(name for name, obj in lcls.items()
                     if not (name.startswith('_') or _inspect.ismodule(obj)))
    _sa_util.dependencies.resolve_all("sqlalchemy.orm")
# Run the bootstrap against this module's namespace.
__go(locals())
|
|
# Copyright (c) 2014 OpenStack Foundation
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For IronicHostManager
"""
import mock
from oslo.serialization import jsonutils
from nova import db
from nova import exception
from nova.scheduler import filters
from nova.scheduler import host_manager
from nova.scheduler import ironic_host_manager
from nova import test
from nova.tests.scheduler import ironic_fakes
class FakeFilterClass1(filters.BaseHostFilter):
    """No-op host filter; tests stub its _filter_one instead of this body."""
    def host_passes(self, host_state, filter_properties):
        pass
class FakeFilterClass2(filters.BaseHostFilter):
    """No-op host filter used only to exercise filter-class selection."""
    def host_passes(self, host_state, filter_properties):
        pass
class IronicHostManagerTestCase(test.NoDBTestCase):
    """Test case for IronicHostManager class."""
    def setUp(self):
        super(IronicHostManagerTestCase, self).setUp()
        self.host_manager = ironic_host_manager.IronicHostManager()
    def test_manager_public_api_signatures(self):
        # IronicHostManager must keep the same public API as HostManager.
        self.assertPublicAPISignatures(host_manager.HostManager(),
                                       self.host_manager)
    def test_state_public_api_signatures(self):
        # IronicNodeState must keep the same public API as HostState.
        self.assertPublicAPISignatures(
            host_manager.HostState("dummy",
                                   "dummy"),
            ironic_host_manager.IronicNodeState("dummy",
                                                "dummy")
        )
    def test_get_all_host_states(self):
        # Ensure .service is set and we have the values we expect to.
        context = 'fake_context'
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
        self.mox.ReplayAll()
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(len(host_states_map), 4)
        for i in range(4):
            compute_node = ironic_fakes.COMPUTE_NODES[i]
            # Host states are keyed by (host, nodename).
            host = compute_node['service']['host']
            node = compute_node['hypervisor_hostname']
            state_key = (host, node)
            self.assertEqual(compute_node['service'],
                             host_states_map[state_key].service)
            self.assertEqual(jsonutils.loads(compute_node['stats']),
                             host_states_map[state_key].stats)
            self.assertEqual(compute_node['free_ram_mb'],
                             host_states_map[state_key].free_ram_mb)
            # free_disk_gb is stored in MB on the host state.
            self.assertEqual(compute_node['free_disk_gb'] * 1024,
                             host_states_map[state_key].free_disk_mb)
class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
    """Test case for IronicHostManager class."""

    def setUp(self):
        super(IronicHostManagerChangedNodesTestCase, self).setUp()
        self.host_manager = ironic_host_manager.IronicHostManager()
        ironic_driver = "nova.virt.ironic.driver.IronicDriver"
        supported_instances = '[["i386", "baremetal", "baremetal"]]'
        # Fake compute node record as returned by the DB API.
        # PEP 8 (E251): no spaces around '=' in keyword arguments --
        # hypervisor_version/hypervisor_hostname fixed to match the rest.
        self.compute_node = dict(id=1, local_gb=10, memory_mb=1024, vcpus=1,
                                 vcpus_used=0, local_gb_used=0, memory_mb_used=0,
                                 updated_at=None, cpu_info='baremetal cpu',
                                 stats=jsonutils.dumps(dict(
                                     ironic_driver=ironic_driver,
                                     cpu_arch='i386')),
                                 supported_instances=supported_instances,
                                 free_disk_gb=10, free_ram_mb=1024,
                                 hypervisor_type='ironic',
                                 hypervisor_version=1,
                                 hypervisor_hostname='fake_host')

    @mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
    def test_create_ironic_node_state(self, init_mock):
        """Ironic compute nodes get an IronicNodeState host state."""
        init_mock.return_value = None
        compute = {'cpu_info': 'baremetal cpu'}
        host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
                                                      compute=compute)
        self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))

    @mock.patch.object(host_manager.HostState, '__init__')
    def test_create_non_ironic_host_state(self, init_mock):
        """Non-Ironic compute nodes fall back to the generic HostState."""
        init_mock.return_value = None
        compute = {'cpu_info': 'other cpu'}
        host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
                                                      compute=compute)
        self.assertIs(host_manager.HostState, type(host_state))

    def test_get_all_host_states_after_delete_one(self):
        """A deleted node is dropped from host_state_map on refresh."""
        context = 'fake_context'
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        # all nodes active for first call
        db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
        # remove node4 for second call
        running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
                         if n.get('hypervisor_hostname') != 'node4uuid']
        db.compute_node_get_all(context).AndReturn(running_nodes)
        self.mox.ReplayAll()
        self.host_manager.get_all_host_states(context)
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(3, len(host_states_map))

    def test_get_all_host_states_after_delete_all(self):
        """host_state_map empties out when all nodes disappear."""
        context = 'fake_context'
        self.mox.StubOutWithMock(db, 'compute_node_get_all')
        # all nodes active for first call
        db.compute_node_get_all(context).AndReturn(ironic_fakes.COMPUTE_NODES)
        # remove all nodes for second call
        db.compute_node_get_all(context).AndReturn([])
        self.mox.ReplayAll()
        self.host_manager.get_all_host_states(context)
        self.host_manager.get_all_host_states(context)
        host_states_map = self.host_manager.host_state_map
        self.assertEqual(0, len(host_states_map))

    def test_update_from_compute_node(self):
        """Node state mirrors the compute node record (disk GB -> MB)."""
        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
        host.update_from_compute_node(self.compute_node)
        self.assertEqual(1024, host.free_ram_mb)
        self.assertEqual(1024, host.total_usable_ram_mb)
        self.assertEqual(10240, host.free_disk_mb)
        self.assertEqual(1, host.vcpus_total)
        self.assertEqual(0, host.vcpus_used)
        self.assertEqual(jsonutils.loads(self.compute_node['stats']),
                         host.stats)
        self.assertEqual('ironic', host.hypervisor_type)
        self.assertEqual(1, host.hypervisor_version)
        self.assertEqual('fake_host', host.hypervisor_hostname)

    def test_consume_identical_instance_from_compute(self):
        """An exactly-fitting instance consumes all node resources."""
        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
        host.update_from_compute_node(self.compute_node)
        instance = dict(root_gb=10, ephemeral_gb=0, memory_mb=1024, vcpus=1)
        host.consume_from_instance(instance)
        self.assertEqual(1, host.vcpus_used)
        self.assertEqual(0, host.free_ram_mb)
        self.assertEqual(0, host.free_disk_mb)

    def test_consume_larger_instance_from_compute(self):
        """An oversized instance also zeroes out the node's resources."""
        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
        host.update_from_compute_node(self.compute_node)
        instance = dict(root_gb=20, ephemeral_gb=0, memory_mb=2048, vcpus=2)
        host.consume_from_instance(instance)
        self.assertEqual(1, host.vcpus_used)
        self.assertEqual(0, host.free_ram_mb)
        self.assertEqual(0, host.free_disk_mb)

    def test_consume_smaller_instance_from_compute(self):
        """An undersized instance still zeroes out the node's resources."""
        host = ironic_host_manager.IronicNodeState("fakehost", "fakenode")
        host.update_from_compute_node(self.compute_node)
        instance = dict(root_gb=5, ephemeral_gb=0, memory_mb=512, vcpus=1)
        host.consume_from_instance(instance)
        self.assertEqual(1, host.vcpus_used)
        self.assertEqual(0, host.free_ram_mb)
        self.assertEqual(0, host.free_disk_mb)
class IronicHostManagerTestFilters(test.NoDBTestCase):
"""Test filters work for IronicHostManager."""
    def setUp(self):
        super(IronicHostManagerTestFilters, self).setUp()
        self.host_manager = ironic_host_manager.IronicHostManager()
        # Four single-node hosts plus one host ('fake_multihost') with
        # four nodes, indices [0:4] and [4:8] respectively.
        self.fake_hosts = [ironic_host_manager.IronicNodeState(
            'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
        self.fake_hosts += [ironic_host_manager.IronicNodeState(
            'fake_multihost', 'fake-node%s' % x) for x in range(1, 5)]
    def test_choose_host_filters_not_found(self):
        """An unknown configured filter name raises SchedulerHostFilterNotFound."""
        self.flags(scheduler_default_filters='FakeFilterClass3')
        self.host_manager.filter_classes = [FakeFilterClass1,
                                            FakeFilterClass2]
        self.assertRaises(exception.SchedulerHostFilterNotFound,
                          self.host_manager._choose_host_filters, None)
    def test_choose_host_filters(self):
        """Only the configured default filter class is selected."""
        self.flags(scheduler_default_filters=['FakeFilterClass2'])
        self.host_manager.filter_classes = [FakeFilterClass1,
                                            FakeFilterClass2]
        # Exactly one matching filter class should be returned.
        filter_classes = self.host_manager._choose_host_filters(None)
        self.assertEqual(1, len(filter_classes))
        self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
    def _mock_get_filtered_hosts(self, info, specified_filters=None):
        """Stub filter selection so FakeFilterClass1 accepts every host.

        Each filtered object and its filter_properties are recorded into
        *info* for later verification by _verify_result().
        """
        self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
        info['got_objs'] = []
        info['got_fprops'] = []
        def fake_filter_one(_self, obj, filter_props):
            # Accept everything; just record what was seen.
            info['got_objs'].append(obj)
            info['got_fprops'].append(filter_props)
            return True
        self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
        self.host_manager._choose_host_filters(specified_filters).AndReturn(
            [FakeFilterClass1])
    def _verify_result(self, info, result, filters=True):
        """Check recorded filter calls and the final host list against *info*.

        With filters=False only the result set is checked (used when
        force_hosts/force_nodes bypasses the filter pipeline).
        """
        for x in info['got_fprops']:
            self.assertEqual(x, info['expected_fprops'])
        if filters:
            self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
        self.assertEqual(set(info['expected_objs']), set(result))
    def test_get_filtered_hosts(self):
        """All hosts pass when no ignore/force constraints are given."""
        fake_properties = {'moo': 1, 'cow': 2}
        info = {'expected_objs': self.fake_hosts,
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        self.mox.ReplayAll()
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result)
    def test_get_filtered_hosts_with_specified_filters(self):
        """Explicit filter_class_names are forwarded to filter selection."""
        fake_properties = {'moo': 1, 'cow': 2}
        specified_filters = ['FakeFilterClass1', 'FakeFilterClass2']
        info = {'expected_objs': self.fake_hosts,
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info, specified_filters)
        self.mox.ReplayAll()
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
            fake_properties, filter_class_names=specified_filters)
        self._verify_result(info, result)
    def test_get_filtered_hosts_with_ignore(self):
        """ignore_hosts removes all matching hosts (including multi-node)."""
        fake_properties = {'ignore_hosts': ['fake_host1', 'fake_host3',
                                            'fake_host5', 'fake_multihost']}
        # [1] and [3] are host2 and host4
        info = {'expected_objs': [self.fake_hosts[1], self.fake_hosts[3]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        self.mox.ReplayAll()
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result)
    def test_get_filtered_hosts_with_force_hosts(self):
        """force_hosts keeps only the named hosts, bypassing the filters."""
        fake_properties = {'force_hosts': ['fake_host1', 'fake_host3',
                                           'fake_host5']}
        # [0] and [2] are host1 and host3
        info = {'expected_objs': [self.fake_hosts[0], self.fake_hosts[2]],
                'expected_fprops': fake_properties}
        self._mock_get_filtered_hosts(info)
        self.mox.ReplayAll()
        result = self.host_manager.get_filtered_hosts(self.fake_hosts,
                                                      fake_properties)
        self._verify_result(info, result, False)
def test_get_filtered_hosts_with_no_matching_force_hosts(self):
fake_properties = {'force_hosts': ['fake_host5', 'fake_host6']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_and_force_hosts(self):
# Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
# only fake_host3 should be left.
info = {'expected_objs': [self.fake_hosts[2]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_host_and_many_nodes(self):
# Ensure all nodes returned for a host with many nodes
fake_properties = {'force_hosts': ['fake_multihost']}
info = {'expected_objs': [self.fake_hosts[4], self.fake_hosts[5],
self.fake_hosts[6], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_nodes(self):
fake_properties = {'force_nodes': ['fake-node2', 'fake-node4',
'fake-node9']}
# [5] is fake-node2, [7] is fake-node4
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_nodes(self):
# Ensure only overlapping results if both force host and node
fake_properties = {'force_hosts': ['fake_host1', 'fake_multihost'],
'force_nodes': ['fake-node2', 'fake-node9']}
# [5] is fake-node2
info = {'expected_objs': [self.fake_hosts[5]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_force_hosts_and_wrong_nodes(self):
# Ensure non-overlapping force_node and force_host yield no result
fake_properties = {'force_hosts': ['fake_multihost'],
'force_nodes': ['fake-node']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_nodes(self):
# Ensure ignore_hosts can coexist with force_nodes
fake_properties = {'force_nodes': ['fake-node4', 'fake-node2'],
'ignore_hosts': ['fake_host1', 'fake_host2']}
info = {'expected_objs': [self.fake_hosts[5], self.fake_hosts[7]],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
def test_get_filtered_hosts_with_ignore_hosts_and_force_same_nodes(self):
# Ensure ignore_hosts is processed before force_nodes
fake_properties = {'force_nodes': ['fake_node4', 'fake_node2'],
'ignore_hosts': ['fake_multihost']}
info = {'expected_objs': [],
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result, False)
|
|
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.utils import six
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from reviewboard.reviews.errors import PublishError
from reviewboard.reviews.models import Review
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import PUBLISH_ERROR
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.user import UserResource
class BaseReviewResource(MarkdownFieldsMixin, WebAPIResource):
    """Base class for review resources.

    Provides common fields and functionality for all review resources.
    Subclasses must implement :py:meth:`get_base_reply_to_field` to
    scope the queryset to top-level reviews or to replies.
    """
    model = Review
    fields = {
        'body_bottom': {
            'type': six.text_type,
            'description': 'The review content below the comments.',
        },
        'body_top': {
            'type': six.text_type,
            'description': 'The review content above the comments.',
        },
        'extra_data': {
            'type': dict,
            'description': 'Extra data as part of the review. '
                           'This can be set by the API or extensions.',
        },
        'id': {
            'type': int,
            'description': 'The numeric ID of the review.',
        },
        'public': {
            'type': bool,
            'description': 'Whether or not the review is currently '
                           'visible to other users.',
        },
        'ship_it': {
            'type': bool,
            'description': 'Whether or not the review has been marked '
                           '"Ship It!"',
        },
        'text_type': {
            'type': MarkdownFieldsMixin.TEXT_TYPES,
            'description': 'The mode for the body_top and body_bottom text '
                           'fields.',
        },
        'timestamp': {
            'type': six.text_type,
            'description': 'The date and time that the review was posted '
                           '(in YYYY-MM-DD HH:MM:SS format).',
        },
        'user': {
            'type': UserResource,
            'description': 'The user who wrote the review.',
        },
    }
    last_modified_field = 'timestamp'
    allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')

    def serialize_body_top_field(self, obj, **kwargs):
        """Serialize body_top, normalizing rich text for the request."""
        return self.normalize_text(obj, obj.body_top, **kwargs)

    def serialize_body_bottom_field(self, obj, **kwargs):
        """Serialize body_bottom, normalizing rich text for the request."""
        return self.normalize_text(obj, obj.body_bottom, **kwargs)

    def get_queryset(self, request, is_list=False, *args, **kwargs):
        """Return the queryset of reviews for the parent review request.

        The queryset is further narrowed by the subclass's reply-to
        condition. List requests exclude unpublished drafts.
        """
        review_request = resources.review_request.get_object(
            request, *args, **kwargs)
        q = Q(review_request=review_request) & \
            Q(**self.get_base_reply_to_field(*args, **kwargs))

        if is_list:
            # We don't want to show drafts in the list.
            q = q & Q(public=True)

        return self.model.objects.filter(q)

    def get_base_reply_to_field(self, *args, **kwargs):
        """Return a dict of queryset filters scoping the reply-to target.

        Subclasses must override this. Callers pass through the URL
        positional/keyword arguments, so the signature accepts them.
        (The previous bare ``(self)`` signature did not match how this
        method is invoked in get_queryset() and create().)
        """
        raise NotImplementedError

    def has_access_permissions(self, request, review, *args, **kwargs):
        """Return whether the user may view this review."""
        return review.is_accessible_by(request.user)

    def has_modify_permissions(self, request, review, *args, **kwargs):
        """Return whether the user may modify this review."""
        return review.is_mutable_by(request.user)

    def has_delete_permissions(self, request, review, *args, **kwargs):
        """Return whether the user may delete this review."""
        return review.is_mutable_by(request.user)

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        optional={
            'ship_it': {
                'type': bool,
                'description': 'Whether or not to mark the review "Ship It!"',
            },
            'body_top': {
                'type': six.text_type,
                'description': 'The review content above the comments.',
            },
            'body_bottom': {
                'type': six.text_type,
                'description': 'The review content below the comments.',
            },
            'public': {
                'type': bool,
                'description': 'Whether or not to make the review public. '
                               'If a review is public, it cannot be made '
                               'private again.',
            },
            'text_type': {
                'type': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
                'description': 'The mode for the body_top and body_bottom '
                               'text fields. The default is "plain".',
            },
        },
        allow_unknown=True
    )
    def create(self, request, *args, **kwargs):
        """Creates a new review.

        The new review will start off as private. Only the author of the
        review (the user who is logged in and issuing this API call) will
        be able to see and interact with the review.

        Initial data for the review can be provided by passing data for
        any number of the fields. If nothing is provided, the review will
        start off as blank.

        If ``text_type`` is provided and set to ``markdown``, then the
        ``body_top`` and ``body_bottom`` fields will be set to be interpreted
        as Markdown. Otherwise, it will be interpreted as plain text.

        If the user submitting this review already has a pending draft review
        on this review request, then this will update the existing draft and
        return :http:`303`. Otherwise, this will create a new draft and
        return :http:`201`. Either way, this request will return without
        a payload and with a ``Location`` header pointing to the location of
        the new draft review.
        """
        try:
            review_request = \
                resources.review_request.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        # Reuse the user's existing draft (public=False) if there is one;
        # otherwise create a fresh private review.
        review, is_new = Review.objects.get_or_create(
            review_request=review_request,
            user=request.user,
            public=False,
            **self.get_base_reply_to_field(*args, **kwargs))

        if is_new:
            status_code = 201  # Created
        else:
            # This already exists. Go ahead and update, but we're going to
            # redirect the user to the right place.
            status_code = 303  # See Other

        result = self._update_review(request, review, *args, **kwargs)

        if not isinstance(result, tuple) or result[0] != 200:
            # Propagate the error response from _update_review().
            return result
        else:
            return status_code, result[1], {
                'Location': self.get_href(review, request, *args, **kwargs),
            }

    @webapi_check_local_site
    @webapi_login_required
    @webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
    @webapi_request_fields(
        optional={
            'ship_it': {
                'type': bool,
                'description': 'Whether or not to mark the review "Ship It!"',
            },
            'body_top': {
                'type': six.text_type,
                'description': 'The review content above the comments.',
            },
            'body_bottom': {
                'type': six.text_type,
                'description': 'The review content below the comments.',
            },
            'public': {
                'type': bool,
                'description': 'Whether or not to make the review public. '
                               'If a review is public, it cannot be made '
                               'private again.',
            },
            'text_type': {
                'type': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
                'description': 'The mode for the body_top and body_bottom '
                               'text fields. The default is "plain".',
            },
        },
        allow_unknown=True
    )
    def update(self, request, *args, **kwargs):
        """Updates the fields of an unpublished review.

        Only the owner of a review can make changes. One or more fields can
        be updated at once.

        If ``text_type`` is provided and changed from the original value, then
        the ``body_top`` and ``body_bottom`` fields will be set to be
        interpreted according to the new type.

        When setting to ``markdown`` and not specifying any new text, the
        existing text will be escaped so as not to be unintentionally
        interpreted as Markdown.

        When setting to ``plain``, and new text is not provided, the existing
        text will be unescaped.

        The only special field is ``public``, which, if set to true, will
        publish the review. The review will then be made publicly visible. Once
        public, the review cannot be modified or made private again.
        """
        try:
            resources.review_request.get_object(request, *args, **kwargs)
            review = resources.review.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        return self._update_review(request, review, *args, **kwargs)

    @webapi_check_local_site
    @augment_method_from(WebAPIResource)
    def delete(self, *args, **kwargs):
        """Deletes the draft review.

        This only works for draft reviews, not public reviews. It will
        delete the review and all comments on it. This cannot be undone.

        Only the user who owns the draft can delete it.

        Upon deletion, this will return :http:`204`.
        """
        pass

    @webapi_check_local_site
    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Returns information on a particular review.

        If the review is not public, then the client's logged in user
        must be the owner of the review. Otherwise, an error will
        be returned.
        """
        pass

    def _update_review(self, request, review, public=None, extra_fields=None,
                       *args, **kwargs):
        """Common function to update fields on a draft review.

        Returns ``(200, payload)`` on success, or an error payload if
        the user lacks modify permission or publishing fails.
        """
        if extra_fields is None:
            # Avoid a shared mutable default argument.
            extra_fields = {}

        if not self.has_modify_permissions(request, review):
            # Can't modify published reviews or those not belonging
            # to the user.
            return self._no_access_error(request.user)

        old_rich_text = review.rich_text

        for field in ('ship_it', 'body_top', 'body_bottom'):
            value = kwargs.get(field, None)

            if value is not None:
                if isinstance(value, six.string_types):
                    value = value.strip()

                setattr(review, field, value)

        if 'text_type' in kwargs:
            review.rich_text = \
                (kwargs['text_type'] == self.TEXT_TYPE_MARKDOWN)
            # Re-escape or unescape existing text to match the new mode.
            self.normalize_markdown_fields(review, ['body_top', 'body_bottom'],
                                           old_rich_text, **kwargs)

        self._import_extra_data(review.extra_data, extra_fields)

        review.save()

        if public:
            try:
                review.publish(user=request.user)
            except PublishError as e:
                return PUBLISH_ERROR.with_message(e.msg)

        return 200, {
            self.item_result_key: review,
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.